GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma-buf/st-dma-fence.c
/* SPDX-License-Identifier: MIT */

/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "selftest.h"

static struct kmem_cache *slab_fences;

static struct mock_fence {
        struct dma_fence base;
        spinlock_t lock;
} *to_mock_fence(struct dma_fence *f) {
        return container_of(f, struct mock_fence, base);
}

static const char *mock_name(struct dma_fence *f)
{
        return "mock";
}

static void mock_fence_release(struct dma_fence *f)
{
        kmem_cache_free(slab_fences, to_mock_fence(f));
}

static const struct dma_fence_ops mock_ops = {
        .get_driver_name = mock_name,
        .get_timeline_name = mock_name,
        .release = mock_fence_release,
};

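/*
 * Allocate and initialise a mock fence from the selftest slab. The fence
 * uses context 0, seqno 0 and the minimal mock_ops above; callers drop
 * their reference with dma_fence_put() as usual.
 */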
static struct dma_fence *mock_fence(void)
{
        struct mock_fence *f;

        f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
        if (!f)
                return NULL;

        spin_lock_init(&f->lock);
        dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);

        return &f->base;
}

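/* Trivial smoke test: allocate a fence, signal it and release it. */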
static int sanitycheck(void *arg)
{
        struct dma_fence *f;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        dma_fence_signal(f);
        dma_fence_put(f);

        return 0;
}

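/*
 * Check the basic signaling rules: a fresh fence must not report being
 * signaled, the first dma_fence_signal() must succeed, and a second
 * signal on the same fence must report that it was already signaled.
 */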
static int test_signaling(void *arg)
{
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        if (dma_fence_is_signaled(f)) {
                pr_err("Fence unexpectedly signaled on creation\n");
                goto err_free;
        }

        if (dma_fence_signal(f)) {
                pr_err("Fence reported being already signaled\n");
                goto err_free;
        }

        if (!dma_fence_is_signaled(f)) {
                pr_err("Fence not reporting signaled\n");
                goto err_free;
        }

        if (!dma_fence_signal(f)) {
                pr_err("Fence reported not being already signaled\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}

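/*
 * Minimal callback used by the tests below: it just records, with a full
 * memory barrier, that the callback has run.
 */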
struct simple_cb {
        struct dma_fence_cb cb;
        bool seen;
};

static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
        smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
}

static int test_add_callback(void *arg)
{
        struct simple_cb cb = {};
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
                pr_err("Failed to add callback, fence already signaled!\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (!cb.seen) {
                pr_err("Callback failed!\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}

static int test_late_add_callback(void *arg)
{
        struct simple_cb cb = {};
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        dma_fence_signal(f);

        if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
                pr_err("Added callback, but fence was already signaled!\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (cb.seen) {
                pr_err("Callback called after failed attachment!\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}

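/*
 * A callback removed before the fence is signaled must never run;
 * dma_fence_remove_callback() returns true when the callback was still
 * queued.
 */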
static int test_rm_callback(void *arg)
{
        struct simple_cb cb = {};
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
                pr_err("Failed to add callback, fence already signaled!\n");
                goto err_free;
        }

        if (!dma_fence_remove_callback(f, &cb.cb)) {
                pr_err("Failed to remove callback!\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (cb.seen) {
                pr_err("Callback still signaled after removal!\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}

static int test_late_rm_callback(void *arg)
{
        struct simple_cb cb = {};
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
                pr_err("Failed to add callback, fence already signaled!\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (!cb.seen) {
                pr_err("Callback failed!\n");
                goto err_free;
        }

        if (dma_fence_remove_callback(f, &cb.cb)) {
                pr_err("Callback removal succeeded after being executed!\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}

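/*
 * dma_fence_get_status() must report 0 while the fence is still pending
 * and a positive value once it has signaled without an error.
 */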
static int test_status(void *arg)
{
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        if (dma_fence_get_status(f)) {
                pr_err("Fence unexpectedly has signaled status on creation\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (!dma_fence_get_status(f)) {
                pr_err("Fence not reporting signaled status\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}

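/*
 * An error set with dma_fence_set_error() must not show up in the status
 * until the fence is actually signaled; afterwards dma_fence_get_status()
 * must return that error.
 */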
static int test_error(void *arg)
{
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        dma_fence_set_error(f, -EIO);

        if (dma_fence_get_status(f)) {
                pr_err("Fence unexpectedly has error status before signal\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (dma_fence_get_status(f) != -EIO) {
                pr_err("Fence not reporting error status, got %d\n",
                       dma_fence_get_status(f));
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}

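/*
 * Poll the fence with a zero timeout: the wait must not report completion
 * before dma_fence_signal(), and must report completion afterwards.
 */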
static int test_wait(void *arg)
{
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        if (dma_fence_wait_timeout(f, false, 0) != 0) {
                pr_err("Wait reported complete before being signaled\n");
                goto err_free;
        }

        dma_fence_signal(f);

        if (dma_fence_wait_timeout(f, false, 0) != 1) {
                pr_err("Wait reported incomplete after being signaled\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_signal(f);
        dma_fence_put(f);
        return err;
}

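/*
 * Helper for test_wait_timeout(): a timer that signals the fence from its
 * callback so that a real, bounded wait can be exercised.
 */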
struct wait_timer {
        struct timer_list timer;
        struct dma_fence *f;
};

static void wait_timer(struct timer_list *timer)
{
        struct wait_timer *wt = timer_container_of(wt, timer, timer);

        dma_fence_signal(wt->f);
}

static int test_wait_timeout(void *arg)
{
        struct wait_timer wt;
        int err = -EINVAL;

        timer_setup_on_stack(&wt.timer, wait_timer, 0);

        wt.f = mock_fence();
        if (!wt.f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(wt.f);

        if (dma_fence_wait_timeout(wt.f, false, 1) != 0) {
                pr_err("Wait reported complete before being signaled\n");
                goto err_free;
        }

        mod_timer(&wt.timer, jiffies + 1);

        if (dma_fence_wait_timeout(wt.f, false, HZ) == 0) {
                if (timer_pending(&wt.timer)) {
                        pr_notice("Timer did not fire within one HZ!\n");
                        err = 0; /* not our fault! */
                } else {
                        pr_err("Wait reported incomplete after timeout\n");
                }
                goto err_free;
        }

        err = 0;
err_free:
        timer_delete_sync(&wt.timer);
        timer_destroy_on_stack(&wt.timer);
        dma_fence_signal(wt.f);
        dma_fence_put(wt.f);
        return err;
}

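/*
 * Every fence returned by dma_fence_get_stub() must already be signaled;
 * each reference obtained here is dropped again on the way out.
 */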
static int test_stub(void *arg)
{
        struct dma_fence *f[64];
        int err = -EINVAL;
        int i;

        for (i = 0; i < ARRAY_SIZE(f); i++) {
                f[i] = dma_fence_get_stub();
                if (!dma_fence_is_signaled(f[i])) {
                        pr_err("Obtained unsignaled stub fence!\n");
                        goto err;
                }
        }

        err = 0;
err:
        while (i--)
                dma_fence_put(f[i]);
        return err;
}

/* Now off to the races! */

struct race_thread {
        struct dma_fence __rcu **fences;
        struct task_struct *task;
        bool before;
        int id;
};

static void __wait_for_callbacks(struct dma_fence *f)
{
        spin_lock_irq(f->lock);
        spin_unlock_irq(f->lock);
}

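/*
 * Two of these threads run against each other. Each thread publishes its
 * own fence via RCU, grabs its partner's fence with
 * dma_fence_get_rcu_safe(), attaches simple_callback() to it and then
 * verifies the callback is eventually seen, while signaling its own fence
 * either before or after the callback attachment (t->before).
 */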
static int thread_signal_callback(void *arg)
{
        const struct race_thread *t = arg;
        unsigned long pass = 0;
        unsigned long miss = 0;
        int err = 0;

        while (!err && !kthread_should_stop()) {
                struct dma_fence *f1, *f2;
                struct simple_cb cb;

                f1 = mock_fence();
                if (!f1) {
                        err = -ENOMEM;
                        break;
                }

                dma_fence_enable_sw_signaling(f1);

                rcu_assign_pointer(t->fences[t->id], f1);
                smp_wmb();

                rcu_read_lock();
                do {
                        f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
                } while (!f2 && !kthread_should_stop());
                rcu_read_unlock();

                if (t->before)
                        dma_fence_signal(f1);

                smp_store_mb(cb.seen, false);
                if (!f2 ||
                    dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
                        miss++;
                        cb.seen = true;
                }

                if (!t->before)
                        dma_fence_signal(f1);

                if (!cb.seen) {
                        dma_fence_wait(f2, false);
                        __wait_for_callbacks(f2);
                }

                if (!READ_ONCE(cb.seen)) {
                        pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
                               t->id, pass, miss,
                               t->before ? "before" : "after",
                               dma_fence_is_signaled(f2) ? "yes" : "no");
                        err = -EINVAL;
                }

                dma_fence_put(f2);

                rcu_assign_pointer(t->fences[t->id], NULL);
                smp_wmb();

                dma_fence_put(f1);

                pass++;
        }

        pr_info("%s[%d] completed %lu passes, %lu misses\n",
                __func__, t->id, pass, miss);
        return err;
}

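/*
 * Spawn a pair of signaling threads, let them race for 50ms, then stop
 * them and collect any error. The test runs twice: once signaling after
 * the callback attachment and once before it.
 */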
static int race_signal_callback(void *arg)
{
        struct dma_fence __rcu *f[2] = {};
        int ret = 0;
        int pass;

        for (pass = 0; !ret && pass <= 1; pass++) {
                struct race_thread t[2];
                int i;

                for (i = 0; i < ARRAY_SIZE(t); i++) {
                        t[i].fences = f;
                        t[i].id = i;
                        t[i].before = pass;
                        t[i].task = kthread_run(thread_signal_callback, &t[i],
                                                "dma-fence:%d", i);
                        if (IS_ERR(t[i].task)) {
                                ret = PTR_ERR(t[i].task);
                                while (--i >= 0)
                                        kthread_stop_put(t[i].task);
                                return ret;
                        }
                        get_task_struct(t[i].task);
                }

                msleep(50);

                for (i = 0; i < ARRAY_SIZE(t); i++) {
                        int err;

                        err = kthread_stop_put(t[i].task);
                        if (err && !ret)
                                ret = err;
                }
        }

        return ret;
}

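/*
 * Entry point for the selftest framework (see selftest.h). The mock
 * fences are backed by a SLAB_TYPESAFE_BY_RCU cache so that
 * dma_fence_get_rcu_safe() in the race tests can safely inspect memory
 * that may already have been recycled for a new fence.
 */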
int dma_fence(void)
{
        static const struct subtest tests[] = {
                SUBTEST(sanitycheck),
                SUBTEST(test_signaling),
                SUBTEST(test_add_callback),
                SUBTEST(test_late_add_callback),
                SUBTEST(test_rm_callback),
                SUBTEST(test_late_rm_callback),
                SUBTEST(test_status),
                SUBTEST(test_error),
                SUBTEST(test_wait),
                SUBTEST(test_wait_timeout),
                SUBTEST(test_stub),
                SUBTEST(race_signal_callback),
        };
        int ret;

        pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));

        slab_fences = KMEM_CACHE(mock_fence,
                                 SLAB_TYPESAFE_BY_RCU |
                                 SLAB_HWCACHE_ALIGN);
        if (!slab_fences)
                return -ENOMEM;

        ret = subtests(tests, NULL);

        kmem_cache_destroy(slab_fences);

        return ret;
}