GitHub Repository: torvalds/linux
Path: blob/master/kernel/locking/test-ww_mutex.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
                ww_acquire_init((a), (b)); \
                (a)->deadlock_inject_countdown = ~0U; \
        } while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

struct test_mutex {
        struct work_struct work;
        struct ww_mutex mutex;
        struct completion ready, go, done;
        unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

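/*
 * Basic mutual exclusion: the worker below tries to take mtx->mutex, by
 * blocking lock or by spinning on trylock, while __test_mutex() still holds
 * it; mtx->done must not be completed until the holder has unlocked.
 */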
static void test_mutex_work(struct work_struct *work)
{
        struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

        complete(&mtx->ready);
        wait_for_completion(&mtx->go);

        if (mtx->flags & TEST_MTX_TRY) {
                while (!ww_mutex_trylock(&mtx->mutex, NULL))
                        cond_resched();
        } else {
                ww_mutex_lock(&mtx->mutex, NULL);
        }
        complete(&mtx->done);
        ww_mutex_unlock(&mtx->mutex);
}

static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
        struct test_mutex mtx;
        struct ww_acquire_ctx ctx;
        int ret;

        ww_mutex_init(&mtx.mutex, &ww_class);
        if (flags & TEST_MTX_CTX)
                ww_acquire_init(&ctx, &ww_class);

        INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
        init_completion(&mtx.ready);
        init_completion(&mtx.go);
        init_completion(&mtx.done);
        mtx.flags = flags;

        schedule_work(&mtx.work);

        wait_for_completion(&mtx.ready);
        ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
        complete(&mtx.go);
        if (flags & TEST_MTX_SPIN) {
                unsigned long timeout = jiffies + TIMEOUT;

                ret = 0;
                do {
                        if (completion_done(&mtx.done)) {
                                ret = -EINVAL;
                                break;
                        }
                        cond_resched();
                } while (time_before(jiffies, timeout));
        } else {
                ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
        }
        ww_mutex_unlock(&mtx.mutex);
        if (flags & TEST_MTX_CTX)
                ww_acquire_fini(&ctx);

        if (ret) {
                pr_err("%s(flags=%x): mutual exclusion failure\n",
                       __func__, flags);
                ret = -EINVAL;
        }

        flush_work(&mtx.work);
        destroy_work_on_stack(&mtx.work);
        return ret;
#undef TIMEOUT
}

static int test_mutex(void)
{
        int ret;
        int i;

        for (i = 0; i < __TEST_MTX_LAST; i++) {
                ret = __test_mutex(i);
                if (ret)
                        return ret;
        }

        return 0;
}

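/*
 * AA deadlock: once a context holds the mutex, trylocking it again (with or
 * without that context) must fail, and relocking it with the same context
 * must return -EALREADY.
 */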
static int test_aa(bool trylock)
{
        struct ww_mutex mutex;
        struct ww_acquire_ctx ctx;
        int ret;
        const char *from = trylock ? "trylock" : "lock";

        ww_mutex_init(&mutex, &ww_class);
        ww_acquire_init(&ctx, &ww_class);

        if (!trylock) {
                ret = ww_mutex_lock(&mutex, &ctx);
                if (ret) {
                        pr_err("%s: initial lock failed!\n", __func__);
                        goto out;
                }
        } else {
                ret = !ww_mutex_trylock(&mutex, &ctx);
                if (ret) {
                        pr_err("%s: initial trylock failed!\n", __func__);
                        goto out;
                }
        }

        if (ww_mutex_trylock(&mutex, NULL)) {
                pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
                ww_mutex_unlock(&mutex);
                ret = -EINVAL;
                goto out;
        }

        if (ww_mutex_trylock(&mutex, &ctx)) {
                pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
                ww_mutex_unlock(&mutex);
                ret = -EINVAL;
                goto out;
        }

        ret = ww_mutex_lock(&mutex, &ctx);
        if (ret != -EALREADY) {
                pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
                       __func__, ret, from);
                if (!ret)
                        ww_mutex_unlock(&mutex);
                ret = -EINVAL;
                goto out;
        }

        ww_mutex_unlock(&mutex);
        ret = 0;
out:
        ww_acquire_fini(&ctx);
        return ret;
}

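/*
 * ABBA deadlock: the worker takes b_mutex then a_mutex while the test
 * thread takes a_mutex then b_mutex.  With resolve set, the context that
 * gets -EDEADLK must back off and reacquire via ww_mutex_lock_slow() so
 * that both sides finish without error; without resolve, at least one side
 * must report -EDEADLK.
 */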
struct test_abba {
        struct work_struct work;
        struct ww_mutex a_mutex;
        struct ww_mutex b_mutex;
        struct completion a_ready;
        struct completion b_ready;
        bool resolve, trylock;
        int result;
};

static void test_abba_work(struct work_struct *work)
{
        struct test_abba *abba = container_of(work, typeof(*abba), work);
        struct ww_acquire_ctx ctx;
        int err;

        ww_acquire_init_noinject(&ctx, &ww_class);
        if (!abba->trylock)
                ww_mutex_lock(&abba->b_mutex, &ctx);
        else
                WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

        WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

        complete(&abba->b_ready);
        wait_for_completion(&abba->a_ready);

        err = ww_mutex_lock(&abba->a_mutex, &ctx);
        if (abba->resolve && err == -EDEADLK) {
                ww_mutex_unlock(&abba->b_mutex);
                ww_mutex_lock_slow(&abba->a_mutex, &ctx);
                err = ww_mutex_lock(&abba->b_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(&abba->a_mutex);
        ww_mutex_unlock(&abba->b_mutex);
        ww_acquire_fini(&ctx);

        abba->result = err;
}

static int test_abba(bool trylock, bool resolve)
{
        struct test_abba abba;
        struct ww_acquire_ctx ctx;
        int err, ret;

        ww_mutex_init(&abba.a_mutex, &ww_class);
        ww_mutex_init(&abba.b_mutex, &ww_class);
        INIT_WORK_ONSTACK(&abba.work, test_abba_work);
        init_completion(&abba.a_ready);
        init_completion(&abba.b_ready);
        abba.trylock = trylock;
        abba.resolve = resolve;

        schedule_work(&abba.work);

        ww_acquire_init_noinject(&ctx, &ww_class);
        if (!trylock)
                ww_mutex_lock(&abba.a_mutex, &ctx);
        else
                WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

        WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

        complete(&abba.a_ready);
        wait_for_completion(&abba.b_ready);

        err = ww_mutex_lock(&abba.b_mutex, &ctx);
        if (resolve && err == -EDEADLK) {
                ww_mutex_unlock(&abba.a_mutex);
                ww_mutex_lock_slow(&abba.b_mutex, &ctx);
                err = ww_mutex_lock(&abba.a_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(&abba.b_mutex);
        ww_mutex_unlock(&abba.a_mutex);
        ww_acquire_fini(&ctx);

        flush_work(&abba.work);
        destroy_work_on_stack(&abba.work);

        ret = 0;
        if (resolve) {
                if (err || abba.result) {
                        pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
                               __func__, err, abba.result);
                        ret = -EINVAL;
                }
        } else {
                if (err != -EDEADLK && abba.result != -EDEADLK) {
                        pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
                               __func__, err, abba.result);
                        ret = -EINVAL;
                }
        }
        return ret;
}

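/*
 * Cyclic deadlock: each of nthreads workers holds its own a_mutex and then
 * takes the next worker's mutex, closing a cycle.  The -EDEADLK backoff
 * must resolve the cycle so that every worker finishes without error.
 */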
struct test_cycle {
        struct work_struct work;
        struct ww_mutex a_mutex;
        struct ww_mutex *b_mutex;
        struct completion *a_signal;
        struct completion b_signal;
        int result;
};

static void test_cycle_work(struct work_struct *work)
{
        struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
        struct ww_acquire_ctx ctx;
        int err, erra = 0;

        ww_acquire_init_noinject(&ctx, &ww_class);
        ww_mutex_lock(&cycle->a_mutex, &ctx);

        complete(cycle->a_signal);
        wait_for_completion(&cycle->b_signal);

        err = ww_mutex_lock(cycle->b_mutex, &ctx);
        if (err == -EDEADLK) {
                err = 0;
                ww_mutex_unlock(&cycle->a_mutex);
                ww_mutex_lock_slow(cycle->b_mutex, &ctx);
                erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(cycle->b_mutex);
        if (!erra)
                ww_mutex_unlock(&cycle->a_mutex);
        ww_acquire_fini(&ctx);

        cycle->result = err ?: erra;
}

static int __test_cycle(unsigned int nthreads)
{
        struct test_cycle *cycles;
        unsigned int n, last = nthreads - 1;
        int ret;

        cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
        if (!cycles)
                return -ENOMEM;

        for (n = 0; n < nthreads; n++) {
                struct test_cycle *cycle = &cycles[n];

                ww_mutex_init(&cycle->a_mutex, &ww_class);
                if (n == last)
                        cycle->b_mutex = &cycles[0].a_mutex;
                else
                        cycle->b_mutex = &cycles[n + 1].a_mutex;

                if (n == 0)
                        cycle->a_signal = &cycles[last].b_signal;
                else
                        cycle->a_signal = &cycles[n - 1].b_signal;
                init_completion(&cycle->b_signal);

                INIT_WORK(&cycle->work, test_cycle_work);
                cycle->result = 0;
        }

        for (n = 0; n < nthreads; n++)
                queue_work(wq, &cycles[n].work);

        flush_workqueue(wq);

        ret = 0;
        for (n = 0; n < nthreads; n++) {
                struct test_cycle *cycle = &cycles[n];

                if (!cycle->result)
                        continue;

                pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
                       n, nthreads, cycle->result);
                ret = -EINVAL;
                break;
        }

        for (n = 0; n < nthreads; n++)
                ww_mutex_destroy(&cycles[n].a_mutex);
        kfree(cycles);
        return ret;
}

static int test_cycle(unsigned int ncpus)
{
        unsigned int n;
        int ret;

        for (n = 2; n <= ncpus + 1; n++) {
                ret = __test_cycle(n);
                if (ret)
                        return ret;
        }

        return 0;
}

struct stress {
        struct work_struct work;
        struct ww_mutex *locks;
        unsigned long timeout;
        int nlocks;
};

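/* Stress-test helpers: a spinlock-protected PRNG and a shuffled index array. */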
struct rnd_state rng;
DEFINE_SPINLOCK(rng_lock);

static inline u32 prandom_u32_below(u32 ceil)
{
        u32 ret;

        spin_lock(&rng_lock);
        ret = prandom_u32_state(&rng) % ceil;
        spin_unlock(&rng_lock);
        return ret;
}

static int *get_random_order(int count)
{
        int *order;
        int n, r;

        order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
        if (!order)
                return order;

        for (n = 0; n < count; n++)
                order[n] = n;

        for (n = count - 1; n > 1; n--) {
                r = prandom_u32_below(n + 1);
                if (r != n)
                        swap(order[n], order[r]);
        }

        return order;
}

static void dummy_load(struct stress *stress)
{
        usleep_range(1000, 2000);
}

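/*
 * Stress: take every lock in one randomly chosen but fixed order; on
 * -EDEADLK drop them all, sleep on the contended lock with
 * ww_mutex_lock_slow() and retry with that lock already held.
 */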
static void stress_inorder_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        const int nlocks = stress->nlocks;
        struct ww_mutex *locks = stress->locks;
        struct ww_acquire_ctx ctx;
        int *order;

        order = get_random_order(nlocks);
        if (!order)
                return;

        do {
                int contended = -1;
                int n, err;

                ww_acquire_init(&ctx, &ww_class);
retry:
                err = 0;
                for (n = 0; n < nlocks; n++) {
                        if (n == contended)
                                continue;

                        err = ww_mutex_lock(&locks[order[n]], &ctx);
                        if (err < 0)
                                break;
                }
                if (!err)
                        dummy_load(stress);

                if (contended > n)
                        ww_mutex_unlock(&locks[order[contended]]);
                contended = n;
                while (n--)
                        ww_mutex_unlock(&locks[order[n]]);

                if (err == -EDEADLK) {
                        if (!time_after(jiffies, stress->timeout)) {
                                ww_mutex_lock_slow(&locks[order[contended]], &ctx);
                                goto retry;
                        }
                }

                ww_acquire_fini(&ctx);
                if (err) {
                        pr_err_once("stress (%s) failed with %d\n",
                                    __func__, err);
                        break;
                }
        } while (!time_after(jiffies, stress->timeout));

        kfree(order);
}

struct reorder_lock {
        struct list_head link;
        struct ww_mutex *lock;
};

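/*
 * Stress: keep the locks on a list and acquire them in list order; after an
 * -EDEADLK backoff the contended lock is moved to the head of the list, so
 * the acquisition order changes from one deadlock to the next.
 */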
static void stress_reorder_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        LIST_HEAD(locks);
        struct ww_acquire_ctx ctx;
        struct reorder_lock *ll, *ln;
        int *order;
        int n, err;

        order = get_random_order(stress->nlocks);
        if (!order)
                return;

        for (n = 0; n < stress->nlocks; n++) {
                ll = kmalloc(sizeof(*ll), GFP_KERNEL);
                if (!ll)
                        goto out;

                ll->lock = &stress->locks[order[n]];
                list_add(&ll->link, &locks);
        }
        kfree(order);
        order = NULL;

        do {
                ww_acquire_init(&ctx, &ww_class);

                list_for_each_entry(ll, &locks, link) {
                        err = ww_mutex_lock(ll->lock, &ctx);
                        if (!err)
                                continue;

                        ln = ll;
                        list_for_each_entry_continue_reverse(ln, &locks, link)
                                ww_mutex_unlock(ln->lock);

                        if (err != -EDEADLK) {
                                pr_err_once("stress (%s) failed with %d\n",
                                            __func__, err);
                                break;
                        }

                        ww_mutex_lock_slow(ll->lock, &ctx);
                        list_move(&ll->link, &locks); /* restarts iteration */
                }

                dummy_load(stress);
                list_for_each_entry(ll, &locks, link)
                        ww_mutex_unlock(ll->lock);

                ww_acquire_fini(&ctx);
        } while (!time_after(jiffies, stress->timeout));

out:
        list_for_each_entry_safe(ll, ln, &locks, link)
                kfree(ll);
        kfree(order);
}

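/* Stress: hammer one randomly chosen mutex without an acquire context. */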
static void stress_one_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        const int nlocks = stress->nlocks;
        struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
        int err;

        do {
                err = ww_mutex_lock(lock, NULL);
                if (!err) {
                        dummy_load(stress);
                        ww_mutex_unlock(lock);
                } else {
                        pr_err_once("stress (%s) failed with %d\n",
                                    __func__, err);
                        break;
                }
        } while (!time_after(jiffies, stress->timeout));
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

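/*
 * Distribute nthreads workers across the stress patterns selected by flags,
 * all operating on one shared array of nlocks ww_mutexes for roughly two
 * seconds (2*HZ).
 */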
static int stress(int nlocks, int nthreads, unsigned int flags)
{
        struct ww_mutex *locks;
        struct stress *stress_array;
        int n, count;

        locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
        if (!locks)
                return -ENOMEM;

        stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
                                     GFP_KERNEL);
        if (!stress_array) {
                kfree(locks);
                return -ENOMEM;
        }

        for (n = 0; n < nlocks; n++)
                ww_mutex_init(&locks[n], &ww_class);

        count = 0;
        for (n = 0; nthreads; n++) {
                struct stress *stress;
                void (*fn)(struct work_struct *work);

                fn = NULL;
                switch (n & 3) {
                case 0:
                        if (flags & STRESS_INORDER)
                                fn = stress_inorder_work;
                        break;
                case 1:
                        if (flags & STRESS_REORDER)
                                fn = stress_reorder_work;
                        break;
                case 2:
                        if (flags & STRESS_ONE)
                                fn = stress_one_work;
                        break;
                }

                if (!fn)
                        continue;

                stress = &stress_array[count++];

                INIT_WORK(&stress->work, fn);
                stress->locks = locks;
                stress->nlocks = nlocks;
                stress->timeout = jiffies + 2*HZ;

                queue_work(wq, &stress->work);
                nthreads--;
        }

        flush_workqueue(wq);

        for (n = 0; n < nlocks; n++)
                ww_mutex_destroy(&locks[n]);
        kfree(stress_array);
        kfree(locks);

        return 0;
}

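/* Run every self-test on module load and stop at the first failure. */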
static int __init test_ww_mutex_init(void)
{
        int ncpus = num_online_cpus();
        int ret, i;

        printk(KERN_INFO "Beginning ww mutex selftests\n");

        prandom_seed_state(&rng, get_random_u64());

        wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
        if (!wq)
                return -ENOMEM;

        ret = test_mutex();
        if (ret)
                return ret;

        ret = test_aa(false);
        if (ret)
                return ret;

        ret = test_aa(true);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++) {
                ret = test_abba(i & 1, i & 2);
                if (ret)
                        return ret;
        }

        ret = test_cycle(ncpus);
        if (ret)
                return ret;

        ret = stress(16, 2*ncpus, STRESS_INORDER);
        if (ret)
                return ret;

        ret = stress(16, 2*ncpus, STRESS_REORDER);
        if (ret)
                return ret;

        ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
        if (ret)
                return ret;

        printk(KERN_INFO "All ww mutex selftests passed\n");
        return 0;
}

static void __exit test_ww_mutex_exit(void)
{
        destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("API test facility for ww_mutexes");