GitHub Repository: torvalds/linux
Path: blob/master/fs/bcachefs/btree_locking.c
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
			  enum six_lock_init_flags flags,
			  gfp_t gfp)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags, gfp);
	lockdep_set_notrack_class(&b->lock);
}

/* Btree node locking: */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;
	unsigned i;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path, i)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
				  struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * @trans wants to lock @b with type @type
 */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common	*node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};
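
/*
 * Deadlock-check bookkeeping: each entry records one transaction in a
 * wait-for chain, together with the node and lock type it is blocked on,
 * plus a cursor (path_idx/level/lock_start_time) for resuming iteration
 * over the locks it holds.  The graph is a fixed stack of 8 entries;
 * chains deeper than that hit the recursion-limit path in
 * lock_graph_descend() below.
 */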

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):\n", g->nr);

	for (i = g->g; i < g->g + g->nr; i++) {
		struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
		if (!task)
			continue;

		bch2_btree_trans_to_text(out, i->trans);
		bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
	}
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", task ? task->pid : 0);
	}
	prt_newline(out);
}

static void lock_graph_up(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
	while (g->nr)
		lock_graph_up(g);
}

static noinline void lock_graph_pop_from(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	while (g->g + g->nr > i)
		lock_graph_up(g);
}

static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	closure_get(&trans->ref);
	__lock_graph_down(g, trans);
}
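
/*
 * Lifetime note: every transaction added to the graph is pinned with
 * closure_get(&trans->ref) - either here or by the caller before
 * __lock_graph_down() - so it cannot be freed while the cycle check is
 * walking its paths; lock_graph_up() drops that reference again as
 * entries are popped.
 */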

static bool lock_graph_remove_non_waiters(struct lock_graph *g,
					  struct trans_waiting_for_lock *from)
{
	struct trans_waiting_for_lock *i;

	if (from->trans->locking != from->node_want) {
		lock_graph_pop_from(g, from);
		return true;
	}

	for (i = from + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			lock_graph_pop_from(g, i);
			return true;
		}

	return false;
}

static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	count_event(c, trans_restart_would_deadlock);

	if (trace_trans_restart_would_deadlock_enabled()) {
		struct printbuf buf = PRINTBUF;

		buf.atomic++;
		print_cycle(&buf, g);

		trace_trans_restart_would_deadlock(trans, buf.buf);
		printbuf_exit(&buf);
	}
}

static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_would_deadlock(g, i->trans);
		return btree_trans_restart_foreign_task(i->trans,
					BCH_ERR_transaction_restart_would_deadlock,
					_THIS_IP_);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}

static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;
	if (!trans->in_traverse_all)
		return 2;
	return 3;
}
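
/*
 * Victim selection: a higher return value means a more preferable
 * transaction to abort - 0 means "may not be aborted at all"
 * (lock_may_not_fail), write-lock waiters are the least preferred
 * victims after that, and transactions in traverse_all are the most
 * readily aborted.  If every entry in a cycle returns 0 we have a cycle
 * of nofail locks, which break_cycle_fail() treats as a fatal bug.
 */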

static noinline __noreturn void break_cycle_fail(struct lock_graph *g)
{
	struct printbuf buf = PRINTBUF;
	buf.atomic++;

	prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

	for (struct trans_waiting_for_lock *i = g->g; i < g->g + g->nr; i++) {
		struct btree_trans *trans = i->trans;

		bch2_btree_trans_to_text(&buf, trans);

		prt_printf(&buf, "backtrace:\n");
		printbuf_indent_add(&buf, 2);
		bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
		printbuf_indent_sub(&buf, 2);
		prt_newline(&buf);
	}

	bch2_print_str(g->g->trans->c, KERN_ERR, buf.buf);
	printbuf_exit(&buf);
	BUG();
}

static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle,
				struct trans_waiting_for_lock *from)
{
	struct trans_waiting_for_lock *i, *abort = NULL;
	unsigned best = 0, pref;
	int ret;

	if (lock_graph_remove_non_waiters(g, from))
		return 0;

	/* Only checking, for debugfs: */
	if (cycle) {
		print_cycle(cycle, g);
		ret = -1;
		goto out;
	}

	for (i = from; i < g->g + g->nr; i++) {
		pref = btree_trans_abort_preference(i->trans);
		if (pref > best) {
			abort = i;
			best = pref;
		}
	}

	if (unlikely(!best))
		break_cycle_fail(g);

	ret = abort_lock(g, abort);
out:
	if (ret)
		lock_graph_pop_all(g);
	else
		lock_graph_pop_from(g, abort);
	return ret;
}
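
/*
 * Once a victim is chosen, abort_lock() either restarts the checking
 * transaction itself (when the victim is g->g, the head of the graph)
 * or flags a foreign transaction with lock_must_abort and wakes it;
 * that flag is checked at the top of bch2_check_for_deadlock(), which
 * then restarts the flagged transaction.
 */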

static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;

	for (struct trans_waiting_for_lock *i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans) {
			closure_put(&trans->ref);
			return break_cycle(g, cycle, i);
		}

	if (unlikely(g->nr == ARRAY_SIZE(g->g))) {
		closure_put(&trans->ref);

		if (orig_trans->lock_may_not_fail)
			return 0;

		lock_graph_pop_all(g);

		if (cycle)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
	}

	__lock_graph_down(g, trans);
	return 0;
}

static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	return t1 + t2 > 1;
}
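
/*
 * The arithmetic above relies on the six lock enum ordering (in six.h,
 * SIX_LOCK_read, SIX_LOCK_intent and SIX_LOCK_write are expected to be
 * 0, 1 and 2 respectively), so t1 + t2 > 1 works out to:
 *
 *	read   + read   = 0	no conflict
 *	read   + intent = 1	no conflict
 *	intent + intent = 2	conflict
 *	read   + write  = 2	conflict
 *	intent + write  = 3	conflict
 *	write  + write  = 4	conflict
 */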

int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	btree_path_idx_t path_idx;
	int ret = 0;

	g.nr = 0;

	if (trans->lock_must_abort && !trans->lock_may_not_fail) {
		if (cycle)
			return -1;

		trace_would_deadlock(&g, trans);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	lock_graph_down(&g, trans);

	/* trans->paths is rcu protected vs. freeing */
	guard(rcu)();
	if (cycle)
		cycle->atomic++;
next:
	if (!g.nr)
		goto out;

	top = &g.g[g.nr - 1];

	struct btree_path *paths = rcu_dereference(top->trans->paths);
	if (!paths)
		goto up;

	unsigned long *paths_allocated = trans_paths_allocated(paths);

	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths),
				     path_idx, top->path_idx) {
		struct btree_path *path = paths + path_idx;
		if (!path->nodes_locked)
			continue;

		if (path_idx != top->path_idx) {
			top->path_idx		= path_idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (IS_ERR_OR_NULL(b)) {
				/*
				 * If we get here, it means we raced with the
				 * other thread updating its btree_path
				 * structures - which means it can't be blocked
				 * waiting on a lock:
				 */
				if (!lock_graph_remove_non_waiters(&g, g.g)) {
					/*
					 * If lock_graph_remove_non_waiters()
					 * didn't do anything, it must be
					 * because we're being called by debugfs
					 * checking for lock cycles, which
					 * invokes us on btree_transactions that
					 * aren't actually waiting on anything.
					 * Just bail out:
					 */
					lock_graph_pop_all(&g);
				}

				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				closure_get(&trans->ref);
				raw_spin_unlock(&b->lock.wait_lock);

				ret = lock_graph_descend(&g, trans, cycle);
				if (ret)
					goto out;
				goto next;
			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}
up:
	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_up(&g);
	goto next;
out:
	if (cycle)
		--cycle->atomic;
	return ret;
}
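
/*
 * Summary of the walk above: this is an iterative depth-first search
 * over the wait-for graph, starting from @trans.  For the entry on top
 * of the stack we scan the locks its paths hold; every transaction
 * queued on one of those nodes with a conflicting lock_want is an
 * outgoing edge.  Descending onto a transaction already in the graph
 * means we found a cycle and break_cycle() picks a victim; when an
 * entry has no more edges it is popped with lock_graph_up().  When
 * @cycle is non-NULL (the debugfs caller) nothing is aborted - the
 * cycle is only printed.
 *
 * Example with two transactions: A holds an intent lock on node X and
 * waits for an intent lock on node Y, while B holds the intent lock on
 * Y and waits on X.  Running the check from A pushes A, sees B queued
 * on X with a conflicting lock_want and descends onto B, then finds A
 * queued on Y - A is already in the graph, so break_cycle() aborts
 * whichever of the two has the higher abort preference.
 */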

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}
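
/*
 * Thin adapter: the (struct six_lock *, void *) signature matches the
 * six lock code's should-sleep callback, which is presumably how this
 * gets invoked - with the btree_trans passed back as @p - whenever a
 * blocked transaction needs to re-check for deadlock before sleeping.
 */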

int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
				       lock_may_not_fail, _RET_IP_);
	six_lock_readers_add(&b->lock, readers);

	if (ret)
		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);

	return ret;
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b)
{
	int ret = __btree_node_lock_write(trans, path, b, true);
	BUG_ON(ret);
}
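
/*
 * Worked example of the readers fixup above: if this transaction holds
 * two read references on @b through other paths, readers == 2.  Those
 * references are temporarily subtracted from the lock's reader count so
 * six_lock_write() only waits for readers belonging to other threads,
 * then added back afterwards - per the comment above, this is safe
 * because the node is intent locked by this transaction.
 */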

/* relock */

static int btree_path_get_locks(struct btree_trans *trans,
				struct btree_path *path,
				bool upgrade,
				struct get_locks_fail *f,
				int restart_err)
{
	unsigned l = path->level;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l)))
			goto err;

		l++;
	} while (l < path->locks_want);

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	return path->uptodate < BTREE_ITER_NEED_RELOCK ? 0 : -1;
err:
	if (f) {
		f->l	= l;
		f->b	= path->l[l].b;
	}

	/*
	 * Do transaction restart before unlocking, so we don't pop
	 * should_be_locked asserts
	 */
	if (restart_err) {
		btree_trans_restart(trans, restart_err);
	} else if (path->should_be_locked && !trans->restarted) {
		if (upgrade)
			path->locks_want = l;
		return -1;
	}

	__bch2_btree_path_unlock(trans, path);
	btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	do {
		path->l[l].b = upgrade
			? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
			: ERR_PTR(-BCH_ERR_no_btree_node_relock);
	} while (l--);

	return -restart_err ?: -1;
}
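
/*
 * Return convention for the helper above: 0 means every level from
 * path->level up to locks_want is now locked and the path is marked
 * UPTODATE; on failure it returns -restart_err (when a restart error
 * was requested) or -1, records the failing level and node in *f when
 * provided, and poisons path->l[].b from the failing level downwards so
 * the next traverse has to walk back up past the node that could not be
 * relocked.  Callers in this file use it both for plain relocking
 * (upgrade == false) and for upgrading to intent locks (upgrade == true).
 */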

bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace && !trans->notrace_relock_fail)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
	return false;
}

/* upgrade */

bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
	return true;
}
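
/*
 * Upgrade strategy, in order: if the level is already intent locked
 * there is nothing to do; if we hold the read lock, try
 * six_lock_tryupgrade(); if we hold nothing but the sequence number
 * still matches, try to relock directly as intent; failing that, if
 * another path in this transaction already has the node intent locked,
 * piggyback on that via btree_node_lock_increment() and drop our own
 * lock on the level.  Any other outcome is reported as an upgrade
 * failure and typically leads to a transaction restart in the callers.
 */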

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
{
	bool ret = !btree_path_get_locks(trans, path, false, NULL, 0);
	bch2_trans_verify_locks(trans);
	return ret;
}

int __bch2_btree_path_relock(struct btree_trans *trans,
			     struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

bool __bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
					 struct btree_path *path,
					 unsigned new_locks_want)
{
	path->locks_want = new_locks_want;

	/*
	 * If we need it locked, we can't touch it. Otherwise, we can return
	 * success - bch2_path_get() will use this path, and it'll just be
	 * retraversed:
	 */
	bool ret = !btree_path_get_locks(trans, path, true, NULL, 0) ||
		   !path->should_be_locked;

	bch2_btree_path_verify_locks(trans, path);
	return ret;
}

int __bch2_btree_path_upgrade(struct btree_trans *trans,
			      struct btree_path *path,
			      unsigned new_locks_want)
{
	unsigned old_locks = path->nodes_locked;
	unsigned old_locks_want = path->locks_want;

	path->locks_want = max_t(unsigned, path->locks_want, new_locks_want);

	struct get_locks_fail f = {};
	int ret = btree_path_get_locks(trans, path, true, &f,
				       BCH_ERR_transaction_restart_upgrade);
	if (!ret)
		goto out;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all) {
		struct btree_path *linked;
		unsigned i;

		trans_for_each_path(trans, linked, i)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true, NULL, 0);
			}
	}

	count_event(trans->c, trans_restart_upgrade);
	if (trace_trans_restart_upgrade_enabled()) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, "%s %pS\n", trans->fn, (void *) _RET_IP_);
		prt_printf(&buf, "btree %s pos\n", bch2_btree_id_str(path->btree_id));
		bch2_bpos_to_text(&buf, path->pos);
		prt_printf(&buf, "locks want %u -> %u level %u\n",
			   old_locks_want, new_locks_want, f.l);
		prt_printf(&buf, "nodes_locked %x -> %x\n",
			   old_locks, path->nodes_locked);
		prt_printf(&buf, "node %s ", IS_ERR(f.b) ? bch2_err_str(PTR_ERR(f.b)) :
			   !f.b ? "(null)" : "(node)");
		prt_printf(&buf, "path seq %u node seq %u\n",
			   IS_ERR_OR_NULL(f.b) ? 0 : f.b->c.lock.seq,
			   path->l[f.l].lock_seq);

		trace_trans_restart_upgrade(trans->c, buf.buf);
		printbuf_exit(&buf);
	}
out:
	bch2_trans_verify_locks(trans);
	return ret;
}

void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l, old_locks_want = path->locks_want;

	if (trans->restarted)
		return;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(trans, path);

	trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
}
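
/*
 * Downgrading drops locks from the top of the path: levels above
 * path->level that are no longer wanted are unlocked outright, and once
 * the walk reaches path->level itself an intent lock is demoted to a
 * read lock with six_lock_downgrade() and the loop stops.
 */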

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (trans->restarted)
		return;

	trans_for_each_path(trans, path, i)
		if (path->ref)
			bch2_btree_path_downgrade(trans, path);
}

static inline void __bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_unlock(trans, path);
}

static noinline __cold void bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
						   struct get_locks_fail *f, bool trace, ulong ip)
{
	if (!trace)
		goto out;

	if (trace_trans_restart_relock_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bpos_to_text(&buf, path->pos);
		prt_printf(&buf, " %s l=%u seq=%u node seq=",
			   bch2_btree_id_str(path->btree_id),
			   f->l, path->l[f->l].lock_seq);
		if (IS_ERR_OR_NULL(f->b)) {
			prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
		} else {
			prt_printf(&buf, "%u", f->b->c.lock.seq);

			struct six_lock_count c =
				bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
			prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);

			c = six_lock_counts(&f->b->c.lock);
			prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
		}

		trace_trans_restart_relock(trans, ip, buf.buf);
		printbuf_exit(&buf);
	}

	count_event(trans->c, trans_restart_relock);
out:
	__bch2_trans_unlock(trans);
	bch2_trans_verify_locks(trans);
}

static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace, ulong ip)
{
	bch2_trans_verify_locks(trans);

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);
	if (unlikely(trans->locked))
		goto out;

	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i) {
		struct get_locks_fail f;
		int ret;

		if (path->should_be_locked &&
		    (ret = btree_path_get_locks(trans, path, false, &f,
						BCH_ERR_transaction_restart_relock))) {
			bch2_trans_relock_fail(trans, path, &f, trace, ip);
			return ret;
		}
	}

	trans_set_locked(trans, true);
out:
	bch2_trans_verify_locks(trans);
	return 0;
}
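
/*
 * Relocking walks every path marked should_be_locked and retakes its
 * locks from the saved lock sequence numbers; if any of them cannot be
 * retaken the whole transaction is unlocked again and
 * transaction_restart_relock is returned.  The usual pattern is to
 * bch2_trans_unlock() around something that may block and call
 * bch2_trans_relock() afterwards - the drop_locks_do() call in
 * __bch2_trans_mutex_lock() below appears to follow exactly that
 * pattern.
 */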

int bch2_trans_relock(struct btree_trans *trans)
{
	return __bch2_trans_relock(trans, true, _RET_IP_);
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
	return __bch2_trans_relock(trans, false, _RET_IP_);
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	trans_set_unlocked(trans);

	__bch2_trans_unlock(trans);
}

void bch2_trans_unlock_long(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	bch2_trans_srcu_unlock(trans);
}

void bch2_trans_unlock_write(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++)
			if (btree_node_write_locked(path, l))
				bch2_btree_node_unlock_write(trans, path, path->l[l].b);
}

int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
	int ret = drop_locks_do(trans, (mutex_lock(lock), 0));

	if (ret)
		mutex_unlock(lock);
	return ret;
}

/* Debug */

void __bch2_btree_path_verify_locks(struct btree_trans *trans, struct btree_path *path)
{
	if (!path->nodes_locked && btree_path_node(path, path->level)) {
		/*
		 * A path may be uptodate and yet have nothing locked if and only if
		 * there is no node at path->level, which generally means we were
		 * iterating over all nodes and got to the end of the btree
		 */
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE);
		BUG_ON(path->should_be_locked && trans->locked && !trans->restarted);
	}

	if (!path->nodes_locked)
		return;

	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type_nowrite(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) && want != have);

		BUG_ON(btree_node_locked(path, l) &&
		       path->l[l].lock_seq != six_lock_seq(&path->l[l].b->c.lock));
	}
}

static bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (path->nodes_locked)
			return true;
	return false;
}

void __bch2_trans_verify_locks(struct btree_trans *trans)
{
	if (!trans->locked) {
		BUG_ON(bch2_trans_locked(trans));
		return;
	}

	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_verify_locks(trans, path);
}