// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_cache.h"
#include "btree_journal_iter.h"
#include "journal_io.h"

#include <linux/sort.h>

/*
 * For managing keys we read from the journal: until journal replay has
 * finished, normal btree lookups need to be able to find and return keys from
 * the journal where they overwrite what's in the btree, so we have a special
 * iterator and operations for the regular btree iter code to use:
 */

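/*
 * journal_keys is a gap buffer: the live entries occupy data[0..gap) and
 * data[gap + gap_size..size), with gap_size = size - nr.  An "idx" is a
 * logical index into the nr live keys; a "pos" is a physical index into
 * data[].  For example, with size = 8, nr = 6, gap = 3:
 *
 *	pos:   0   1   2   3   4   5   6   7
 *	data: k0  k1  k2  --  --  k3  k4  k5
 *	idx:   0   1   2           3   4   5
 */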
static inline size_t pos_to_idx(struct journal_keys *keys, size_t pos)
{
	size_t gap_size = keys->size - keys->nr;

	BUG_ON(pos >= keys->gap && pos < keys->gap + gap_size);

	if (pos >= keys->gap)
		pos -= gap_size;
	return pos;
}

static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
{
	size_t gap_size = keys->size - keys->nr;

	if (idx >= keys->gap)
		idx += gap_size;
	return idx;
}

static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
{
	return keys->data + idx_to_pos(keys, idx);
}

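/*
 * Standard lower-bound binary search over the logical (gap-free) index space:
 * returns the smallest idx whose key compares >= (id, level, pos), or
 * keys->nr if there is no such key:
 */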
static size_t __bch2_journal_key_search(struct journal_keys *keys,
					enum btree_id id, unsigned level,
					struct bpos pos)
{
	size_t l = 0, r = keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < keys->nr &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);

	return l;
}

static size_t bch2_journal_key_search(struct journal_keys *keys,
				      enum btree_id id, unsigned level,
				      struct bpos pos)
{
	return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos));
}

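/*
 * *idx is a cursor cached by the caller across calls: 0 means "unknown, do a
 * fresh search".  If a stale cursor makes us walk too many entries (the
 * iters == 10 checks below), we give up on it and redo the binary search.
 *
 * A minimal usage sketch (process() is hypothetical; the cursor persists
 * while pos advances):
 *
 *	size_t idx = 0;
 *	struct bkey_i *k;
 *	while ((k = bch2_journal_keys_peek_max(c, btree, level, pos, end, &idx))) {
 *		process(k);
 *		pos = bpos_successor(k->k.p);
 *	}
 */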
/* Returns first non-overwritten key >= search key: */
struct bkey_i *bch2_journal_keys_peek_max(struct bch_fs *c, enum btree_id btree_id,
					  unsigned level, struct bpos pos,
					  struct bpos end_pos, size_t *idx)
{
	struct journal_keys *keys = &c->journal_keys;
	unsigned iters = 0;
	struct journal_key *k;

	BUG_ON(*idx > keys->nr);
search:
	if (!*idx)
		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);

	while (*idx &&
	       __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) {
		--(*idx);
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}

	struct bkey_i *ret = NULL;
	rcu_read_lock(); /* for overwritten_ranges */

	while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) {
		if (__journal_key_cmp(btree_id, level, end_pos, k) < 0)
			break;

		if (k->overwritten) {
			if (k->overwritten_range)
				*idx = rcu_dereference(k->overwritten_range)->end;
			else
				*idx += 1;
			continue;
		}

		if (__journal_key_cmp(btree_id, level, pos, k) <= 0) {
			ret = k->k;
			break;
		}

		(*idx)++;
		iters++;
		if (iters == 10) {
			*idx = 0;
			rcu_read_unlock();
			goto search;
		}
	}

	rcu_read_unlock();
	return ret;
}

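/*
 * Returns the last non-overwritten key <= the search key that is still
 * >= end_pos, scanning backwards:
 */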
struct bkey_i *bch2_journal_keys_peek_prev_min(struct bch_fs *c, enum btree_id btree_id,
					       unsigned level, struct bpos pos,
					       struct bpos end_pos, size_t *idx)
{
	struct journal_keys *keys = &c->journal_keys;
	unsigned iters = 0;
	struct journal_key *k;

	BUG_ON(*idx > keys->nr);

	if (!keys->nr)
		return NULL;
search:
	if (!*idx)
		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);

	while (*idx < keys->nr &&
	       __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx)) >= 0) {
		(*idx)++;
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}

	if (*idx == keys->nr)
		--(*idx);

	struct bkey_i *ret = NULL;
	rcu_read_lock(); /* for overwritten_ranges */

	while (true) {
		k = idx_to_key(keys, *idx);
		if (__journal_key_cmp(btree_id, level, end_pos, k) > 0)
			break;

		if (k->overwritten) {
			if (k->overwritten_range)
				*idx = rcu_dereference(k->overwritten_range)->start;
			if (!*idx)
				break;
			--(*idx);
			continue;
		}

		if (__journal_key_cmp(btree_id, level, pos, k) >= 0) {
			ret = k->k;
			break;
		}

		if (!*idx)
			break;
		--(*idx);
		iters++;
		if (iters == 10) {
			*idx = 0;
			/* restarting re-enters the RCU read section above: */
			rcu_read_unlock();
			goto search;
		}
	}

	rcu_read_unlock();
	return ret;
}

struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos)
{
	size_t idx = 0;

	return bch2_journal_keys_peek_max(c, btree_id, level, pos, pos, &idx);
}

static void journal_iter_verify(struct journal_iter *iter)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct journal_keys *keys = iter->keys;
	size_t gap_size = keys->size - keys->nr;

	BUG_ON(iter->idx >= keys->gap &&
	       iter->idx < keys->gap + gap_size);

	if (iter->idx < keys->size) {
		struct journal_key *k = keys->data + iter->idx;

		int cmp = __journal_key_btree_cmp(iter->btree_id, iter->level, k);
		BUG_ON(cmp > 0);
	}
#endif
}

static void journal_iters_fix(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	/* The key we just inserted is immediately before the gap: */
	size_t gap_end = keys->gap + (keys->size - keys->nr);
	struct journal_key *new_key = &keys->data[keys->gap - 1];
	struct journal_iter *iter;

	/*
	 * If an iterator points one after the key we just inserted, decrement
	 * the iterator so it points at the key we just inserted - if the
	 * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
	 * handle that:
	 */
	list_for_each_entry(iter, &c->journal_iters, list) {
		journal_iter_verify(iter);
		if (iter->idx == gap_end &&
		    new_key->btree_id == iter->btree_id &&
		    new_key->level == iter->level)
			iter->idx = keys->gap - 1;
		journal_iter_verify(iter);
	}
}

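/*
 * Iterator positions are physical: when the gap moves from @old_gap to
 * @new_gap, entries between the two locations shift by gap_size, so live
 * iterators have to be translated out of the old layout and into the new one:
 */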
static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	size_t gap_size = keys->size - keys->nr;

	list_for_each_entry(iter, &c->journal_iters, list) {
		if (iter->idx > old_gap)
			iter->idx -= gap_size;
		if (iter->idx >= new_gap)
			iter->idx += gap_size;
	}
}

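/*
 * Insert @k, taking ownership of it: on success the key belongs to
 * journal_keys and is freed with the rest of it.  A key already present at
 * the same (btree, level, pos) is replaced in place; otherwise the gap is
 * moved to the insert position, doubling the array first if it's full:
 */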
int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
				 unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true,
		/*
		 * Ensure these keys are done last by journal replay, to unblock
		 * journal reclaim:
		 */
		.journal_seq	= U64_MAX,
	};
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);

	BUG_ON(test_bit(BCH_FS_rw, &c->flags));

	if (idx < keys->size &&
	    journal_key_cmp(&n, &keys->data[idx]) == 0) {
		if (keys->data[idx].allocated)
			kfree(keys->data[idx].k);
		keys->data[idx] = n;
		return 0;
	}

	if (idx > keys->gap)
		idx -= keys->size - keys->nr;

	size_t old_gap = keys->gap;

	if (keys->nr == keys->size) {
		journal_iters_move_gap(c, old_gap, keys->size);
		old_gap = keys->size;

		struct journal_keys new_keys = {
			.nr	= keys->nr,
			.size	= max_t(size_t, keys->size, 8) * 2,
		};

		new_keys.data = bch2_kvmalloc(new_keys.size * sizeof(new_keys.data[0]), GFP_KERNEL);
		if (!new_keys.data) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return bch_err_throw(c, ENOMEM_journal_key_insert);
		}

		/* Since @keys was full, there was no gap: */
		memcpy(new_keys.data, keys->data, sizeof(keys->data[0]) * keys->nr);
		kvfree(keys->data);
		keys->data	= new_keys.data;
		keys->nr	= new_keys.nr;
		keys->size	= new_keys.size;

		/* And now the gap is at the end: */
		keys->gap	= keys->nr;
	}

	journal_iters_move_gap(c, old_gap, idx);

	move_gap(keys, idx);

	keys->nr++;
	keys->data[keys->gap++] = n;

	journal_iters_fix(c);

	return 0;
}

/*
 * Can only be used from the recovery thread while we're still RO - can't be
 * used once we've gone RW, as journal_keys is at that point used by multiple
 * threads:
 */
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return bch_err_throw(c, ENOMEM_journal_key_insert);

	bkey_copy(n, k);
	ret = bch2_journal_key_insert_take(c, id, level, n);
	if (ret)
		kfree(n);
	return ret;
}

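/* Deletion is just inserting a whiteout at @pos: */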
int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i whiteout;

	bkey_init(&whiteout.k);
	whiteout.k.p = pos;

	return bch2_journal_key_insert(c, id, level, &whiteout);
}

bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree,
				 unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &trans->c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (!trans->journal_replay_not_finished)
		return false;

	return (idx < keys->size &&
		keys->data[idx].btree_id == btree &&
		keys->data[idx].level == level &&
		bpos_eq(keys->data[idx].k->k.p, pos) &&
		bkey_deleted(&keys->data[idx].k->k));
}

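/*
 * Maintains overwritten_range for runs of adjacent overwritten keys: every
 * key in a run points at a shared journal_key_range_overwritten holding the
 * run's [start, end) in idx space, so iterators can skip a whole run in one
 * step instead of one key at a time.  Below we either merge the ranges of
 * both neighbours, extend one neighbour's range, or start a new range -
 * readers walk these under RCU, hence the rcu_assign_pointer()/kfree_rcu():
 */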
static void __bch2_journal_key_overwritten(struct journal_keys *keys, size_t pos)
{
	struct journal_key *k = keys->data + pos;
	size_t idx = pos_to_idx(keys, pos);

	k->overwritten = true;

	struct journal_key *prev = idx > 0 ? keys->data + idx_to_pos(keys, idx - 1) : NULL;
	struct journal_key *next = idx + 1 < keys->nr ? keys->data + idx_to_pos(keys, idx + 1) : NULL;

	bool prev_overwritten = prev && prev->overwritten;
	bool next_overwritten = next && next->overwritten;

	struct journal_key_range_overwritten *prev_range =
		prev_overwritten ? prev->overwritten_range : NULL;
	struct journal_key_range_overwritten *next_range =
		next_overwritten ? next->overwritten_range : NULL;

	BUG_ON(prev_range && prev_range->end != idx);
	BUG_ON(next_range && next_range->start != idx + 1);

	if (prev_range && next_range) {
		prev_range->end = next_range->end;

		keys->data[pos].overwritten_range = prev_range;
		for (size_t i = next_range->start; i < next_range->end; i++) {
			struct journal_key *ip = keys->data + idx_to_pos(keys, i);
			BUG_ON(ip->overwritten_range != next_range);
			ip->overwritten_range = prev_range;
		}

		kfree_rcu_mightsleep(next_range);
	} else if (prev_range) {
		prev_range->end++;
		k->overwritten_range = prev_range;
		if (next_overwritten) {
			prev_range->end++;
			next->overwritten_range = prev_range;
		}
	} else if (next_range) {
		next_range->start--;
		k->overwritten_range = next_range;
		if (prev_overwritten) {
			next_range->start--;
			prev->overwritten_range = next_range;
		}
	} else if (prev_overwritten || next_overwritten) {
		struct journal_key_range_overwritten *r = kmalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return;

		r->start = idx - (size_t) prev_overwritten;
		r->end = idx + 1 + (size_t) next_overwritten;

		rcu_assign_pointer(k->overwritten_range, r);
		if (prev_overwritten)
			prev->overwritten_range = r;
		if (next_overwritten)
			next->overwritten_range = r;
	}
}

void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
				  unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (idx < keys->size &&
	    keys->data[idx].btree_id == btree &&
	    keys->data[idx].level == level &&
	    bpos_eq(keys->data[idx].k->k.p, pos) &&
	    !keys->data[idx].overwritten) {
		mutex_lock(&keys->overwrite_lock);
		__bch2_journal_key_overwritten(keys, idx);
		mutex_unlock(&keys->overwrite_lock);
	}
}

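/* Advance one physical position, hopping over the gap: */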
static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->size) {
		iter->idx++;
		if (iter->idx == iter->keys->gap)
			iter->idx += iter->keys->size - iter->keys->nr;
	}
}

static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
	journal_iter_verify(iter);

	guard(rcu)();
	while (iter->idx < iter->keys->size) {
		struct journal_key *k = iter->keys->data + iter->idx;

		int cmp = __journal_key_btree_cmp(iter->btree_id, iter->level, k);
		if (cmp < 0)
			break;
		BUG_ON(cmp);

		if (!k->overwritten)
			return bkey_i_to_s_c(k->k);

		if (k->overwritten_range)
			iter->idx = idx_to_pos(iter->keys, rcu_dereference(k->overwritten_range)->end);
		else
			bch2_journal_iter_advance(iter);
	}

	return bkey_s_c_null;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= bch2_journal_key_search(&c->journal_keys, id, level, pos);

	journal_iter_verify(iter);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	if (bpos_eq(iter->pos, SPOS_MAX))
		iter->at_end = true;
	else
		iter->pos = bpos_successor(iter->pos);
}

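/*
 * Prefetch btree nodes pointed to by upcoming keys, using a throwaway copy of
 * the iterator so the caller's position is unaffected; we prefetch more
 * aggressively during early recovery (before BCH_FS_started) than at runtime:
 */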
static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter)
{
	struct btree_and_journal_iter iter = *_iter;
	struct bch_fs *c = iter.trans->c;
	unsigned level = iter.journal.level;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (level > 1 ? 0 :  2)
		: (level > 1 ? 1 : 16);

	iter.prefetch = false;
	iter.fail_if_too_many_whiteouts = true;
	bch2_bkey_buf_init(&tmp);

	while (nr--) {
		bch2_btree_and_journal_iter_advance(&iter);
		struct bkey_s_c k = bch2_btree_and_journal_iter_peek(&iter);
		if (!k.k)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		bch2_btree_node_prefetch(iter.trans, NULL, tmp.k, iter.journal.btree_id, level - 1);
	}

	bch2_bkey_buf_exit(&tmp, c);
}

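/*
 * Merge the btree node iterator with the journal: return whichever key sorts
 * first, with the journal winning ties so journal keys override what's in the
 * btree.  Deleted keys (whiteouts) are skipped:
 */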
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret;
	size_t iters = 0;

	if (iter->prefetch && iter->journal.level)
		btree_and_journal_iter_prefetch(iter);
again:
	if (iter->at_end)
		return bkey_s_c_null;

	iters++;

	if (iters > 20 && iter->fail_if_too_many_whiteouts)
		return bkey_s_c_null;

	while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
	       bpos_lt(btree_k.k->p, iter->pos))
		bch2_journal_iter_advance_btree(iter);

	if (iter->trans->journal_replay_not_finished)
		while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
		       bpos_lt(journal_k.k->p, iter->pos))
			bch2_journal_iter_advance(&iter->journal);

	ret = journal_k.k &&
		(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
		? journal_k
		: btree_k;

	if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
		ret = bkey_s_c_null;

	if (ret.k) {
		iter->pos = ret.k->p;
		if (bkey_deleted(ret.k)) {
			bch2_btree_and_journal_iter_advance(iter);
			goto again;
		}
	} else {
		iter->pos = SPOS_MAX;
		iter->at_end = true;
	}

	return ret;
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void __bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
						  struct btree_and_journal_iter *iter,
						  struct btree *b,
						  struct btree_node_iter node_iter,
						  struct bpos pos)
{
	memset(iter, 0, sizeof(*iter));

	iter->trans	= trans;
	iter->b		= b;
	iter->node_iter	= node_iter;
	iter->pos	= b->data->min_key;
	iter->at_end	= false;
	INIT_LIST_HEAD(&iter->journal.list);

	if (trans->journal_replay_not_finished) {
		bch2_journal_iter_init(trans->c, &iter->journal, b->c.btree_id, b->c.level, pos);
		if (!test_bit(BCH_FS_may_go_rw, &trans->c->flags))
			list_add(&iter->journal.list, &trans->c->journal_iters);
	}
}

/*
 * This version is used by btree_gc before the filesystem has gone RW and
 * multithreaded, so it uses the journal_iters list:
 */
void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
						struct btree_and_journal_iter *iter,
						struct btree *b)
{
	struct btree_node_iter node_iter;

	bch2_btree_node_iter_init_from_start(&node_iter, b);
	__bch2_btree_and_journal_iter_init_node_iter(trans, iter, b, node_iter, b->data->min_key);
}

/* sort and dedup all keys in the journal: */

/*
 * When keys compare equal, oldest compares first - unless we're rewinding
 * (journal_rewind), in which case the order is flipped so that dedup keeps
 * the oldest version instead of the newest:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;
	int rewind = l->rewind && r->rewind ? -1 : 1;

	return journal_key_cmp(l, r) ?:
		((cmp_int(l->journal_seq, r->journal_seq) ?:
		  cmp_int(l->journal_offset, r->journal_offset)) * rewind);
}

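/*
 * Releases a ref on the journal keys; on final put, frees everything: the
 * keys themselves, shared overwritten ranges (freed once per range, at the
 * last key pointing to each), and the raw journal entries:
 */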
void bch2_journal_keys_put(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	BUG_ON(atomic_read(&keys->ref) <= 0);

	if (!atomic_dec_and_test(&keys->ref))
		return;

	move_gap(keys, keys->nr);

	darray_for_each(*keys, i) {
		if (i->overwritten_range &&
		    (i == &darray_last(*keys) ||
		     i->overwritten_range != i[1].overwritten_range))
			kfree(i->overwritten_range);

		if (i->allocated)
			kfree(i->k);
	}

	kvfree(keys->data);
	keys->data = NULL;
	keys->nr = keys->gap = keys->size = 0;

	struct journal_replay **i;
	struct genradix_iter iter;

	genradix_for_each(&c->journal_entries, iter, i)
		kvfree(*i);
	genradix_free(&c->journal_entries);
}

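/*
 * Sort, then dedup: for each run of keys that compare equal, only the copy
 * that sorted last is kept (accounting keys excepted, see below):
 */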
static void __journal_keys_sort(struct journal_keys *keys)
{
	sort_nonatomic(keys->data, keys->nr, sizeof(keys->data[0]),
		       journal_sort_key_cmp, NULL);

	cond_resched();

	struct journal_key *dst = keys->data;

	darray_for_each(*keys, src) {
		/*
		 * We don't accumulate accounting keys here because we have to
		 * compare each individual accounting key against the version in
		 * the btree during replay:
		 */
		if (src->k->k.type != KEY_TYPE_accounting &&
		    src + 1 < &darray_top(*keys) &&
		    !journal_key_cmp(src, src + 1))
			continue;

		*dst++ = *src;
	}

	keys->nr = dst - keys->data;
}

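/*
 * Gather all btree keys from the journal entries we read into journal_keys.
 * If a push fails for lack of memory, sort and compact what we have to make
 * room; if the array is still more than 7/8 full after compacting, give up:
 */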
int bch2_journal_keys_sort(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct journal_replay *i, **_i;
	struct journal_keys *keys = &c->journal_keys;
	size_t nr_read = 0;

	u64 rewind_seq = c->opts.journal_rewind ?: U64_MAX;

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		cond_resched();

		vstruct_for_each(&i->j, entry) {
			bool rewind = !entry->level &&
				!btree_id_is_alloc(entry->btree_id) &&
				le64_to_cpu(i->j.seq) >= rewind_seq;

			if (entry->type != (rewind
					    ? BCH_JSET_ENTRY_overwrite
					    : BCH_JSET_ENTRY_btree_keys))
				continue;

			if (!rewind && le64_to_cpu(i->j.seq) < c->journal_replay_seq_start)
				continue;

			jset_entry_for_each_key(entry, k) {
				struct journal_key n = (struct journal_key) {
					.btree_id	= entry->btree_id,
					.level		= entry->level,
					.rewind		= rewind,
					.k		= k,
					.journal_seq	= le64_to_cpu(i->j.seq),
					.journal_offset	= k->_data - i->j._data,
				};

				if (darray_push(keys, n)) {
					__journal_keys_sort(keys);

					if (keys->nr * 8 > keys->size * 7) {
						bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu keys at seq %llu",
							keys->nr, keys->size, nr_read, le64_to_cpu(i->j.seq));
						return bch_err_throw(c, ENOMEM_journal_keys_sort);
					}

					BUG_ON(darray_push(keys, n));
				}

				nr_read++;
			}
		}
	}

	__journal_keys_sort(keys);
	keys->gap = keys->nr;

	bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_read, keys->nr);
	return 0;
}

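/*
 * Drop all journal keys for @btree with level in [level_min, level_max] and
 * pos in [start, end], inclusive:
 */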
void bch2_shoot_down_journal_keys(struct bch_fs *c, enum btree_id btree,
				  unsigned level_min, unsigned level_max,
				  struct bpos start, struct bpos end)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t dst = 0;

	move_gap(keys, keys->nr);

	darray_for_each(*keys, i)
		if (!(i->btree_id == btree &&
		      i->level >= level_min &&
		      i->level <= level_max &&
		      bpos_ge(i->k->k.p, start) &&
		      bpos_le(i->k->k.p, end)))
			keys->data[dst++] = *i;
	keys->nr = keys->gap = dst;
}

void bch2_journal_keys_dump(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	struct printbuf buf = PRINTBUF;

	pr_info("%zu keys:", keys->nr);

	move_gap(keys, keys->nr);

	darray_for_each(*keys, i) {
		printbuf_reset(&buf);
		prt_printf(&buf, "btree=");
		bch2_btree_id_to_text(&buf, i->btree_id);
		prt_printf(&buf, " l=%u ", i->level);
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
		pr_err("%s", buf.buf);
	}
	printbuf_exit(&buf);
}

void bch2_fs_journal_keys_init(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	atomic_set(&keys->ref, 1);
	keys->initial_ref_held = true;
	mutex_init(&keys->overwrite_lock);
}