GitHub Repository: torvalds/linux
Path: blob/master/fs/bcachefs/btree_gc.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_GC_H
#define _BCACHEFS_BTREE_GC_H

#include "bkey.h"
#include "btree_gc_types.h"
#include "btree_types.h"

int bch2_check_topology(struct bch_fs *);
int bch2_check_allocations(struct bch_fs *);

/*
 * For concurrent mark and sweep (with other index updates), we define a total
 * ordering of _all_ references GC walks:
 *
 * Note that some references will have the same GC position as others - e.g.
 * everything within the same btree node; in those cases we're relying on
 * whatever locking exists for where those references live, i.e. the write lock
 * on a btree node.
 *
 * That locking is also required to ensure GC doesn't pass the updater in
 * between the updater adding/removing the reference and updating the GC marks;
 * without that, we would at best double count sometimes.
 *
 * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
 * be held that prevents GC from passing the position the updater is at.
 *
 * (What about the start of gc, when we're clearing all the marks? GC clears the
 * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
 * position inside its cmpxchg loop, so the same ordering guarantee holds for
 * the initial clear as well).
 */
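
/*
 * Illustrative sketch only (not an actual call site) of the interlock
 * described above, from the updater's side. node_write_lock(),
 * node_write_unlock(), add_reference() and update_gc_marks() are hypothetical
 * placeholders for whatever lock and mark-update paths cover the reference
 * being changed:
 *
 *	node_write_lock(b);		// GC can't pass this node's position now
 *	add_reference(b, k);		// the index update itself
 *	if (gc_visited(c, gc_pos_btree(b->c.btree_id, b->c.level, k.k->p)))
 *		update_gc_marks(c, k);	// GC already walked here, keep its
 *					// marks in sync as well
 *	node_write_unlock(b);
 */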

/* Position of (the start of) a gc phase: */
static inline struct gc_pos gc_phase(enum gc_phase phase)
{
	return (struct gc_pos) { .phase = phase, };
}

static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level,
					 struct bpos pos)
{
	return (struct gc_pos) {
		.phase	= GC_PHASE_btree,
		.btree	= btree,
		.level	= level,
		.pos	= pos,
	};
}
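
/*
 * For example (illustrative only), the GC position of a key at `pos` in a
 * leaf node (level 0) of the extents btree would be:
 *
 *	struct gc_pos gc_pos = gc_pos_btree(BTREE_ID_extents, 0, pos);
 */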

static inline int gc_btree_order(enum btree_id btree)
{
	if (btree == BTREE_ID_alloc)
		return -2;
	if (btree == BTREE_ID_stripes)
		return -1;
	return btree;
}

static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
	return   cmp_int(l.phase, r.phase) ?:
		 cmp_int(gc_btree_order(l.btree),
			 gc_btree_order(r.btree)) ?:
		 cmp_int(l.level, r.level) ?:
		 bpos_cmp(l.pos, r.pos);
}
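
/*
 * gc_btree_order() makes the alloc and stripes btrees sort before every other
 * btree. For example (illustrative, with both positions in GC_PHASE_btree as
 * constructed by gc_pos_btree()):
 *
 *	gc_pos_cmp(gc_pos_btree(BTREE_ID_alloc,   1, POS_MAX),
 *		   gc_pos_btree(BTREE_ID_extents, 0, POS_MIN)) < 0
 *
 * i.e. any position in the alloc btree compares before any position in the
 * extents btree, regardless of level or pos.
 */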

static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
{
	unsigned seq;
	bool ret;

	do {
		seq = read_seqcount_begin(&c->gc_pos_lock);
		ret = gc_pos_cmp(pos, c->gc_pos) <= 0;
	} while (read_seqcount_retry(&c->gc_pos_lock, seq));

	return ret;
}
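
/*
 * The read loop above pairs with a seqcount writer on the GC side; a minimal
 * sketch of what advancing the GC position looks like, assuming the standard
 * seqcount_t write pattern (writers serialized, preemption disabled):
 *
 *	preempt_disable();
 *	write_seqcount_begin(&c->gc_pos_lock);
 *	c->gc_pos = new_pos;
 *	write_seqcount_end(&c->gc_pos_lock);
 *	preempt_enable();
 */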

void bch2_gc_pos_to_text(struct printbuf *, struct gc_pos *);

int bch2_gc_gens(struct bch_fs *);
void bch2_gc_gens_async(struct bch_fs *);

void bch2_fs_btree_gc_init_early(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_GC_H */