/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/wait.h>
#include "misc.h"

struct extent_changeset;
struct btrfs_fs_info;
struct btrfs_inode;

/* Bits for the extent state */
enum {
	ENUM_BIT(EXTENT_DIRTY),
	ENUM_BIT(EXTENT_LOCKED),
	ENUM_BIT(EXTENT_DIO_LOCKED),
	ENUM_BIT(EXTENT_DIRTY_LOG1),
	ENUM_BIT(EXTENT_DIRTY_LOG2),
	ENUM_BIT(EXTENT_DELALLOC),
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range. Should not be cleared during page release
	 * and page invalidation (if there is an ordered extent in flight),
	 * that is left for the ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * Mark that a range is being locked for finishing an ordered extent.
	 * Used together with EXTENT_LOCKED.
	 */
	ENUM_BIT(EXTENT_FINISHING_ORDERED),
	/*
	 * When an ordered extent successfully completes for a region marked as
	 * a new delalloc range, use this flag when clearing a new delalloc
	 * range to indicate that the VFS' inode number of bytes should be
	 * incremented and the inode's new delalloc bytes decremented, in an
	 * atomic way to prevent races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),

	/*
	 * This must be last.
	 *
	 * Bit not representing a state but a request for NOWAIT semantics,
	 * e.g. when allocating memory, and must be masked out from the other
	 * bits.
	 */
	ENUM_BIT(EXTENT_NOWAIT)
};
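
/*
 * Illustrative sketch, not part of the upstream header: ENUM_BIT() from
 * misc.h turns each entry above into a distinct single-bit flag, so callers
 * combine bits with bitwise OR, and the control-only EXTENT_NOWAIT request
 * (see the comment above it) is masked out before bits are stored in an
 * extent state. The helper name below is hypothetical and exists only to
 * demonstrate that masking.
 */
static inline u32 btrfs_example_state_bits(u32 bits)
{
	/* e.g. bits = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_NOWAIT */
	return bits & ~EXTENT_NOWAIT;
}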

#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

#define EXTENT_LOCK_BITS	(EXTENT_LOCKED | EXTENT_DIO_LOCKED)

/*
 * Redefined bits above which are used only in the device allocation tree,
 * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
 * manipulation functions
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
#define CHUNK_STATE_MASK	(CHUNK_ALLOCATED | \
				 CHUNK_TRIMMED)

enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	/*
	 * The fs_info is needed for trace points, a tree attached to an inode
	 * needs the inode.
	 *
	 * owner == IO_TREE_INODE_IO - then inode is valid and fs_info can be
	 * accessed as inode->root->fs_info
	 */
	union {
		struct btrfs_fs_info *fs_info;
		struct btrfs_inode *inode;
	};

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	u32 state;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

const struct btrfs_inode *btrfs_extent_io_tree_to_inode(const struct extent_io_tree *tree);
const struct btrfs_fs_info *btrfs_extent_io_tree_to_fs_info(const struct extent_io_tree *tree);

void btrfs_extent_io_tree_init(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *tree, unsigned int owner);
void btrfs_extent_io_tree_release(struct extent_io_tree *tree);
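
/*
 * Illustrative sketch, not part of the upstream header: an io tree embedded
 * in another structure is typically initialized once with the fs_info and
 * one of the IO_TREE_* owners, and released when that structure is torn
 * down. The function names and the IO_TREE_SELFTEST owner below are example
 * choices only.
 */
static inline void btrfs_example_io_tree_setup(struct btrfs_fs_info *fs_info,
					       struct extent_io_tree *tree)
{
	btrfs_extent_io_tree_init(fs_info, tree, IO_TREE_SELFTEST);
}

static inline void btrfs_example_io_tree_teardown(struct extent_io_tree *tree)
{
	btrfs_extent_io_tree_release(tree);
}
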
int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
			   struct extent_state **cached);
bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
				u32 bits, struct extent_state **cached);

static inline int btrfs_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				    struct extent_state **cached)
{
	return btrfs_lock_extent_bits(tree, start, end, EXTENT_LOCKED, cached);
}

static inline bool btrfs_try_lock_extent(struct extent_io_tree *tree, u64 start,
					 u64 end, struct extent_state **cached)
{
	return btrfs_try_lock_extent_bits(tree, start, end, EXTENT_LOCKED, cached);
}

int __init btrfs_extent_state_init_cachep(void);
void __cold btrfs_extent_state_free_cachep(void);

u64 btrfs_count_range_bits(struct extent_io_tree *tree,
			   u64 *start, u64 search_end,
			   u64 max_bytes, u32 bits, int contig,
			   struct extent_state **cached_state);

void btrfs_free_extent_state(struct extent_state *state);
bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
			  struct extent_state *cached_state);
bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
			  struct extent_state **cached_state);
int btrfs_clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
				   u32 bits, struct extent_changeset *changeset);
int btrfs_clear_extent_bit_changeset(struct extent_io_tree *tree, u64 start, u64 end,
				     u32 bits, struct extent_state **cached,
				     struct extent_changeset *changeset);

static inline int btrfs_clear_extent_bit(struct extent_io_tree *tree, u64 start,
					 u64 end, u32 bits,
					 struct extent_state **cached)
{
	return btrfs_clear_extent_bit_changeset(tree, start, end, bits, cached, NULL);
}

static inline int btrfs_unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				      struct extent_state **cached)
{
	return btrfs_clear_extent_bit_changeset(tree, start, end, EXTENT_LOCKED,
						cached, NULL);
}
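
/*
 * Illustrative sketch, not part of the upstream header: the common pattern
 * for the helpers above is lock a byte range, operate on it, then unlock it,
 * passing the same cached extent_state pointer to both calls so the unlock
 * can avoid a second tree search. The function below is a hypothetical
 * example, not an API provided by this file.
 */
static inline int btrfs_example_with_locked_range(struct extent_io_tree *tree,
						  u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	int ret;

	ret = btrfs_lock_extent(tree, start, end, &cached);
	if (ret)
		return ret;

	/* ... work on the locked range [start, end], end being inclusive ... */

	return btrfs_unlock_extent(tree, start, end, &cached);
}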

int btrfs_set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
				 u32 bits, struct extent_changeset *changeset);
int btrfs_set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			 u32 bits, struct extent_state **cached_state);

static inline int btrfs_clear_extent_dirty(struct extent_io_tree *tree, u64 start,
					   u64 end, struct extent_state **cached)
{
	return btrfs_clear_extent_bit(tree, start, end,
				      EXTENT_DIRTY | EXTENT_DELALLOC |
				      EXTENT_DO_ACCOUNTING, cached);
}

int btrfs_convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, u32 clear_bits,
			     struct extent_state **cached_state);

bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits,
				 struct extent_state **cached_state);
void btrfs_find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				       u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
				      u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
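
/*
 * Illustrative sketch, not part of the upstream header: a typical way to
 * visit every range that has a given bit set is to call
 * btrfs_find_first_extent_bit() in a loop and resume just past the returned
 * range until no further match is reported. The helper and its count are
 * hypothetical examples only; a cached_state could optionally be passed
 * instead of NULL to speed up the repeated searches.
 */
static inline int btrfs_example_count_ranges(struct extent_io_tree *tree, u32 bit)
{
	u64 start = 0;
	u64 found_start;
	u64 found_end;
	int count = 0;

	while (btrfs_find_first_extent_bit(tree, start, &found_start,
					   &found_end, bit, NULL)) {
		count++;
		/* Ranges are inclusive; stop before start would overflow. */
		if (found_end == (u64)-1)
			break;
		start = found_end + 1;
	}
	return count;
}
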
static inline int btrfs_lock_dio_extent(struct extent_io_tree *tree, u64 start,
					u64 end, struct extent_state **cached)
{
	return btrfs_lock_extent_bits(tree, start, end, EXTENT_DIO_LOCKED, cached);
}

static inline bool btrfs_try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
					     u64 end, struct extent_state **cached)
{
	return btrfs_try_lock_extent_bits(tree, start, end, EXTENT_DIO_LOCKED, cached);
}

static inline int btrfs_unlock_dio_extent(struct extent_io_tree *tree, u64 start,
					  u64 end, struct extent_state **cached)
{
	return btrfs_clear_extent_bit_changeset(tree, start, end, EXTENT_DIO_LOCKED,
						cached, NULL);
}

struct extent_state *btrfs_next_extent_state(struct extent_io_tree *tree,
					     struct extent_state *state);

#endif /* BTRFS_EXTENT_IO_TREE_H */