GitHub Repository: torvalds/linux
Path: blob/master/fs/btrfs/block-group.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/wait.h>
#include <linux/sizes.h>
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <uapi/linux/btrfs_tree.h>
#include "free-space-cache.h"

struct btrfs_chunk_map;
struct btrfs_fs_info;
struct btrfs_inode;
struct btrfs_trans_handle;

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

enum btrfs_block_group_size_class {
	/* Unset */
	BTRFS_BG_SZ_NONE,
	/* 0 < size <= 128K */
	BTRFS_BG_SZ_SMALL,
	/* 128K < size <= 8M */
	BTRFS_BG_SZ_MEDIUM,
	/* 8M < size < BG_LENGTH */
	BTRFS_BG_SZ_LARGE,
};

/*
 * This describes the state of the block_group for async discard. Discard is
 * done in two passes, with extent discarding prioritized over bitmap
 * discarding. BTRFS_DISCARD_RESET_CURSOR is set when we are resetting between
 * lists to prevent contention for discard state variables
 * (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};

/*
 * Control flags for do_chunk_alloc's force field. CHUNK_ALLOC_NO_FORCE means
 * to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated. This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but is called from
 * find_free_extent(), which also activates the zone.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
	CHUNK_ALLOC_FORCE_FOR_EXTENT,
};
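
/*
 * Illustrative sketch, not part of this header: a data write path that only
 * wants a chunk when one is really needed would pass CHUNK_ALLOC_NO_FORCE to
 * btrfs_chunk_alloc(), declared below. The trans, space_info and fs_info
 * variables are assumed to come from the caller's context:
 *
 *	ret = btrfs_chunk_alloc(trans, space_info,
 *				btrfs_data_alloc_profile(fs_info),
 *				CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0)
 *		return ret;
 */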

/* Block group flags set at runtime */
enum btrfs_block_group_flags {
	BLOCK_GROUP_FLAG_IREF,
	BLOCK_GROUP_FLAG_REMOVED,
	BLOCK_GROUP_FLAG_TO_COPY,
	BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
	/* Does the block group need to be added to the free space tree? */
	BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
	/* Set after we add a new block group to the free space tree. */
	BLOCK_GROUP_FLAG_FREE_SPACE_ADDED,
	/* Indicate that the block group is placed on a sequential zone */
	BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
	/*
	 * Indicate that the block group is in the list of new block groups
	 * of a transaction.
	 */
	BLOCK_GROUP_FLAG_NEW,
};

enum btrfs_caching_type {
	BTRFS_CACHE_NO,
	BTRFS_CACHE_STARTED,
	BTRFS_CACHE_FINISHED,
	BTRFS_CACHE_ERROR,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	/* Track progress of caching during allocation. */
	atomic_t progress;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP	SZ_2M

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct btrfs_inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;
	u64 global_root_id;

	/*
	 * The last committed used bytes of this block group. If the above
	 * @used is still the same as @commit_used, we don't need to update
	 * the block group item of this block group.
	 */
	u64 commit_used;
	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * It is only used for the delayed data space allocation because only
	 * the data space allocation and the related metadata update can be
	 * done across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;
	unsigned long runtime_flags;

	unsigned int ro;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change.
	 */
	struct list_head cluster_list;

	/*
	 * Used for several lists:
	 *
	 * 1) struct btrfs_fs_info::unused_bgs
	 * 2) struct btrfs_fs_info::reclaim_bgs
	 * 3) struct btrfs_transaction::deleted_bgs
	 * 4) struct btrfs_trans_handle::new_bgs
	 */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents cannot be reused for future block group allocations
	 * until the counter goes down to 0. This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented, while holding the spinlock *lock*, by a task checking
	 * if it can perform a nocow write (incremented if the value for the
	 * *ro* field is 0). Decremented by such tasks once they create an
	 * ordered extent, or before that if some error happens before reaching
	 * that step. This is to prevent races between block group relocation
	 * and nocow writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/* Protected by @free_space_lock. */
	bool using_free_space_bitmaps;
	/* Protected by @free_space_lock. */
	bool using_free_space_bitmaps_cached;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation. This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 zone_capacity;
	u64 meta_write_pointer;
	struct btrfs_chunk_map *physical_map;
	struct list_head active_bg_list;
	struct work_struct zone_finish_work;
	struct extent_buffer *last_eb;
	enum btrfs_block_group_size_class size_class;
	u64 reclaim_mark;
};
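
/*
 * Illustrative sketch, not taken from kernel code: per the @frozen comment
 * above, a task that keeps using a block group that may be deleted freezes it
 * first and unfreezes it when done, so the logical address and device extents
 * are not reused in between. btrfs_freeze_block_group() and
 * btrfs_unfreeze_block_group() are declared near the end of this header;
 * do_deferred_work_on() is a hypothetical placeholder:
 *
 *	btrfs_freeze_block_group(bg);
 *	do_deferred_work_on(bg);
 *	btrfs_unfreeze_block_group(bg);
 */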

static inline u64 btrfs_block_group_end(const struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}

static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg)
{
	lockdep_assert_held(&bg->lock);

	return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0);
}
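
/*
 * Illustrative sketch, assuming a caller-side cleanup path: since
 * btrfs_is_block_group_used() asserts that bg->lock is held, the check must
 * be done under the spinlock, for example before marking a block group
 * unused with btrfs_mark_bg_unused() (declared below):
 *
 *	bool used;
 *
 *	spin_lock(&bg->lock);
 *	used = btrfs_is_block_group_used(bg);
 *	spin_unlock(&bg->lock);
 *	if (!used)
 *		btrfs_mark_bg_unused(bg);
 */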

static inline bool btrfs_is_block_group_data_only(const struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group);
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
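
/*
 * Illustrative sketch of the lookup/refcount contract, assumed from the
 * declarations above rather than documented here: a successful
 * btrfs_lookup_block_group() returns a referenced block group, so it must be
 * paired with btrfs_put_block_group(); use_block_group() is a hypothetical
 * placeholder:
 *
 *	struct btrfs_block_group *bg;
 *
 *	bg = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (!bg)
 *		return -ENOENT;
 *	use_block_group(bg);
 *	btrfs_put_block_group(bg);
 */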
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
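
/*
 * Illustrative sketch, assuming the usual nocow write pattern described by
 * the nocow_writers field comment above: btrfs_inc_nocow_writers() returns
 * the block group covering @bytenr on success and NULL when a nocow write is
 * not possible, in which case the caller would fall back to a COW write.
 * write_nocow_range() and fallback_to_cow_write() are hypothetical
 * placeholders:
 *
 *	struct btrfs_block_group *bg;
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, bytenr);
 *	if (!bg)
 *		return fallback_to_cow_write();
 *	ret = write_nocow_range(bg);
 *	btrfs_dec_nocow_writers(bg);
 */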
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
int btrfs_add_new_free_space(struct btrfs_block_group *block_group,
			     u64 start, u64 end, u64 *total_added_ret);
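
/*
 * Illustrative sketch, assuming an allocator-style caller: before relying on
 * a block group's free space, make sure caching has finished, using
 * btrfs_block_group_done() (defined further below) and waiting via
 * btrfs_cache_block_group() when it has not:
 *
 *	if (!btrfs_block_group_done(bg)) {
 *		ret = btrfs_cache_block_group(bg, true);
 *		if (ret)
 *			return ret;
 *	}
 */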
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info,
		const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_chunk_map *map);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 struct btrfs_space_info *space_info,
						 u64 type, u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc,
			     bool force_wrong_size_class);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes,
			       bool is_delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans,
		      struct btrfs_space_info *space_info, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static inline int btrfs_block_group_done(const struct btrfs_block_group *cache)
{
	/*
	 * Full barrier so that updates made by the caching thread are visible
	 * before we act on a ->cached value of finished/error.
	 */
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
	       cache->cached == BTRFS_CACHE_ERROR;
}

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);

enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size);
int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
				     enum btrfs_block_group_size_class size_class,
				     bool force_wrong_size_class);
bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg);
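
/*
 * Illustrative sketch, assuming an extent allocation path: the size class for
 * a requested allocation is computed once and then checked against each
 * candidate block group; a nonzero return from
 * btrfs_use_block_group_size_class() (without force_wrong_size_class) means
 * trying the next candidate:
 *
 *	enum btrfs_block_group_size_class size_class;
 *
 *	size_class = btrfs_calc_block_group_size_class(num_bytes);
 *	if (btrfs_block_group_should_use_size_class(bg) &&
 *	    btrfs_use_block_group_size_class(bg, size_class, false))
 *		goto next_block_group;
 */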

#endif /* BTRFS_BLOCK_GROUP_H */