GitHub Repository: torvalds/linux
Path: blob/master/fs/btrfs/extent_map.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "messages.h"
#include "ctree.h"
#include "extent_map.h"
#include "compression.h"
#include "btrfs_inode.h"
#include "disk-io.h"

static struct kmem_cache *extent_map_cache;

int __init btrfs_extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
					     sizeof(struct extent_map), 0, 0, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/*
 * Initialize the extent tree @tree. Should be called for each new inode or
 * other user of the extent_map interface.
 */
void btrfs_extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->root = RB_ROOT;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/*
 * Allocate a new extent_map structure. The new structure is returned with a
 * reference count of one and needs to be freed using btrfs_free_extent_map().
 */
struct extent_map *btrfs_alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/*
 * Drop the reference on @em by one and free the structure if the reference
 * count hits zero.
 */
void btrfs_free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(btrfs_extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		kmem_cache_free(extent_map_cache, em);
	}
}
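
/*
 * Usage sketch (illustrative only, not part of the upstream file): the two
 * helpers above pair up, with the reference count freeing the structure only
 * once the last user drops its reference.
 *
 *	struct extent_map *em;
 *
 *	em = btrfs_alloc_extent_map();
 *	if (!em)
 *		return -ENOMEM;
 *	// ... fill in em->start, em->len, etc. and/or insert it into a tree ...
 *	btrfs_free_extent_map(em);	// drops the reference taken at allocation
 */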

/* Do the math around the end of an extent, handling wrapping. */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
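
/*
 * Worked example (illustrative): range_end(10, 20) is 30, while a range whose
 * length would overflow u64, e.g. range_end((u64)-8, 16), is clamped to
 * (u64)-1 instead of wrapping around to a small value.
 */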

static void remove_em(struct btrfs_inode *inode, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	rb_erase(&em->rb_node, &inode->extent_tree.root);
	RB_CLEAR_NODE(&em->rb_node);

	if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(inode->root)))
		percpu_counter_dec(&fs_info->evictable_extent_maps);
}

static int tree_insert(struct rb_root *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start)
			p = &(*p)->rb_left;
		else if (em->start >= btrfs_extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	orig_parent = parent;
	while (parent && em->start >= btrfs_extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < btrfs_extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < btrfs_extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color(&em->rb_node, root);
	return 0;
}
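
/*
 * Note (added for clarity): tree_insert() keys the rbtree by em->start and
 * rejects any insertion that would overlap an existing entry. The overlap rule
 * it enforces against the closest neighbours, for two maps a and b, is:
 *
 *	a overlaps b  iff  a->start < btrfs_extent_map_end(b) &&
 *			   btrfs_extent_map_end(a) > b->start
 *
 * which is exactly the "end > entry->start && em->start < ...end(entry)"
 * checks done above before linking the node.
 */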

/*
 * Search through the tree for an extent_map with a given offset. If it can't
 * be found, try to find some neighboring extents
 */
static struct rb_node *tree_search(struct rb_root *root, u64 offset,
				   struct rb_node **prev_or_next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	ASSERT(prev_or_next_ret);

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= btrfs_extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	orig_prev = prev;
	while (prev && offset >= btrfs_extent_map_end(prev_entry)) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}

	/*
	 * Previous extent map found, return as in this case the caller does not
	 * care about the next one.
	 */
	if (prev) {
		*prev_or_next_ret = prev;
		return NULL;
	}

	prev = orig_prev;
	prev_entry = rb_entry(prev, struct extent_map, rb_node);
	while (prev && offset < prev_entry->start) {
		prev = rb_prev(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}
	*prev_or_next_ret = prev;

	return NULL;
}

static inline u64 extent_map_block_len(const struct extent_map *em)
{
	if (btrfs_extent_map_is_compressed(em))
		return em->disk_num_bytes;
	return em->len;
}

static inline u64 extent_map_block_end(const struct extent_map *em)
{
	const u64 block_start = btrfs_extent_map_block_start(em);
	const u64 block_end = block_start + extent_map_block_len(em);

	if (block_end < block_start)
		return (u64)-1;

	return block_end;
}

static bool can_merge_extent_map(const struct extent_map *em)
{
	if (em->flags & EXTENT_FLAG_PINNED)
		return false;

	/* Don't merge compressed extents, we need to know their actual size. */
	if (btrfs_extent_map_is_compressed(em))
		return false;

	if (em->flags & EXTENT_FLAG_LOGGING)
		return false;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&em->list))
		return false;

	return true;
}

/* Check to see if two extent_map structs are adjacent and safe to merge. */
static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next)
{
	if (btrfs_extent_map_end(prev) != next->start)
		return false;

	/*
	 * The merged flag is not an on-disk flag, it just indicates we had the
	 * extent maps of 2 (or more) adjacent extents merged, so factor it out.
	 */
	if ((prev->flags & ~EXTENT_FLAG_MERGED) !=
	    (next->flags & ~EXTENT_FLAG_MERGED))
		return false;

	if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1)
		return btrfs_extent_map_block_start(next) == extent_map_block_end(prev);

	/* HOLES and INLINE extents. */
	return next->disk_bytenr == prev->disk_bytenr;
}

/*
 * Handle the on-disk data extents merge for @prev and @next.
 *
 * @prev: left extent to merge
 * @next: right extent to merge
 * @merged: the extent we will not discard after the merge; updated with new values
 *
 * After this, one of the two extents is the new merged extent and the other is
 * removed from the tree and likely freed. Note that @merged is one of @prev/@next
 * so there is const/non-const aliasing occurring here.
 *
 * Only touches disk_bytenr/disk_num_bytes/offset/ram_bytes.
 * For now only uncompressed regular extent can be merged.
 */
static void merge_ondisk_extents(const struct extent_map *prev, const struct extent_map *next,
				 struct extent_map *merged)
{
	u64 new_disk_bytenr;
	u64 new_disk_num_bytes;
	u64 new_offset;

	/* @prev and @next should not be compressed. */
	ASSERT(!btrfs_extent_map_is_compressed(prev));
	ASSERT(!btrfs_extent_map_is_compressed(next));

	/*
	 * There are two different cases where @prev and @next can be merged.
	 *
	 * 1) They are referring to the same data extent:
	 *
	 *    |<----- data extent A ----->|
	 *       |<- prev ->|<- next ->|
	 *
	 * 2) They are referring to different data extents but still adjacent:
	 *
	 *    |<-- data extent A -->|<-- data extent B -->|
	 *               |<- prev ->|<- next ->|
	 *
	 * The calculation here always merges the data extents first, then updates
	 * @offset using the new data extents.
	 *
	 * For case 1), the merged data extent would be the same.
	 * For case 2), we just merge the two data extents into one.
	 */
	new_disk_bytenr = min(prev->disk_bytenr, next->disk_bytenr);
	new_disk_num_bytes = max(prev->disk_bytenr + prev->disk_num_bytes,
				 next->disk_bytenr + next->disk_num_bytes) -
			     new_disk_bytenr;
	new_offset = prev->disk_bytenr + prev->offset - new_disk_bytenr;

	merged->disk_bytenr = new_disk_bytenr;
	merged->disk_num_bytes = new_disk_num_bytes;
	merged->ram_bytes = new_disk_num_bytes;
	merged->offset = new_offset;
}
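
/*
 * Worked example (illustrative, case 2 above): @prev maps data extent
 * [1000, 1000 + 8K) with offset 0 and @next maps the adjacent data extent
 * [1000 + 8K, 1000 + 16K) with offset 0. Then:
 *
 *	new_disk_bytenr    = min(1000, 1000 + 8K)                         = 1000
 *	new_disk_num_bytes = max(1000 + 8K, 1000 + 16K) - new_disk_bytenr = 16K
 *	new_offset         = 1000 + 0 - 1000                              = 0
 *
 * so @merged ends up describing a single 16K extent at disk bytenr 1000.
 */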

static void dump_extent_map(struct btrfs_fs_info *fs_info, const char *prefix,
			    struct extent_map *em)
{
	if (!IS_ENABLED(CONFIG_BTRFS_DEBUG))
		return;
	btrfs_crit(fs_info,
"%s, start=%llu len=%llu disk_bytenr=%llu disk_num_bytes=%llu ram_bytes=%llu offset=%llu flags=0x%x",
		   prefix, em->start, em->len, em->disk_bytenr, em->disk_num_bytes,
		   em->ram_bytes, em->offset, em->flags);
	ASSERT(0);
}

/* Internal sanity checks for btrfs debug builds. */
static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map *em)
{
	if (!IS_ENABLED(CONFIG_BTRFS_DEBUG))
		return;
	if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
		if (em->disk_num_bytes == 0)
			dump_extent_map(fs_info, "zero disk_num_bytes", em);
		if (em->offset + em->len > em->ram_bytes)
			dump_extent_map(fs_info, "ram_bytes too small", em);
		if (em->offset + em->len > em->disk_num_bytes &&
		    !btrfs_extent_map_is_compressed(em))
			dump_extent_map(fs_info, "disk_num_bytes too small", em);
		if (!btrfs_extent_map_is_compressed(em) &&
		    em->ram_bytes != em->disk_num_bytes)
			dump_extent_map(fs_info,
		"ram_bytes mismatch with disk_num_bytes for non-compressed em",
					em);
	} else if (em->offset) {
		dump_extent_map(fs_info, "non-zero offset for hole/inline", em);
	}
}

static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in
	 * inconsistent state during the merging. We always have 1 reference for
	 * the tree and 1 for this task (which is unpinning the extent map or
	 * clearing the logging flag), so anything > 2 means it's being used by
	 * other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;

	if (!can_merge_extent_map(em))
		return;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		merge = rb_entry_safe(rb, struct extent_map, rb_node);

		if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->generation = max(em->generation, merge->generation);

			if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
				merge_ondisk_extents(merge, em, em);
			em->flags |= EXTENT_FLAG_MERGED;

			validate_extent_map(fs_info, em);
			remove_em(inode, merge);
			btrfs_free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	merge = rb_entry_safe(rb, struct extent_map, rb_node);

	if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
		em->len += merge->len;
		if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
			merge_ondisk_extents(em, merge, em);
		validate_extent_map(fs_info, em);
		em->generation = max(em->generation, merge->generation);
		em->flags |= EXTENT_FLAG_MERGED;
		remove_em(inode, merge);
		btrfs_free_extent_map(merge);
	}
}
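
/*
 * Illustration (added, not part of the upstream sources): after writing two
 * adjacent 4K uncompressed extents, the tree may hold maps [0, 4K) and
 * [4K, 8K) backed by contiguous disk ranges. Once neither map is pinned,
 * being logged or still referenced elsewhere, try_merge_map() collapses them
 * into a single [0, 8K) map with EXTENT_FLAG_MERGED set, removing and freeing
 * the absorbed neighbour.
 */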

/*
 * Unpin an extent from the cache.
 *
 * @inode: the inode from which we are unpinning an extent range
 * @start: logical offset in the file
 * @len: length of the extent
 * @gen: generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly. Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 *
 * Returns: 0        on success
 *          -ENOENT  when the extent is not found in the tree
 *          -EUCLEAN if the found extent does not match the expected start
 */
int btrfs_unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret = 0;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = btrfs_lookup_extent_mapping(tree, start, len);

	if (WARN_ON(!em)) {
		btrfs_warn(fs_info,
"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
			   btrfs_ino(inode), btrfs_root_id(inode->root),
			   start, start + len, gen);
		ret = -ENOENT;
		goto out;
	}

	if (WARN_ON(em->start != start)) {
		btrfs_warn(fs_info,
"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
			   btrfs_ino(inode), btrfs_root_id(inode->root),
			   em->start, start, start + len, gen);
		ret = -EUCLEAN;
		goto out;
	}

	em->generation = gen;
	em->flags &= ~EXTENT_FLAG_PINNED;

	try_merge_map(inode, em);

out:
	write_unlock(&tree->lock);
	btrfs_free_extent_map(em);
	return ret;
}
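
/*
 * Typical call-site sketch (illustrative only): once writeback for the file
 * range [start, start + len) has fully completed and its file extent item was
 * committed at generation @gen, the ordered extent completion path is expected
 * to call:
 *
 *	ret = btrfs_unpin_extent_cache(inode, start, len, gen);
 *
 * which records the generation and clears EXTENT_FLAG_PINNED so the map
 * becomes a merge candidate again.
 */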

void btrfs_clear_em_logging(struct btrfs_inode *inode, struct extent_map *em)
{
	lockdep_assert_held_write(&inode->extent_tree.lock);

	em->flags &= ~EXTENT_FLAG_LOGGING;
	if (btrfs_extent_map_in_tree(em))
		try_merge_map(inode, em);
}

static inline void setup_extent_mapping(struct btrfs_inode *inode,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);

	ASSERT(list_empty(&em->list));

	if (modified)
		list_add(&em->list, &inode->extent_tree.modified_extents);
	else
		try_merge_map(inode, em);
}

/*
 * Add a new extent map to an inode's extent map tree.
 *
 * @inode: the target inode
 * @em: map to insert
 * @modified: indicate whether the given @em should be added to the
 *            modified list, which indicates the extent needs to be logged
 *
 * Insert @em into the @inode's extent map tree or perform a simple
 * forward/backward merge with existing mappings. The extent_map struct passed
 * in will be inserted into the tree directly, with an additional reference
 * taken, or a reference dropped if the merge attempt was successful.
 */
static int add_extent_mapping(struct btrfs_inode *inode,
			      struct extent_map *em, int modified)
{
	struct extent_map_tree *tree = &inode->extent_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	lockdep_assert_held_write(&tree->lock);

	validate_extent_map(fs_info, em);
	ret = tree_insert(&tree->root, em);
	if (ret)
		return ret;

	setup_extent_mapping(inode, em, modified);

	if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(root)))
		percpu_counter_inc(&fs_info->evictable_extent_maps);

	return 0;
}

static struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
						u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev_or_next = NULL;
	u64 end = range_end(start, len);

	rb_node = tree_search(&tree->root, start, &prev_or_next);
	if (!rb_node) {
		if (prev_or_next)
			rb_node = prev_or_next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < btrfs_extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

/*
 * Lookup extent_map that intersects @start + @len range.
 *
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range. There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *btrfs_lookup_extent_mapping(struct extent_map_tree *tree,
					       u64 start, u64 len)
{
	return lookup_extent_mapping(tree, start, len, 1);
}

/*
 * Find a nearby extent map intersecting @start + @len (not an exact search).
 *
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *btrfs_search_extent_mapping(struct extent_map_tree *tree,
					       u64 start, u64 len)
{
	return lookup_extent_mapping(tree, start, len, 0);
}
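
/*
 * Usage sketch (illustrative only): both lookup helpers return a referenced
 * extent map and must be called with the tree lock held, with the caller
 * dropping the reference when done, e.g.:
 *
 *	read_lock(&inode->extent_tree.lock);
 *	em = btrfs_lookup_extent_mapping(&inode->extent_tree, start, len);
 *	read_unlock(&inode->extent_tree.lock);
 *	if (em) {
 *		// ... use em->start, em->len, btrfs_extent_map_block_start(em) ...
 *		btrfs_free_extent_map(em);
 *	}
 */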

/*
 * Remove an extent_map from its inode's extent tree.
 *
 * @inode: the inode the extent map belongs to
 * @em: extent map being removed
 *
 * Remove @em from the extent tree of @inode. No reference counts are dropped,
 * and no checks are done to see if the range is in use.
 */
void btrfs_remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
{
	struct extent_map_tree *tree = &inode->extent_tree;

	lockdep_assert_held_write(&tree->lock);

	WARN_ON(em->flags & EXTENT_FLAG_PINNED);
	if (!(em->flags & EXTENT_FLAG_LOGGING))
		list_del_init(&em->list);

	remove_em(inode, em);
}

static void replace_extent_mapping(struct btrfs_inode *inode,
				   struct extent_map *cur,
				   struct extent_map *new,
				   int modified)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *tree = &inode->extent_tree;

	lockdep_assert_held_write(&tree->lock);

	validate_extent_map(fs_info, new);

	WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
	ASSERT(btrfs_extent_map_in_tree(cur));
	if (!(cur->flags & EXTENT_FLAG_LOGGING))
		list_del_init(&cur->list);
	rb_replace_node(&cur->rb_node, &new->rb_node, &tree->root);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(inode, new, modified);
}

static struct extent_map *next_extent_map(const struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

/*
 * Helper for btrfs_get_extent(). Given an existing extent in the tree (the one
 * nearest to @map_start) and a new extent that we want to insert, deal with
 * the overlap and insert the best-fitting, trimmed new extent into the tree.
 */
static noinline int merge_extent_mapping(struct btrfs_inode *inode,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	if (map_start < em->start || map_start >= btrfs_extent_map_end(em))
		return -EINVAL;

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? btrfs_extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : btrfs_extent_map_end(em);
	end = min_t(u64, end, btrfs_extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
		em->offset += start_diff;
	return add_extent_mapping(inode, em, 0);
}

/*
 * Add extent mapping into an inode's extent map tree.
 *
 * @inode: target inode
 * @em_in: extent we are inserting
 * @start: start of the logical range btrfs_get_extent() is requesting
 * @len: length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start+len),
 * but they must be overlapped.
 *
 * Insert @em_in into the inode's extent map tree. In case there is an
 * overlapping range, handle the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merge the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_inode *inode,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	/*
	 * Tree-checker should have rejected any inline extent with non-zero
	 * file offset. Here just do a sanity check.
	 */
	if (em->disk_bytenr == EXTENT_MAP_INLINE)
		ASSERT(em->start == 0);

	ret = add_extent_mapping(inode, em, 0);
	/*
	 * It is possible that someone inserted the extent into the tree while
	 * we had the lock dropped. It is also possible that an overlapping map
	 * exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		existing = btrfs_search_extent_mapping(&inode->extent_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be an
		 * extent causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < btrfs_extent_map_end(existing)) {
			btrfs_free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range which overlaps.
			 */
			ret = merge_extent_mapping(inode, existing, em, start);
			if (WARN_ON(ret)) {
				btrfs_free_extent_map(em);
				*em_in = NULL;
				btrfs_warn(fs_info,
"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",
					   existing->start, btrfs_extent_map_end(existing),
					   orig_start, orig_start + orig_len, start);
			}
			btrfs_free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}
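
/*
 * Note (added for clarity): when btrfs_add_extent_mapping() returns 0, @*em_in
 * may point to a different extent map than the one passed in (a pre-existing
 * overlapping map is handed back and the caller's map is freed), so callers
 * must only use *em_in afterwards and must not assume their original
 * allocation survived.
 */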

/*
 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 * if needed. This avoids searching the tree, from the root down to the first
 * extent map, before each deletion.
 */
static void drop_all_extent_maps_fast(struct btrfs_inode *inode)
{
	struct extent_map_tree *tree = &inode->extent_tree;
	struct rb_node *node;

	write_lock(&tree->lock);
	node = rb_first(&tree->root);
	while (node) {
		struct extent_map *em;
		struct rb_node *next = rb_next(node);

		em = rb_entry(node, struct extent_map, rb_node);
		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
		btrfs_remove_extent_mapping(inode, em);
		btrfs_free_extent_map(em);

		if (cond_resched_rwlock_write(&tree->lock))
			node = rb_first(&tree->root);
		else
			node = next;
	}
	write_unlock(&tree->lock);
}

/*
 * Drop all extent maps in a given range.
 *
 * @inode: The target inode.
 * @start: Start offset of the range.
 * @end: End offset of the range (inclusive value).
 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 *
 * This drops all the extent maps that intersect the given range [@start, @end].
 * Extent maps that partially overlap the range and extend behind or beyond it,
 * are split.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
				 bool skip_pinned)
{
	struct extent_map *split;
	struct extent_map *split2;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		if (start == 0 && !skip_pinned) {
			drop_all_extent_maps_fast(inode);
			return;
		}
		len = (u64)-1;
	} else {
		/* Make end offset exclusive for use in the loop below. */
		end++;
	}

	/*
	 * It's ok if we fail to allocate the extent maps, see the comment near
	 * the bottom of the loop below. We only need two spare extent maps in
	 * the worst case, where the first extent map that intersects our range
	 * starts before the range and the last extent map that intersects our
	 * range ends after our range (and they might be the same extent map),
	 * because we need to split those two extent maps at the boundaries.
	 */
	split = btrfs_alloc_extent_map();
	split2 = btrfs_alloc_extent_map();

	write_lock(&em_tree->lock);
	em = btrfs_lookup_extent_mapping(em_tree, start, len);

	while (em) {
		/* extent_map_end() returns exclusive value (last byte + 1). */
		const u64 em_end = btrfs_extent_map_end(em);
		struct extent_map *next_em = NULL;
		u64 gen;
		unsigned long flags;
		bool modified;

		if (em_end < end) {
			next_em = next_extent_map(em);
			if (next_em) {
				if (next_em->start < end)
					refcount_inc(&next_em->refs);
				else
					next_em = NULL;
			}
		}

		if (skip_pinned && (em->flags & EXTENT_FLAG_PINNED)) {
			start = em_end;
			goto next;
		}

		flags = em->flags;
		/*
		 * In case we split the extent map, we want to preserve the
		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
		 * it on the new extent maps.
		 */
		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
		modified = !list_empty(&em->list);

		/*
		 * The extent map does not cross our target range, so no need to
		 * split it, we can remove it directly.
		 */
		if (em->start >= start && em_end <= end)
			goto remove_em;

		gen = em->generation;

		if (em->start < start) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = em->start;
			split->len = start - em->start;

			if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
				split->disk_bytenr = em->disk_bytenr;
				split->disk_num_bytes = em->disk_num_bytes;
				split->offset = em->offset;
				split->ram_bytes = em->ram_bytes;
			} else {
				split->disk_bytenr = em->disk_bytenr;
				split->disk_num_bytes = 0;
				split->offset = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			replace_extent_mapping(inode, em, split, modified);
			btrfs_free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em_end > end) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = end;
			split->len = em_end - end;
			split->disk_bytenr = em->disk_bytenr;
			split->flags = flags;
			split->generation = gen;

			if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
				split->disk_num_bytes = em->disk_num_bytes;
				split->offset = em->offset + end - em->start;
				split->ram_bytes = em->ram_bytes;
			} else {
				split->disk_num_bytes = 0;
				split->offset = 0;
				split->ram_bytes = split->len;
			}

			if (btrfs_extent_map_in_tree(em)) {
				replace_extent_mapping(inode, em, split, modified);
			} else {
				int ret;

				ret = add_extent_mapping(inode, split, modified);
				/* Logic error, shouldn't happen. */
				ASSERT(ret == 0);
				if (WARN_ON(ret != 0) && modified)
					btrfs_set_inode_full_sync(inode);
			}
			btrfs_free_extent_map(split);
			split = NULL;
		}
remove_em:
		if (btrfs_extent_map_in_tree(em)) {
			/*
			 * If the extent map is still in the tree it means that
			 * either of the following is true:
			 *
			 * 1) It fits entirely in our range (doesn't end beyond
			 *    it or starts before it);
			 *
			 * 2) It starts before our range and/or ends after our
			 *    range, and we were not able to allocate the extent
			 *    maps for split operations, @split and @split2.
			 *
			 * If we are at case 2) then we just remove the entire
			 * extent map - this is fine since if anyone needs to
			 * access the subranges outside our range, it will just
			 * be loaded again from the subvolume tree's file extent
			 * item. However if the extent map was in the list of
			 * modified extents, then we must mark the inode for a
			 * full fsync, otherwise a fast fsync will miss this
			 * extent if it's new and needs to be logged.
			 */
			if ((em->start < start || em_end > end) && modified) {
				ASSERT(!split);
				btrfs_set_inode_full_sync(inode);
			}
			btrfs_remove_extent_mapping(inode, em);
		}

		/*
		 * Once for the tree reference (we replaced or removed the
		 * extent map from the tree).
		 */
		btrfs_free_extent_map(em);
next:
		/* Once for us (for our lookup reference). */
		btrfs_free_extent_map(em);

		em = next_em;
	}

	write_unlock(&em_tree->lock);

	btrfs_free_extent_map(split);
	btrfs_free_extent_map(split2);
}
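
/*
 * Example (illustrative): dropping the range [8K, 16K - 1] from an inode whose
 * tree holds a single map [0, 32K) leaves two maps behind, [0, 8K) and
 * [16K, 32K), built from the @split and @split2 allocations above, while the
 * original [0, 32K) map is removed from the tree and freed.
 */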

/*
 * Replace a range in the inode's extent map tree with a new extent map.
 *
 * @inode: The target inode.
 * @new_em: The new extent map to add to the inode's extent map tree.
 * @modified: Indicate if the new extent map should be added to the list of
 *            modified extents (for fast fsync tracking).
 *
 * Drops all the extent maps in the inode's extent map tree that intersect the
 * range of the new extent map and adds the new extent map to the tree.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
				   struct extent_map *new_em,
				   bool modified)
{
	const u64 end = new_em->start + new_em->len - 1;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret;

	ASSERT(!btrfs_extent_map_in_tree(new_em));

	/*
	 * The caller has locked an appropriate file range in the inode's io
	 * tree, but getting -EEXIST when adding the new extent map can still
	 * happen in case there are extents that partially cover the range, and
	 * this is due to two tasks operating on different parts of the extent.
	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
	 * btrfs_get_extent") for an example and details.
	 */
	do {
		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
		write_lock(&tree->lock);
		ret = add_extent_mapping(inode, new_em, modified);
		write_unlock(&tree->lock);
	} while (ret == -EEXIST);

	return ret;
}

/*
 * Split off the first @pre bytes from the extent_map at [start, start + len),
 * and set the block start for it to @new_logical.
 *
 * This function is used when an ordered_extent needs to be split.
 */
int btrfs_split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
			   u64 new_logical)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	struct extent_map *split_pre = NULL;
	struct extent_map *split_mid = NULL;
	int ret = 0;
	unsigned long flags;

	ASSERT(pre != 0);
	ASSERT(pre < len);

	split_pre = btrfs_alloc_extent_map();
	if (!split_pre)
		return -ENOMEM;
	split_mid = btrfs_alloc_extent_map();
	if (!split_mid) {
		ret = -ENOMEM;
		goto out_free_pre;
	}

	btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL);
	write_lock(&em_tree->lock);
	em = btrfs_lookup_extent_mapping(em_tree, start, len);
	if (!em) {
		ret = -EIO;
		goto out_unlock;
	}

	ASSERT(em->len == len);
	ASSERT(!btrfs_extent_map_is_compressed(em));
	ASSERT(em->disk_bytenr < EXTENT_MAP_LAST_BYTE);
	ASSERT(em->flags & EXTENT_FLAG_PINNED);
	ASSERT(!(em->flags & EXTENT_FLAG_LOGGING));
	ASSERT(!list_empty(&em->list));

	flags = em->flags;
	em->flags &= ~EXTENT_FLAG_PINNED;

	/* First, replace the em with a new extent_map starting from em->start. */
	split_pre->start = em->start;
	split_pre->len = pre;
	split_pre->disk_bytenr = new_logical;
	split_pre->disk_num_bytes = split_pre->len;
	split_pre->offset = 0;
	split_pre->ram_bytes = split_pre->len;
	split_pre->flags = flags;
	split_pre->generation = em->generation;

	replace_extent_mapping(inode, em, split_pre, 1);

	/*
	 * Now we only have an extent_map at:
	 * [em->start, em->start + pre)
	 */

	/* Insert the middle extent_map. */
	split_mid->start = em->start + pre;
	split_mid->len = em->len - pre;
	split_mid->disk_bytenr = btrfs_extent_map_block_start(em) + pre;
	split_mid->disk_num_bytes = split_mid->len;
	split_mid->offset = 0;
	split_mid->ram_bytes = split_mid->len;
	split_mid->flags = flags;
	split_mid->generation = em->generation;
	add_extent_mapping(inode, split_mid, 1);

	/* Once for us */
	btrfs_free_extent_map(em);
	/* Once for the tree */
	btrfs_free_extent_map(em);

out_unlock:
	write_unlock(&em_tree->lock);
	btrfs_unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
	btrfs_free_extent_map(split_mid);
out_free_pre:
	btrfs_free_extent_map(split_pre);
	return ret;
}

struct btrfs_em_shrink_ctx {
	long nr_to_scan;
	long scanned;
};

static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u64 cur_fs_gen = btrfs_get_fs_generation(fs_info);
	struct extent_map_tree *tree = &inode->extent_tree;
	long nr_dropped = 0;
	struct rb_node *node;

	lockdep_assert_held_write(&tree->lock);

	/*
	 * Take the mmap lock so that we serialize with the inode logging phase
	 * of fsync because we may need to set the full sync flag on the inode,
	 * in case we have to remove extent maps in the tree's list of modified
	 * extents. If we set the full sync flag in the inode while an fsync is
	 * in progress, we may risk missing new extents because before the flag
	 * is set, fsync decides to only wait for writeback to complete and then
	 * during inode logging it sees the flag set and uses the subvolume tree
	 * to find new extents, which may not be there yet because ordered
	 * extents haven't completed yet.
	 *
	 * We also do a try lock because we don't want to block for too long and
	 * we are holding the extent map tree's lock in write mode.
	 */
	if (!down_read_trylock(&inode->i_mmap_lock))
		return 0;

	node = rb_first(&tree->root);
	while (node) {
		struct rb_node *next = rb_next(node);
		struct extent_map *em;

		em = rb_entry(node, struct extent_map, rb_node);
		ctx->scanned++;

		if (em->flags & EXTENT_FLAG_PINNED)
			goto next;

		/*
		 * If the extent map is in the list of modified extents (new)
		 * and its generation is the same (or is greater than) the
		 * current fs generation, it means it was not yet persisted so
		 * we have to set the full sync flag so that the next fsync
		 * will not miss it.
		 */
		if (!list_empty(&em->list) && em->generation >= cur_fs_gen)
			btrfs_set_inode_full_sync(inode);

		btrfs_remove_extent_mapping(inode, em);
		trace_btrfs_extent_map_shrinker_remove_em(inode, em);
		/* Drop the reference for the tree. */
		btrfs_free_extent_map(em);
		nr_dropped++;
next:
		if (ctx->scanned >= ctx->nr_to_scan)
			break;

		/*
		 * Stop if we need to reschedule or there's contention on the
		 * lock. This is to avoid slowing other tasks trying to take the
		 * lock.
		 */
		if (need_resched() || rwlock_needbreak(&tree->lock) ||
		    btrfs_fs_closing(fs_info))
			break;
		node = next;
	}
	up_read(&inode->i_mmap_lock);

	return nr_dropped;
}

static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root,
						      u64 min_ino)
{
	struct btrfs_inode *inode;
	unsigned long from = min_ino;

	xa_lock(&root->inodes);
	while (true) {
		struct extent_map_tree *tree;

		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
		if (!inode)
			break;

		tree = &inode->extent_tree;

		/*
		 * We want to be fast so if the lock is busy we don't want to
		 * spend time waiting for it (some task is about to do IO for
		 * the inode).
		 */
		if (!write_trylock(&tree->lock))
			goto next;

		/*
		 * Skip inode if it doesn't have loaded extent maps, so we avoid
		 * getting a reference and doing an iput later. This includes
		 * cases like files that were opened for things like stat(2), or
		 * files with all extent maps previously released through the
		 * release folio callback (btrfs_release_folio()) or released in
		 * a previous run, or directories which never have extent maps.
		 */
		if (RB_EMPTY_ROOT(&tree->root)) {
			write_unlock(&tree->lock);
			goto next;
		}

		if (igrab(&inode->vfs_inode))
			break;

		write_unlock(&tree->lock);
next:
		from = btrfs_ino(inode) + 1;
		cond_resched_lock(&root->inodes.xa_lock);
	}
	xa_unlock(&root->inodes);

	return inode;
}

static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_inode *inode;
	long nr_dropped = 0;
	u64 min_ino = fs_info->em_shrinker_last_ino + 1;

	inode = find_first_inode_to_shrink(root, min_ino);
	while (inode) {
		nr_dropped += btrfs_scan_inode(inode, ctx);
		write_unlock(&inode->extent_tree.lock);

		min_ino = btrfs_ino(inode) + 1;
		fs_info->em_shrinker_last_ino = btrfs_ino(inode);
		iput(&inode->vfs_inode);

		if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(fs_info))
			break;

		cond_resched();

		inode = find_first_inode_to_shrink(root, min_ino);
	}

	if (inode) {
		/*
		 * There are still inodes in this root or we happened to process
		 * the last one and reached the scan limit. In either case set
		 * the current root to this one, so we'll resume from the next
		 * inode if there is one or we will find out this was the last
		 * one and move to the next root.
		 */
		fs_info->em_shrinker_last_root = btrfs_root_id(root);
	} else {
		/*
		 * No more inodes in this root, set em_shrinker_last_ino to 0 so
		 * that when processing the next root we start from its first inode.
		 */
		fs_info->em_shrinker_last_ino = 0;
		fs_info->em_shrinker_last_root = btrfs_root_id(root) + 1;
	}

	return nr_dropped;
}

static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_em_shrink_ctx ctx;
	u64 start_root_id;
	u64 next_root_id;
	bool cycled = false;
	long nr_dropped = 0;

	fs_info = container_of(work, struct btrfs_fs_info, em_shrinker_work);

	ctx.scanned = 0;
	ctx.nr_to_scan = atomic64_read(&fs_info->em_shrinker_nr_to_scan);

	start_root_id = fs_info->em_shrinker_last_root;
	next_root_id = fs_info->em_shrinker_last_root;

	if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
		s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);

		trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr);
	}

	while (ctx.scanned < ctx.nr_to_scan && !btrfs_fs_closing(fs_info)) {
		struct btrfs_root *root;
		unsigned long count;

		cond_resched();

		spin_lock(&fs_info->fs_roots_radix_lock);
		count = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					       (void **)&root,
					       (unsigned long)next_root_id, 1);
		if (count == 0) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			if (start_root_id > 0 && !cycled) {
				next_root_id = 0;
				fs_info->em_shrinker_last_root = 0;
				fs_info->em_shrinker_last_ino = 0;
				cycled = true;
				continue;
			}
			break;
		}
		next_root_id = btrfs_root_id(root) + 1;
		root = btrfs_grab_root(root);
		spin_unlock(&fs_info->fs_roots_radix_lock);

		if (!root)
			continue;

		if (btrfs_is_fstree(btrfs_root_id(root)))
			nr_dropped += btrfs_scan_root(root, &ctx);

		btrfs_put_root(root);
	}

	if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) {
		s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);

		trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr);
	}

	atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0);
}

void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
{
	/*
	 * Do nothing if the shrinker is already running. In case of high memory
	 * pressure we can have a lot of tasks calling us and all passing the
	 * same nr_to_scan value, but in reality we may need only to free
	 * nr_to_scan extent maps (or less). In case we need to free more than
	 * that, we will be called again by the fs shrinker, so no worries about
	 * not doing enough work to reclaim memory from extent maps.
	 * We can also be repeatedly called with the same nr_to_scan value
	 * simply because the shrinker runs asynchronously and multiple calls
	 * to this function are made before the shrinker does enough progress.
	 *
	 * That's why we set the atomic counter to nr_to_scan only if its
	 * current value is zero, instead of incrementing the counter by
	 * nr_to_scan.
	 */
	if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
		return;

	queue_work(system_unbound_wq, &fs_info->em_shrinker_work);
}
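
/*
 * Usage sketch (illustrative only): btrfs_free_extent_maps() is the entry
 * point invoked under memory pressure, e.g.
 *
 *	btrfs_free_extent_maps(fs_info, nr_to_scan);
 *
 * the actual scanning then happens asynchronously in
 * btrfs_extent_map_shrinker_worker() on the system_unbound_wq workqueue.
 */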

void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)
{
	atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0);
	INIT_WORK(&fs_info->em_shrinker_work, btrfs_extent_map_shrinker_worker);
}