// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/fsverity.h>
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "locking.h"
#include "backref.h"
#include "disk-io.h"
#include "subpage.h"
#include "zoned.h"
#include "block-group.h"
#include "compression.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "file.h"
#include "dev-replace.h"
#include "super.h"
#include "transaction.h"
static struct kmem_cache *extent_buffer_cache;
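
/*
 * Extent buffer leak tracking, compiled in only with CONFIG_BTRFS_DEBUG.
 *
 * Every allocated extent buffer is added to fs_info->allocated_ebs and
 * removed again when it is freed, so anything still on that list at
 * unmount time is a leaked buffer.
 */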
#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
unsigned long flags;
spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
list_add(&eb->leak_list, &fs_info->allocated_ebs);
spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
unsigned long flags;
spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
list_del(&eb->leak_list);
spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
struct extent_buffer *eb;
unsigned long flags;
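
	/*
	 * If we never got as far as initializing the fs_info (e.g. open_ctree
	 * failed early), the allocated_ebs list head was never set up and
	 * there is nothing to check.
	 */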
if (!fs_info->allocated_ebs.next)
return;
WARN_ON(!list_empty(&fs_info->allocated_ebs));
spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
while (!list_empty(&fs_info->allocated_ebs)) {
eb = list_first_entry(&fs_info->allocated_ebs,
struct extent_buffer, leak_list);
btrfs_err(fs_info,
"buffer leak start %llu len %u refs %d bflags %lu owner %llu",
eb->start, eb->len, refcount_read(&eb->refs), eb->bflags,
btrfs_header_owner(eb));
list_del(&eb->leak_list);
WARN_ON_ONCE(1);
kmem_cache_free(extent_buffer_cache, eb);
}
spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
#else
#define btrfs_leak_debug_add_eb(eb) do {} while (0)
#define btrfs_leak_debug_del_eb(eb) do {} while (0)
#endif
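
/*
 * Context for the bio under assembly, plus related control data such as the
 * number of bytes left before the next ordered extent boundary.
 */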
struct btrfs_bio_ctrl {
struct btrfs_bio *bbio;
loff_t next_file_offset;
enum btrfs_compression_type compress_type;
u32 len_to_oe_boundary;
blk_opf_t opf;
btrfs_bio_end_io_t end_io_func;
struct writeback_control *wbc;
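
	/*
	 * The sectors of the folio which are going to be submitted by
	 * extent_writepage_io().
	 * This is to avoid touching ranges covered by compression/inline.
	 */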
unsigned long submit_bitmap;
struct readahead_control *ractl;
};
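
/*
 * Submit the bio held in @bio_ctrl, if any, and reset it for reuse.
 *
 * Compressed reads are dispatched through the compressed read path,
 * everything else goes straight to btrfs_submit_bbio().
 */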
static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
{
struct btrfs_bio *bbio = bio_ctrl->bbio;
if (!bbio)
return;
ASSERT(bbio->bio.bi_iter.bi_size);
if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
btrfs_submit_compressed_read(bbio);
else
btrfs_submit_bbio(bbio, 0);
bio_ctrl->bbio = NULL;
}
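
/*
 * Submit or fail the current bio in the bio_ctrl structure.  @ret is the
 * error collected so far; a negative value fails the bio instead of
 * submitting it.
 */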
static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
{
struct btrfs_bio *bbio = bio_ctrl->bbio;
if (!bbio)
return;
if (ret) {
ASSERT(ret < 0);
btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
bio_ctrl->bbio = NULL;
} else {
submit_one_bio(bio_ctrl);
}
}
int __init extent_buffer_init_cachep(void)
{
extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
sizeof(struct extent_buffer), 0, 0,
NULL);
if (!extent_buffer_cache)
return -ENOMEM;
return 0;
}
void __cold extent_buffer_free_cachep(void)
{
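	/*
	 * Make sure all delayed RCU frees have finished before we destroy
	 * the cache.
	 */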
rcu_barrier();
kmem_cache_destroy(extent_buffer_cache);
}
static void process_one_folio(struct btrfs_fs_info *fs_info,
struct folio *folio, const struct folio *locked_folio,
unsigned long page_ops, u64 start, u64 end)
{
u32 len;
ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
len = end + 1 - start;
if (page_ops & PAGE_SET_ORDERED)
btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
if (page_ops & PAGE_START_WRITEBACK) {
btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
}
if (page_ops & PAGE_END_WRITEBACK)
btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
btrfs_folio_end_lock(fs_info, folio, start, len);
}
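
/*
 * Walk all folios in the range [@start, @end] and apply @page_ops to each,
 * clamped to the part of the folio that is inside the range.
 */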
static void __process_folios_contig(struct address_space *mapping,
const struct folio *locked_folio, u64 start,
u64 end, unsigned long page_ops)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
pgoff_t index = start >> PAGE_SHIFT;
pgoff_t end_index = end >> PAGE_SHIFT;
struct folio_batch fbatch;
int i;
folio_batch_init(&fbatch);
while (index <= end_index) {
int found_folios;
found_folios = filemap_get_folios_contig(mapping, &index,
end_index, &fbatch);
for (i = 0; i < found_folios; i++) {
struct folio *folio = fbatch.folios[i];
process_one_folio(fs_info, folio, locked_folio,
page_ops, start, end);
}
folio_batch_release(&fbatch);
cond_resched();
}
}
static noinline void unlock_delalloc_folio(const struct inode *inode,
struct folio *locked_folio,
u64 start, u64 end)
{
ASSERT(locked_folio);
__process_folios_contig(inode->i_mapping, locked_folio, start, end,
PAGE_UNLOCK);
}
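
/*
 * Lock all folios in the delalloc range.  If a folio got truncated or
 * cleaned in the meantime, unlock whatever was locked so far and return
 * -EAGAIN so the caller can retry with a smaller range.
 */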
static noinline int lock_delalloc_folios(struct inode *inode,
struct folio *locked_folio,
u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct address_space *mapping = inode->i_mapping;
pgoff_t index = start >> PAGE_SHIFT;
pgoff_t end_index = end >> PAGE_SHIFT;
u64 processed_end = start;
struct folio_batch fbatch;
folio_batch_init(&fbatch);
while (index <= end_index) {
unsigned int found_folios, i;
found_folios = filemap_get_folios_contig(mapping, &index,
end_index, &fbatch);
if (found_folios == 0)
goto out;
for (i = 0; i < found_folios; i++) {
struct folio *folio = fbatch.folios[i];
u64 range_start;
u32 range_len;
if (folio == locked_folio)
continue;
folio_lock(folio);
if (!folio_test_dirty(folio) || folio->mapping != mapping) {
folio_unlock(folio);
goto out;
}
range_start = max_t(u64, folio_pos(folio), start);
range_len = min_t(u64, folio_end(folio), end + 1) - range_start;
btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
processed_end = range_start + range_len - 1;
}
folio_batch_release(&fbatch);
cond_resched();
}
return 0;
out:
folio_batch_release(&fbatch);
if (processed_end > start)
unlock_delalloc_folio(inode, locked_folio, start, processed_end);
return -EAGAIN;
}
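
/*
 * Find and lock a contiguous range of bytes in the file marked as delalloc,
 * no more than @max_bytes.
 *
 * @start:	The original start bytenr to search.
 *		Will store the extent range start bytenr.
 * @end:	The original end bytenr of the search range.
 *		Will store the extent range end bytenr.
 *
 * Return true if we find a delalloc range which starts inside the original
 * range, and @start/@end will store the delalloc range start/end.
 *
 * Return false if we can't find any delalloc range which starts inside the
 * original range, and @start/@end will store the non-delalloc range
 * start/end.
 */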
EXPORT_FOR_TESTS
noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
struct folio *locked_folio,
u64 *start, u64 *end)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
const u64 orig_start = *start;
const u64 orig_end = *end;
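	/* The sanity tests may not set a valid fs_info. */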
u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
u64 delalloc_start;
u64 delalloc_end;
bool found;
struct extent_state *cached_state = NULL;
int ret;
int loops = 0;
ASSERT(orig_end > orig_start);
ASSERT(!(orig_start >= folio_end(locked_folio) ||
orig_end <= folio_pos(locked_folio)));
again:
delalloc_start = *start;
delalloc_end = 0;
found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
max_bytes, &cached_state);
if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
*start = delalloc_start;
*end = min(delalloc_end, orig_end);
btrfs_free_extent_state(cached_state);
return false;
}
if (delalloc_start < *start)
delalloc_start = *start;
if (delalloc_end + 1 - delalloc_start > max_bytes)
delalloc_end = delalloc_start + max_bytes - 1;
ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
delalloc_end);
ASSERT(!ret || ret == -EAGAIN);
if (ret == -EAGAIN) {
btrfs_free_extent_state(cached_state);
cached_state = NULL;
if (!loops) {
max_bytes = PAGE_SIZE;
loops = 1;
goto again;
} else {
found = false;
goto out_failed;
}
}
btrfs_lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
ret = btrfs_test_range_bit(tree, delalloc_start, delalloc_end,
EXTENT_DELALLOC, cached_state);
btrfs_unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
if (!ret) {
unlock_delalloc_folio(inode, locked_folio, delalloc_start,
delalloc_end);
cond_resched();
goto again;
}
*start = delalloc_start;
*end = delalloc_end;
out_failed:
return found;
}
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
const struct folio *locked_folio,
struct extent_state **cached,
u32 clear_bits, unsigned long page_ops)
{
btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
__process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
end, page_ops);
}
static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
if (!fsverity_active(folio->mapping->host) ||
btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
start >= i_size_read(folio->mapping->host))
return true;
return fsverity_verify_folio(folio);
}
static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
ASSERT(folio_pos(folio) <= start &&
start + len <= folio_end(folio));
if (uptodate && btrfs_verify_folio(folio, start, len))
btrfs_folio_set_uptodate(fs_info, folio, start, len);
else
btrfs_folio_clear_uptodate(fs_info, folio, start, len);
if (!btrfs_is_subpage(fs_info, folio))
folio_unlock(folio);
else
btrfs_folio_end_lock(fs_info, folio, start, len);
}
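
/*
 * After a data write IO is done, we need to:
 *
 * - finish the ordered extents covering the range
 * - clear the writeback bits for the range
 * - record any IO error on the mapping
 */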
static void end_bbio_data_write(struct btrfs_bio *bbio)
{
struct btrfs_fs_info *fs_info = bbio->fs_info;
struct bio *bio = &bbio->bio;
int error = blk_status_to_errno(bio->bi_status);
struct folio_iter fi;
const u32 sectorsize = fs_info->sectorsize;
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_folio_all(fi, bio) {
struct folio *folio = fi.folio;
u64 start = folio_pos(folio) + fi.offset;
u32 len = fi.length;
if (!IS_ALIGNED(fi.offset, sectorsize))
btrfs_err(fs_info,
"partial page write in btrfs with offset %zu and length %zu",
fi.offset, fi.length);
else if (!IS_ALIGNED(fi.length, sectorsize))
btrfs_info(fs_info,
"incomplete page write with offset %zu and length %zu",
fi.offset, fi.length);
btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
!error);
if (error)
mapping_set_error(folio->mapping, error);
btrfs_folio_clear_writeback(fs_info, folio, start, len);
}
bio_put(bio);
}
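
/*
 * For subpage, record in the folio state that every block of the folio is
 * locked for read, so that the folio is only unlocked once all blocks have
 * finished reading.
 */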
static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
{
ASSERT(folio_test_locked(folio));
if (!btrfs_is_subpage(fs_info, folio))
return;
ASSERT(folio_test_private(folio));
btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), folio_size(folio));
}
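
/*
 * After a data read IO is done, we need to:
 *
 * - set the uptodate bits on success, or clear them on error
 * - zero out any part of the folio that is beyond i_size
 * - unlock the folio (or the subpage range) once all IO for it has finished
 */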
static void end_bbio_data_read(struct btrfs_bio *bbio)
{
struct btrfs_fs_info *fs_info = bbio->fs_info;
struct bio *bio = &bbio->bio;
struct folio_iter fi;
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_folio_all(fi, &bbio->bio) {
bool uptodate = !bio->bi_status;
struct folio *folio = fi.folio;
struct inode *inode = folio->mapping->host;
u64 start = folio_pos(folio) + fi.offset;
btrfs_debug(fs_info,
"%s: bi_sector=%llu, err=%d, mirror=%u",
__func__, bio->bi_iter.bi_sector, bio->bi_status,
bbio->mirror_num);
if (likely(uptodate)) {
u64 end = start + fi.length - 1;
loff_t i_size = i_size_read(inode);
if (folio_contains(folio, i_size >> PAGE_SHIFT) &&
i_size <= end) {
u32 zero_start = max(offset_in_folio(folio, i_size),
offset_in_folio(folio, start));
u32 zero_len = offset_in_folio(folio, end) + 1 -
zero_start;
folio_zero_range(folio, zero_start, zero_len);
}
}
end_folio_read(folio, uptodate, start, fi.length);
}
bio_put(bio);
}
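
/*
 * Populate every free slot in a provided array with order-0 folios, using
 * GFP_NOFS.  Slots that already hold a folio are skipped.
 *
 * Return 0 if all folios were allocated; -ENOMEM otherwise, in which case
 * every folio currently in the array is released.
 */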
int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
{
for (int i = 0; i < nr_folios; i++) {
if (folio_array[i])
continue;
folio_array[i] = folio_alloc(GFP_NOFS, 0);
if (!folio_array[i])
goto error;
}
return 0;
error:
for (int i = 0; i < nr_folios; i++) {
if (folio_array[i])
folio_put(folio_array[i]);
}
return -ENOMEM;
}
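
/*
 * Populate every free slot in a provided array with pages, using GFP_NOFS
 * (plus __GFP_NOFAIL if @nofail is true).  The bulk allocator may make
 * partial progress, so retry until either the array is full or a pass makes
 * no progress at all, which is treated as -ENOMEM.
 */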
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
bool nofail)
{
const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
unsigned int allocated;
for (allocated = 0; allocated < nr_pages;) {
unsigned int last = allocated;
allocated = alloc_pages_bulk(gfp, nr_pages, page_array);
if (unlikely(allocated == last)) {
for (int i = 0; i < allocated; i++) {
__free_page(page_array[i]);
page_array[i] = NULL;
}
return -ENOMEM;
}
}
return 0;
}
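
/* Allocate order-0 pages for an extent buffer and wrap them as folios. */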
static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
{
struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
int num_pages = num_extent_pages(eb);
int ret;
ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
if (ret < 0)
return ret;
for (int i = 0; i < num_pages; i++)
eb->folios[i] = page_folio(page_array[i]);
eb->folio_size = PAGE_SIZE;
eb->folio_shift = PAGE_SHIFT;
return 0;
}
static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
u64 disk_bytenr, loff_t file_offset)
{
struct bio *bio = &bio_ctrl->bbio->bio;
const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
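		/*
		 * For compressed extents the whole bio points at the start of
		 * the compressed extent, so only the disk bytenr matters.
		 */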
return bio->bi_iter.bi_sector == sector;
}
return bio_ctrl->next_file_offset == file_offset &&
bio_end_sector(bio) == sector;
}
static void alloc_new_bio(struct btrfs_inode *inode,
struct btrfs_bio_ctrl *bio_ctrl,
u64 disk_bytenr, u64 file_offset)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_bio *bbio;
bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
bio_ctrl->end_io_func, NULL);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bbio->bio.bi_write_hint = inode->vfs_inode.i_write_hint;
bbio->inode = inode;
bbio->file_offset = file_offset;
bio_ctrl->bbio = bbio;
bio_ctrl->len_to_oe_boundary = U32_MAX;
bio_ctrl->next_file_offset = file_offset;
if (bio_ctrl->wbc) {
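		/*
		 * Limit the bio to the boundary of the ordered extent that
		 * contains @file_offset, so a single bio never spans two
		 * ordered extents.
		 */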
struct btrfs_ordered_extent *ordered;
ordered = btrfs_lookup_ordered_extent(inode, file_offset);
if (ordered) {
bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
ordered->file_offset +
ordered->disk_num_bytes - file_offset);
bbio->ordered = ordered;
}
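		/*
		 * Pick the last added device to support cgroup writeback.
		 * For multi-device file systems this means blk-cgroup
		 * policies have to always be set on the last added/replaced
		 * device.  This is a bit odd but has been like that for a
		 * long time.
		 */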
bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
}
}
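
/*
 * Add a folio range to the bio under assembly, allocating a new bio first if
 * needed.  The current bio is submitted and a new one started whenever the
 * range is not contiguous with it or an ordered extent boundary is crossed.
 *
 * @disk_bytenr: logical bytenr where the IO will be
 * @folio:	 folio to add to the bio
 * @size:	 portion of the folio we want to submit
 * @pg_offset:	 starting offset inside the folio
 */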
static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
u64 disk_bytenr, struct folio *folio,
size_t size, unsigned long pg_offset)
{
struct btrfs_inode *inode = folio_to_inode(folio);
loff_t file_offset = folio_pos(folio) + pg_offset;
ASSERT(pg_offset + size <= folio_size(folio));
ASSERT(bio_ctrl->end_io_func);
if (bio_ctrl->bbio &&
!btrfs_bio_is_contig(bio_ctrl, disk_bytenr, file_offset))
submit_one_bio(bio_ctrl);
do {
u32 len = size;
if (!bio_ctrl->bbio)
alloc_new_bio(inode, bio_ctrl, disk_bytenr, file_offset);
if (len > bio_ctrl->len_to_oe_boundary) {
ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
ASSERT(is_data_inode(inode));
len = bio_ctrl->len_to_oe_boundary;
}
if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
submit_one_bio(bio_ctrl);
continue;
}
bio_ctrl->next_file_offset += len;
if (bio_ctrl->wbc)
wbc_account_cgroup_owner(bio_ctrl->wbc, folio, len);
size -= len;
pg_offset += len;
disk_bytenr += len;
file_offset += len;
if (bio_ctrl->len_to_oe_boundary != U32_MAX)
bio_ctrl->len_to_oe_boundary -= len;
if (bio_ctrl->len_to_oe_boundary == 0)
submit_one_bio(bio_ctrl);
} while (size);
}
static int attach_extent_buffer_folio(struct extent_buffer *eb,
struct folio *folio,
struct btrfs_folio_state *prealloc)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
int ret = 0;
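
	/*
	 * If the folio is mapped to the btree inode, we should hold the
	 * private lock to prevent races.
	 * For cloned or dummy extent buffers, their folios are not mapped and
	 * will not race with any other ebs.
	 */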
if (folio->mapping)
lockdep_assert_held(&folio->mapping->i_private_lock);
if (!btrfs_meta_is_subpage(fs_info)) {
if (!folio_test_private(folio))
folio_attach_private(folio, eb);
else
WARN_ON(folio_get_private(folio) != eb);
return 0;
}
if (folio_test_private(folio)) {
btrfs_free_folio_state(prealloc);
return 0;
}
if (prealloc)
folio_attach_private(folio, prealloc);
else
ret = btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
return ret;
}
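
/*
 * Set the folio private to either the subpage state structure or the
 * EXTENT_FOLIO_PRIVATE magic value, so that the folio release and invalidate
 * paths know this folio is managed by btrfs.
 */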
int set_folio_extent_mapped(struct folio *folio)
{
struct btrfs_fs_info *fs_info;
ASSERT(folio->mapping);
if (folio_test_private(folio))
return 0;
fs_info = folio_to_fs_info(folio);
if (btrfs_is_subpage(fs_info, folio))
return btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
return 0;
}
void clear_folio_extent_mapped(struct folio *folio)
{
struct btrfs_fs_info *fs_info;
ASSERT(folio->mapping);
if (!folio_test_private(folio))
return;
fs_info = folio_to_fs_info(folio);
if (btrfs_is_subpage(fs_info, folio))
return btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
folio_detach_private(folio);
}
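
/*
 * Return the cached extent map if it covers @start, otherwise look one up
 * via btrfs_get_extent() and cache it for the next block.  The caller owns a
 * reference on the returned extent map.
 */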
static struct extent_map *get_extent_map(struct btrfs_inode *inode,
struct folio *folio, u64 start,
u64 len, struct extent_map **em_cached)
{
struct extent_map *em;
ASSERT(em_cached);
if (*em_cached) {
em = *em_cached;
if (btrfs_extent_map_in_tree(em) && start >= em->start &&
start < btrfs_extent_map_end(em)) {
refcount_inc(&em->refs);
return em;
}
btrfs_free_extent_map(em);
*em_cached = NULL;
}
em = btrfs_get_extent(inode, folio, start, len);
if (!IS_ERR(em)) {
BUG_ON(*em_cached);
refcount_inc(&em->refs);
*em_cached = em;
}
return em;
}
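
/*
 * Expand the readahead window so that it covers the whole extent map, since
 * the compressed read path needs all folios of the compressed extent anyway.
 * Holes and inline extents are never expanded.
 */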
static void btrfs_readahead_expand(struct readahead_control *ractl,
const struct extent_map *em)
{
const u64 ra_pos = readahead_pos(ractl);
const u64 ra_end = ra_pos + readahead_length(ractl);
const u64 em_end = em->start + em->ram_bytes;
if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE)
return;
ASSERT(em_end >= ra_pos,
"extent_map %llu %llu ends before current readahead position %llu",
em->start, em->len, ra_pos);
if (em_end > ra_end)
readahead_expand(ractl, ra_pos, em_end - ra_pos);
}
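
/*
 * Basic readpage implementation.  Reads the folio block by block: blocks in
 * a hole, inline or already uptodate are zeroed/completed directly,
 * everything else is added to the read bio under assembly in @bio_ctrl.
 * The caller must already hold the io tree lock for the folio range.
 */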
static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
{
struct inode *inode = folio->mapping->host;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
u64 start = folio_pos(folio);
const u64 end = start + folio_size(folio) - 1;
u64 extent_offset;
u64 last_byte = i_size_read(inode);
struct extent_map *em;
int ret = 0;
const size_t blocksize = fs_info->sectorsize;
ret = set_folio_extent_mapped(folio);
if (ret < 0) {
folio_unlock(folio);
return ret;
}
if (folio_contains(folio, last_byte >> PAGE_SHIFT)) {
size_t zero_offset = offset_in_folio(folio, last_byte);
if (zero_offset)
folio_zero_range(folio, zero_offset,
folio_size(folio) - zero_offset);
}
bio_ctrl->end_io_func = end_bbio_data_read;
begin_folio_read(fs_info, folio);
for (u64 cur = start; cur <= end; cur += blocksize) {
enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
unsigned long pg_offset = offset_in_folio(folio, cur);
bool force_bio_submit = false;
u64 disk_bytenr;
u64 block_start;
ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
if (cur >= last_byte) {
folio_zero_range(folio, pg_offset, end - cur + 1);
end_folio_read(folio, true, cur, end - cur + 1);
break;
}
if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
end_folio_read(folio, true, cur, blocksize);
continue;
}
em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
if (IS_ERR(em)) {
end_folio_read(folio, false, cur, end + 1 - cur);
return PTR_ERR(em);
}
extent_offset = cur - em->start;
BUG_ON(btrfs_extent_map_end(em) <= cur);
BUG_ON(end < cur);
compress_type = btrfs_extent_map_compression(em);
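		/*
		 * Only expand the readahead window for extents whose folios
		 * the compressed read path is going to need anyway.
		 */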
if (bio_ctrl->ractl &&
!btrfs_is_subpage(fs_info, folio) &&
compress_type != BTRFS_COMPRESS_NONE)
btrfs_readahead_expand(bio_ctrl->ractl, em);
if (compress_type != BTRFS_COMPRESS_NONE)
disk_bytenr = em->disk_bytenr;
else
disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset;
if (em->flags & EXTENT_FLAG_PREALLOC)
block_start = EXTENT_MAP_HOLE;
else
block_start = btrfs_extent_map_block_start(em);
if (compress_type != BTRFS_COMPRESS_NONE &&
prev_em_start && *prev_em_start != (u64)-1 &&
*prev_em_start != em->start)
force_bio_submit = true;
if (prev_em_start)
*prev_em_start = em->start;
btrfs_free_extent_map(em);
em = NULL;
if (block_start == EXTENT_MAP_HOLE) {
folio_zero_range(folio, pg_offset, blocksize);
end_folio_read(folio, true, cur, blocksize);
continue;
}
if (block_start == EXTENT_MAP_INLINE) {
end_folio_read(folio, true, cur, blocksize);
continue;
}
if (bio_ctrl->compress_type != compress_type) {
submit_one_bio(bio_ctrl);
bio_ctrl->compress_type = compress_type;
}
if (force_bio_submit)
submit_one_bio(bio_ctrl);
submit_extent_folio(bio_ctrl, disk_bytenr, folio, blocksize,
pg_offset);
}
return 0;
}
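
/*
 * Check if we can skip waiting on the ordered extent covering the block at
 * @fileoff.
 *
 * @fileoff:	Both input and output.
 *		Input is the file offset where the check should start at.
 *		Output is the next check point if we can skip it (either the
 *		end of the folio or the end of the ordered extent, whichever
 *		comes first).
 *
 * If the current block is already dirty or uptodate in the page cache, the
 * ordered extent does not need to finish before we can read it, so return
 * true and advance @fileoff.  Otherwise return false.
 */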
static bool can_skip_one_ordered_range(struct btrfs_inode *inode,
struct btrfs_ordered_extent *ordered,
u64 *fileoff)
{
const struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct folio *folio;
const u32 blocksize = fs_info->sectorsize;
u64 cur = *fileoff;
bool ret;
folio = filemap_get_folio(inode->vfs_inode.i_mapping, cur >> PAGE_SHIFT);
ASSERT(!IS_ERR(folio));
ASSERT(folio_test_locked(folio));
if (!folio_test_private(folio)) {
ret = false;
goto out;
}
if (btrfs_folio_test_dirty(fs_info, folio, cur, blocksize)) {
u64 range_len = min(folio_end(folio),
ordered->file_offset + ordered->num_bytes) - cur;
ret = true;
ASSERT(btrfs_folio_test_dirty(fs_info, folio, cur, range_len));
*fileoff = ordered->file_offset + ordered->num_bytes;
goto out;
}
if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
u64 range_len = min(folio_end(folio),
ordered->file_offset + ordered->num_bytes) - cur;
ASSERT(btrfs_folio_test_uptodate(fs_info, folio, cur, range_len));
ret = true;
*fileoff = cur + range_len;
goto out;
}
ret = false;
out:
folio_put(folio);
return ret;
}
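
/*
 * Check if the whole part of [@start, @end] covered by @ordered can be
 * skipped, i.e. every block is already dirty or uptodate in the page cache.
 */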
static bool can_skip_ordered_extent(struct btrfs_inode *inode,
struct btrfs_ordered_extent *ordered,
u64 start, u64 end)
{
const u64 range_end = min(end, ordered->file_offset + ordered->num_bytes - 1);
u64 cur = max(start, ordered->file_offset);
while (cur < range_end) {
bool can_skip;
can_skip = can_skip_one_ordered_range(inode, ordered, &cur);
if (!can_skip)
return false;
}
return true;
}
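
/*
 * Lock the io tree range for a read.  If an ordered extent in the range
 * cannot be skipped (its data is not yet fully in the page cache), drop the
 * lock, wait for the ordered extent to cover the range, and retry.
 */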
static void lock_extents_for_read(struct btrfs_inode *inode, u64 start, u64 end,
struct extent_state **cached_state)
{
u64 cur_pos;
ASSERT(cached_state);
ASSERT(IS_ALIGNED(start, PAGE_SIZE));
ASSERT(IS_ALIGNED(end + 1, PAGE_SIZE));
again:
btrfs_lock_extent(&inode->io_tree, start, end, cached_state);
cur_pos = start;
while (cur_pos < end) {
struct btrfs_ordered_extent *ordered;
ordered = btrfs_lookup_ordered_range(inode, cur_pos,
end - cur_pos + 1);
if (!ordered)
break;
if (can_skip_ordered_extent(inode, ordered, start, end)) {
cur_pos = min(ordered->file_offset + ordered->num_bytes,
end + 1);
btrfs_put_ordered_extent(ordered);
continue;
}
btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
btrfs_start_ordered_extent_nowriteback(ordered, start, end + 1 - start);
btrfs_put_ordered_extent(ordered);
goto again;
}
}
int btrfs_read_folio(struct file *file, struct folio *folio)
{
struct btrfs_inode *inode = folio_to_inode(folio);
const u64 start = folio_pos(folio);
const u64 end = start + folio_size(folio) - 1;
struct extent_state *cached_state = NULL;
struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
struct extent_map *em_cached = NULL;
int ret;
lock_extents_for_read(inode, start, end, &cached_state);
ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
btrfs_free_extent_map(em_cached);
submit_one_bio(&bio_ctrl);
return ret;
}
static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap,
u64 start, u32 len)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
const u64 folio_start = folio_pos(folio);
unsigned int start_bit;
unsigned int nbits;
ASSERT(start >= folio_start && start + len <= folio_start + folio_size(folio));
start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
nbits = len >> fs_info->sectorsize_bits;
ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
bitmap_set(delalloc_bitmap, start_bit, nbits);
}
static bool find_next_delalloc_bitmap(struct folio *folio,
unsigned long *delalloc_bitmap, u64 start,
u64 *found_start, u32 *found_len)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
const u64 folio_start = folio_pos(folio);
const unsigned int bitmap_size = btrfs_blocks_per_folio(fs_info, folio);
unsigned int start_bit;
unsigned int first_zero;
unsigned int first_set;
ASSERT(start >= folio_start && start < folio_start + folio_size(folio));
start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
if (first_set >= bitmap_size)
return false;
*found_start = folio_start + (first_set << fs_info->sectorsize_bits);
first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set);
*found_len = (first_zero - first_set) << fs_info->sectorsize_bits;
return true;
}
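
/*
 * Helper for extent_writepage(), doing all of the delayed allocation setup.
 *
 * Return 1 if btrfs_run_delalloc_range() did all the work required to write
 * the folio (e.g. copying into an inline extent).  In this case the IO has
 * been started and the folio is already unlocked.
 *
 * Return 0 if all went well (folio still locked).
 * Return <0 if there were errors (folio still locked).
 */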
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
struct folio *folio,
struct btrfs_bio_ctrl *bio_ctrl)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
struct writeback_control *wbc = bio_ctrl->wbc;
const bool is_subpage = btrfs_is_subpage(fs_info, folio);
const u64 page_start = folio_pos(folio);
const u64 page_end = page_start + folio_size(folio) - 1;
const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
unsigned long delalloc_bitmap = 0;
u64 last_delalloc_end = 0;
u64 last_finished_delalloc_end = page_start;
u64 delalloc_start = page_start;
u64 delalloc_end = page_end;
u64 delalloc_to_write = 0;
int ret = 0;
int bit;
if (btrfs_is_subpage(fs_info, folio)) {
ASSERT(blocks_per_folio > 1);
btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
} else {
bio_ctrl->submit_bitmap = 1;
}
for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) {
u64 start = page_start + (bit << fs_info->sectorsize_bits);
btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
}
while (delalloc_start < page_end) {
delalloc_end = page_end;
if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
&delalloc_start, &delalloc_end)) {
delalloc_start = delalloc_end + 1;
continue;
}
set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start,
min(delalloc_end, page_end) + 1 - delalloc_start);
last_delalloc_end = delalloc_end;
delalloc_start = delalloc_end + 1;
}
delalloc_start = page_start;
if (!last_delalloc_end)
goto out;
while (delalloc_start < page_end) {
u64 found_start;
u32 found_len;
bool found;
if (!is_subpage) {
found_start = page_start;
found_len = last_delalloc_end + 1 - found_start;
found = true;
} else {
found = find_next_delalloc_bitmap(folio, &delalloc_bitmap,
delalloc_start, &found_start, &found_len);
}
if (!found)
break;
if (found_start + found_len >= page_end)
found_len = last_delalloc_end + 1 - found_start;
if (ret >= 0) {
last_finished_delalloc_end = found_start;
ret = btrfs_run_delalloc_range(inode, folio,
found_start,
found_start + found_len - 1,
wbc);
if (ret >= 0)
last_finished_delalloc_end = found_start + found_len;
if (unlikely(ret < 0))
btrfs_err_rl(fs_info,
"failed to run delalloc range, root=%lld ino=%llu folio=%llu submit_bitmap=%*pbl start=%llu len=%u: %d",
btrfs_root_id(inode->root),
btrfs_ino(inode),
folio_pos(folio),
blocks_per_folio,
&bio_ctrl->submit_bitmap,
found_start, found_len, ret);
} else {
btrfs_unlock_extent(&inode->io_tree, found_start,
found_start + found_len - 1, NULL);
unlock_delalloc_folio(&inode->vfs_inode, folio,
found_start,
found_start + found_len - 1);
}
if (ret > 0) {
unsigned int start_bit = (found_start - page_start) >>
fs_info->sectorsize_bits;
unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
page_start) >> fs_info->sectorsize_bits;
bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
}
if (found_start + found_len >= last_delalloc_end + 1)
break;
delalloc_start = found_start + found_len;
}
if (unlikely(ret < 0)) {
unsigned int bitmap_size = min(
(last_finished_delalloc_end - page_start) >>
fs_info->sectorsize_bits,
blocks_per_folio);
for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
btrfs_mark_ordered_io_finished(inode, folio,
page_start + (bit << fs_info->sectorsize_bits),
fs_info->sectorsize, false);
return ret;
}
out:
if (last_delalloc_end)
delalloc_end = last_delalloc_end;
else
delalloc_end = page_end;
delalloc_to_write +=
DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
if (bitmap_empty(&bio_ctrl->submit_bitmap, blocks_per_folio)) {
wbc->nr_to_write -= delalloc_to_write;
return 1;
}
if (wbc->nr_to_write < delalloc_to_write) {
int thresh = 8192;
if (delalloc_to_write < thresh * 2)
thresh = delalloc_to_write;
wbc->nr_to_write = min_t(u64, delalloc_to_write,
thresh);
}
return 0;
}
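
/*
 * Submit one sector of the folio for write.
 *
 * Return 0 if we have submitted or queued the sector for submission.
 * Return <0 for critical errors.
 *
 * The caller must ensure filepos < i_size; blocks beyond i_size are handled
 * by the caller.
 */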
static int submit_one_sector(struct btrfs_inode *inode,
struct folio *folio,
u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
loff_t i_size)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map *em;
u64 block_start;
u64 disk_bytenr;
u64 extent_offset;
u64 em_end;
const u32 sectorsize = fs_info->sectorsize;
ASSERT(IS_ALIGNED(filepos, sectorsize));
ASSERT(filepos < i_size);
em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
if (IS_ERR(em)) {
btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
btrfs_folio_clear_writeback(fs_info, folio, filepos, sectorsize);
return PTR_ERR(em);
}
extent_offset = filepos - em->start;
em_end = btrfs_extent_map_end(em);
ASSERT(filepos <= em_end);
ASSERT(IS_ALIGNED(em->start, sectorsize));
ASSERT(IS_ALIGNED(em->len, sectorsize));
block_start = btrfs_extent_map_block_start(em);
disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset;
ASSERT(!btrfs_extent_map_is_compressed(em));
ASSERT(block_start != EXTENT_MAP_HOLE);
ASSERT(block_start != EXTENT_MAP_INLINE);
btrfs_free_extent_map(em);
em = NULL;
btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
ASSERT(folio_test_writeback(folio));
submit_extent_folio(bio_ctrl, disk_bytenr, folio,
sectorsize, filepos - folio_pos(folio));
return 0;
}
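
/*
 * Helper for extent_writepage().  This calls the writepage start hooks and
 * does the loop mapping the dirty blocks of the range into bios.
 *
 * Return 1 if the IO is started and the folio is unlocked.
 * Return 0 if all went well (folio still locked).
 * Return <0 if there were errors (folio still locked).
 */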
static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
struct folio *folio,
u64 start, u32 len,
struct btrfs_bio_ctrl *bio_ctrl,
loff_t i_size)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned long range_bitmap = 0;
bool submitted_io = false;
bool error = false;
const u64 folio_start = folio_pos(folio);
const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
u64 cur;
int bit;
int ret = 0;
ASSERT(start >= folio_start &&
start + len <= folio_start + folio_size(folio));
ret = btrfs_writepage_cow_fixup(folio);
if (ret == -EAGAIN) {
folio_redirty_for_writepage(bio_ctrl->wbc, folio);
folio_unlock(folio);
return 1;
}
if (ret < 0) {
btrfs_folio_clear_dirty(fs_info, folio, start, len);
btrfs_folio_set_writeback(fs_info, folio, start, len);
btrfs_folio_clear_writeback(fs_info, folio, start, len);
return ret;
}
for (cur = start; cur < start + len; cur += fs_info->sectorsize)
set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
blocks_per_folio);
bio_ctrl->end_io_func = end_bbio_data_write;
for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) {
cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
if (cur >= i_size) {
btrfs_mark_ordered_io_finished(inode, folio, cur,
start + len - cur, true);
btrfs_folio_clear_dirty(fs_info, folio, cur,
start + len - cur);
break;
}
ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
if (unlikely(ret < 0)) {
submit_one_bio(bio_ctrl);
btrfs_mark_ordered_io_finished(inode, folio, cur,
fs_info->sectorsize, false);
error = true;
continue;
}
submitted_io = true;
}
if (!submitted_io && !error) {
btrfs_folio_set_writeback(fs_info, folio, start, len);
btrfs_folio_clear_writeback(fs_info, folio, start, len);
}
return ret;
}
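
/*
 * Write one dirty folio: run delalloc, then map the dirty blocks into bios
 * and mark them writeback.  The end_io handler clears the writeback state
 * once the IO finishes.
 *
 * Return 0 if everything goes well.
 * Return <0 for error.
 */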
static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
{
struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;
size_t pg_offset;
loff_t i_size = i_size_read(&inode->vfs_inode);
const pgoff_t end_index = i_size >> PAGE_SHIFT;
const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
WARN_ON(!folio_test_locked(folio));
pg_offset = offset_in_folio(folio, i_size);
if (folio->index > end_index ||
(folio->index == end_index && !pg_offset)) {
folio_invalidate(folio, 0, folio_size(folio));
folio_unlock(folio);
return 0;
}
if (folio_contains(folio, end_index))
folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
bio_ctrl->submit_bitmap = (unsigned long)-1;
if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL) &&
unlikely(!folio_test_private(folio))) {
WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
btrfs_err_rl(fs_info,
"root %lld ino %llu folio %llu is marked dirty without notifying the fs",
btrfs_root_id(inode->root),
btrfs_ino(inode), folio_pos(folio));
ret = -EUCLEAN;
goto done;
}
ret = set_folio_extent_mapped(folio);
if (ret < 0)
goto done;
ret = writepage_delalloc(inode, folio, bio_ctrl);
if (ret == 1)
return 0;
if (ret)
goto done;
ret = extent_writepage_io(inode, folio, folio_pos(folio),
folio_size(folio), bio_ctrl, i_size);
if (ret == 1)
return 0;
if (ret < 0)
btrfs_err_rl(fs_info,
"failed to submit blocks, root=%lld inode=%llu folio=%llu submit_bitmap=%*pbl: %d",
btrfs_root_id(inode->root), btrfs_ino(inode),
folio_pos(folio), blocks_per_folio,
&bio_ctrl->submit_bitmap, ret);
bio_ctrl->wbc->nr_to_write--;
done:
if (ret < 0)
mapping_set_error(folio->mapping, ret);
btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
ASSERT(ret <= 0);
return ret;
}
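
/*
 * Lock the extent buffer for writeback and clear its dirty state.
 *
 * Return %false if the extent buffer doesn't need to be submitted (e.g. it
 * is not dirty, or writeback is already running and this is not a
 * data-integrity writeout).
 * Return %true if the extent buffer was switched from dirty to writeback and
 * must be submitted.
 */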
static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
struct writeback_control *wbc)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
bool ret = false;
btrfs_tree_lock(eb);
while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
btrfs_tree_unlock(eb);
if (wbc->sync_mode != WB_SYNC_ALL)
return false;
wait_on_extent_buffer_writeback(eb);
btrfs_tree_lock(eb);
}
spin_lock(&eb->refs_lock);
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
unsigned long flags;
set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
spin_unlock(&eb->refs_lock);
xas_lock_irqsave(&xas, flags);
xas_load(&xas);
xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
xas_unlock_irqrestore(&xas, flags);
btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
-eb->len,
fs_info->dirty_metadata_batch);
ret = true;
} else {
spin_unlock(&eb->refs_lock);
}
btrfs_tree_unlock(eb);
return ret;
}
static void set_btree_ioerr(struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
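	/*
	 * A write failure in a log tree must be remembered per log (there can
	 * be two log trees per transaction), so that fsync falls back to a
	 * full transaction commit instead of trusting a log tree that never
	 * fully reached disk.  A log_index of -1 means the eb belongs to a
	 * regular (non-log) tree.
	 */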
switch (eb->log_index) {
case -1:
set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
break;
case 0:
set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
break;
case 1:
set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
break;
default:
BUG();
}
}
static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
unsigned long flags;
xas_lock_irqsave(&xas, flags);
xas_load(&xas);
xas_set_mark(&xas, mark);
xas_unlock_irqrestore(&xas, flags);
}
static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
unsigned long flags;
xas_lock_irqsave(&xas, flags);
xas_load(&xas);
xas_clear_mark(&xas, mark);
xas_unlock_irqrestore(&xas, flags);
}
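
/*
 * Tag all currently dirty extent buffers in the range for writeback, the
 * buffer tree equivalent of tag_pages_for_writeback().
 */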
static void buffer_tree_tag_for_writeback(struct btrfs_fs_info *fs_info,
unsigned long start, unsigned long end)
{
XA_STATE(xas, &fs_info->buffer_tree, start);
unsigned int tagged = 0;
void *eb;
xas_lock_irq(&xas);
xas_for_each_marked(&xas, eb, end, PAGECACHE_TAG_DIRTY) {
xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
if (++tagged % XA_CHECK_SCHED)
continue;
xas_pause(&xas);
xas_unlock_irq(&xas);
cond_resched();
xas_lock_irq(&xas);
}
xas_unlock_irq(&xas);
}
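
/*
 * A batch of extent buffers collected from the buffer tree, mirroring what
 * struct folio_batch does for folios.
 */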
struct eb_batch {
unsigned int nr;
unsigned int cur;
struct extent_buffer *ebs[PAGEVEC_SIZE];
};
static inline bool eb_batch_add(struct eb_batch *batch, struct extent_buffer *eb)
{
batch->ebs[batch->nr++] = eb;
return (batch->nr < PAGEVEC_SIZE);
}
static inline void eb_batch_init(struct eb_batch *batch)
{
batch->nr = 0;
batch->cur = 0;
}
static inline struct extent_buffer *eb_batch_next(struct eb_batch *batch)
{
if (batch->cur >= batch->nr)
return NULL;
return batch->ebs[batch->cur++];
}
static inline void eb_batch_release(struct eb_batch *batch)
{
for (unsigned int i = 0; i < batch->nr; i++)
free_extent_buffer(batch->ebs[i]);
eb_batch_init(batch);
}
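
/*
 * Find the first marked extent buffer at or after the xa_state position,
 * under RCU.  Retries until a reference can safely be taken on a buffer that
 * is still present in the slot it was found in.
 */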
static inline struct extent_buffer *find_get_eb(struct xa_state *xas, unsigned long max,
xa_mark_t mark)
{
struct extent_buffer *eb;
retry:
eb = xas_find_marked(xas, max, mark);
if (xas_retry(xas, eb))
goto retry;
if (!eb)
return NULL;
if (!refcount_inc_not_zero(&eb->refs)) {
xas_reset(xas);
goto retry;
}
if (unlikely(eb != xas_reload(xas))) {
free_extent_buffer(eb);
xas_reset(xas);
goto retry;
}
return eb;
}
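
/*
 * Fill @batch with extent buffers tagged with @tag in the range
 * [*start, end], advancing *start past the last buffer returned.  The buffer
 * tree equivalent of filemap_get_folios_tag().
 */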
static unsigned int buffer_tree_get_ebs_tag(struct btrfs_fs_info *fs_info,
unsigned long *start,
unsigned long end, xa_mark_t tag,
struct eb_batch *batch)
{
XA_STATE(xas, &fs_info->buffer_tree, *start);
struct extent_buffer *eb;
rcu_read_lock();
while ((eb = find_get_eb(&xas, end, tag)) != NULL) {
if (!eb_batch_add(batch, eb)) {
*start = ((eb->start + eb->len) >> fs_info->nodesize_bits);
goto out;
}
}
if (end == ULONG_MAX)
*start = ULONG_MAX;
else
*start = end + 1;
out:
rcu_read_unlock();
return batch->nr;
}
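
/*
 * Look up an extent buffer in the buffer tree under RCU, taking a reference
 * only if the buffer is still alive.
 */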
static struct extent_buffer *find_extent_buffer_nolock(
struct btrfs_fs_info *fs_info, u64 start)
{
struct extent_buffer *eb;
unsigned long index = (start >> fs_info->nodesize_bits);
rcu_read_lock();
eb = xa_load(&fs_info->buffer_tree, index);
if (eb && !refcount_inc_not_zero(&eb->refs))
eb = NULL;
rcu_read_unlock();
return eb;
}
static void end_bbio_meta_write(struct btrfs_bio *bbio)
{
struct extent_buffer *eb = bbio->private;
struct folio_iter fi;
if (bbio->bio.bi_status != BLK_STS_OK)
set_btree_ioerr(eb);
bio_for_each_folio_all(fi, &bbio->bio) {
btrfs_meta_folio_clear_writeback(fi.folio, eb);
}
buffer_tree_clear_mark(eb, PAGECACHE_TAG_WRITEBACK);
clear_and_wake_up_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
bio_put(&bbio->bio);
}
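
/*
 * Zero out any unused space in the extent buffer before writing it out: the
 * area past the last key pointer in a node, or the gap between the item
 * headers and the item data in a leaf.
 */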
static void prepare_eb_write(struct extent_buffer *eb)
{
u32 nritems;
unsigned long start;
unsigned long end;
clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
nritems = btrfs_header_nritems(eb);
if (btrfs_header_level(eb) > 0) {
end = btrfs_node_key_ptr_offset(eb, nritems);
memzero_extent_buffer(eb, end, eb->len - end);
} else {
start = btrfs_item_nr_offset(eb, nritems);
end = btrfs_item_nr_offset(eb, 0);
if (nritems == 0)
end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
else
end += btrfs_item_offset(eb, nritems - 1);
memzero_extent_buffer(eb, start, end - start);
}
}
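
/* Submit the write bio for a single extent buffer. */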
static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
struct writeback_control *wbc)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
struct btrfs_bio *bbio;
prepare_eb_write(eb);
bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
eb->fs_info, end_bbio_meta_write, eb);
bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
wbc_init_bio(wbc, &bbio->bio);
bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
bbio->file_offset = eb->start;
for (int i = 0; i < num_extent_folios(eb); i++) {
struct folio *folio = eb->folios[i];
u64 range_start = max_t(u64, eb->start, folio_pos(folio));
u32 range_len = min_t(u64, folio_end(folio),
eb->start + eb->len) - range_start;
folio_lock(folio);
btrfs_meta_folio_clear_dirty(folio, eb);
btrfs_meta_folio_set_writeback(folio, eb);
if (!folio_test_dirty(folio))
wbc->nr_to_write -= folio_nr_pages(folio);
bio_add_folio_nofail(&bbio->bio, folio, range_len,
offset_in_folio(folio, range_start));
wbc_account_cgroup_owner(wbc, folio, range_len);
folio_unlock(folio);
}
btrfs_submit_bbio(bbio, 0);
}
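
/*
 * Wait for all extent buffer writeback in the bytenr range [@start, @end]
 * (inclusive) to finish.
 */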
void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start,
u64 end)
{
struct eb_batch batch;
unsigned long start_index = (start >> fs_info->nodesize_bits);
unsigned long end_index = (end >> fs_info->nodesize_bits);
eb_batch_init(&batch);
while (start_index <= end_index) {
struct extent_buffer *eb;
unsigned int nr_ebs;
nr_ebs = buffer_tree_get_ebs_tag(fs_info, &start_index, end_index,
PAGECACHE_TAG_WRITEBACK, &batch);
if (!nr_ebs)
break;
while ((eb = eb_batch_next(&batch)) != NULL)
wait_on_extent_buffer_writeback(eb);
eb_batch_release(&batch);
cond_resched();
}
}
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct btrfs_eb_write_context ctx = { .wbc = wbc };
struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
int ret = 0;
int done = 0;
int nr_to_write_done = 0;
struct eb_batch batch;
unsigned int nr_ebs;
unsigned long index;
unsigned long end;
int scanned = 0;
xa_mark_t tag;
eb_batch_init(&batch);
if (wbc->range_cyclic) {
index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->nodesize_bits);
end = -1;
scanned = (index == 0);
} else {
index = (wbc->range_start >> fs_info->nodesize_bits);
end = (wbc->range_end >> fs_info->nodesize_bits);
scanned = 1;
}
if (wbc->sync_mode == WB_SYNC_ALL)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
btrfs_zoned_meta_io_lock(fs_info);
retry:
if (wbc->sync_mode == WB_SYNC_ALL)
buffer_tree_tag_for_writeback(fs_info, index, end);
while (!done && !nr_to_write_done && (index <= end) &&
(nr_ebs = buffer_tree_get_ebs_tag(fs_info, &index, end, tag, &batch))) {
struct extent_buffer *eb;
while ((eb = eb_batch_next(&batch)) != NULL) {
ctx.eb = eb;
ret = btrfs_check_meta_write_pointer(eb->fs_info, &ctx);
if (ret) {
if (ret == -EBUSY)
ret = 0;
if (ret) {
done = 1;
break;
}
continue;
}
if (!lock_extent_buffer_for_io(eb, wbc))
continue;
if (ctx.zoned_bg) {
btrfs_schedule_zone_finish_bg(ctx.zoned_bg, eb);
ctx.zoned_bg->meta_write_pointer += eb->len;
}
write_one_eb(eb, wbc);
}
nr_to_write_done = (wbc->nr_to_write <= 0);
eb_batch_release(&batch);
cond_resched();
}
if (!scanned && !done) {
scanned = 1;
index = 0;
goto retry;
}
if (ret > 0)
ret = 0;
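	/*
	 * If something went wrong, don't allow any metadata write bio to be
	 * submitted.
	 *
	 * This would prevent use-after-free if we had dirty pages not cleaned
	 * up, which can still happen with fuzzed images.
	 */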
if (!ret && BTRFS_FS_ERROR(fs_info))
ret = -EROFS;
if (ctx.zoned_bg)
btrfs_put_block_group(ctx.zoned_bg);
btrfs_zoned_meta_io_unlock(fs_info);
return ret;
}
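
/*
 * Walk the list of dirty folios of the given address space and write all of
 * them.  For WB_SYNC_ALL (and subpage) any folio already under writeback is
 * waited on first; otherwise folios still under IO are skipped.
 */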
static int extent_write_cache_pages(struct address_space *mapping,
struct btrfs_bio_ctrl *bio_ctrl)
{
struct writeback_control *wbc = bio_ctrl->wbc;
struct inode *inode = mapping->host;
int ret = 0;
int done = 0;
int nr_to_write_done = 0;
struct folio_batch fbatch;
unsigned int nr_folios;
pgoff_t index;
pgoff_t end;
pgoff_t done_index;
int range_whole = 0;
int scanned = 0;
xa_mark_t tag;
if (!igrab(inode))
return 0;
folio_batch_init(&fbatch);
if (wbc->range_cyclic) {
index = mapping->writeback_index;
end = -1;
scanned = (index == 0);
} else {
index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
scanned = 1;
}
if (range_whole && wbc->nr_to_write == LONG_MAX &&
test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
&BTRFS_I(inode)->runtime_flags))
wbc->tagged_writepages = 1;
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !nr_to_write_done && (index <= end) &&
(nr_folios = filemap_get_folios_tag(mapping, &index,
end, tag, &fbatch))) {
unsigned i;
for (i = 0; i < nr_folios; i++) {
struct folio *folio = fbatch.folios[i];
done_index = folio_next_index(folio);
if (!folio_trylock(folio)) {
submit_write_bio(bio_ctrl, 0);
folio_lock(folio);
}
if (unlikely(folio->mapping != mapping)) {
folio_unlock(folio);
continue;
}
if (!folio_test_dirty(folio)) {
folio_unlock(folio);
continue;
}
if (wbc->sync_mode != WB_SYNC_NONE ||
btrfs_is_subpage(inode_to_fs_info(inode), folio)) {
if (folio_test_writeback(folio))
submit_write_bio(bio_ctrl, 0);
folio_wait_writeback(folio);
}
if (folio_test_writeback(folio) ||
!folio_clear_dirty_for_io(folio)) {
folio_unlock(folio);
continue;
}
ret = extent_writepage(folio, bio_ctrl);
if (ret < 0) {
done = 1;
break;
}
nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
wbc->nr_to_write <= 0);
}
folio_batch_release(&fbatch);
cond_resched();
}
if (!scanned && !done) {
scanned = 1;
index = 0;
submit_write_bio(bio_ctrl, 0);
goto retry;
}
if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
mapping->writeback_index = done_index;
btrfs_add_delayed_iput(BTRFS_I(inode));
return ret;
}
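
/*
 * Submit the folios in the range to bio for call sites where the delalloc
 * range has already been run (i.e. the ordered extents were inserted) and
 * all folios are still locked.
 */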
void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
u64 start, u64 end, struct writeback_control *wbc,
bool pages_dirty)
{
bool found_error = false;
int ret = 0;
struct address_space *mapping = inode->i_mapping;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
const u32 sectorsize = fs_info->sectorsize;
loff_t i_size = i_size_read(inode);
u64 cur = start;
struct btrfs_bio_ctrl bio_ctrl = {
.wbc = wbc,
.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
};
if (wbc->no_cgroup_owner)
bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
while (cur <= end) {
u64 cur_end;
u32 cur_len;
struct folio *folio;
folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
if (IS_ERR(folio)) {
cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
cur_len = cur_end + 1 - cur;
btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
cur, cur_len, false);
mapping_set_error(mapping, PTR_ERR(folio));
cur = cur_end;
continue;
}
cur_end = min_t(u64, folio_end(folio) - 1, end);
cur_len = cur_end + 1 - cur;
ASSERT(folio_test_locked(folio));
if (pages_dirty && folio != locked_folio)
ASSERT(folio_test_dirty(folio));
bio_ctrl.submit_bitmap = (unsigned long)-1;
ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
&bio_ctrl, i_size);
if (ret == 1)
goto next_page;
if (ret)
mapping_set_error(mapping, ret);
btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
if (ret < 0)
found_error = true;
next_page:
folio_put(folio);
cur = cur_end + 1;
}
submit_write_bio(&bio_ctrl, found_error ? ret : 0);
}
int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
int ret = 0;
struct btrfs_bio_ctrl bio_ctrl = {
.wbc = wbc,
.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
};
btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
ret = extent_write_cache_pages(mapping, &bio_ctrl);
submit_write_bio(&bio_ctrl, ret);
btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
return ret;
}
void btrfs_readahead(struct readahead_control *rac)
{
struct btrfs_bio_ctrl bio_ctrl = {
.opf = REQ_OP_READ | REQ_RAHEAD,
.ractl = rac
};
struct folio *folio;
struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
const u64 start = readahead_pos(rac);
const u64 end = start + readahead_length(rac) - 1;
struct extent_state *cached_state = NULL;
struct extent_map *em_cached = NULL;
u64 prev_em_start = (u64)-1;
lock_extents_for_read(inode, start, end, &cached_state);
while ((folio = readahead_folio(rac)) != NULL)
btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
if (em_cached)
btrfs_free_extent_map(em_cached);
submit_one_bio(&bio_ctrl);
}
int extent_invalidate_folio(struct extent_io_tree *tree,
struct folio *folio, size_t offset)
{
struct extent_state *cached_state = NULL;
u64 start = folio_pos(folio);
u64 end = start + folio_size(folio) - 1;
size_t blocksize = folio_to_fs_info(folio)->sectorsize;
ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
start += ALIGN(offset, blocksize);
if (start > end)
return 0;
btrfs_lock_extent(tree, start, end, &cached_state);
folio_wait_writeback(folio);
btrfs_unlock_extent(tree, start, end, &cached_state);
return 0;
}
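
/*
 * Helper for release_folio: test the io tree state of the folio range and
 * drop the state bits if it is safe to release the folio.
 */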
static bool try_release_extent_state(struct extent_io_tree *tree,
struct folio *folio)
{
struct extent_state *cached_state = NULL;
u64 start = folio_pos(folio);
u64 end = start + folio_size(folio) - 1;
u32 range_bits;
u32 clear_bits;
bool ret = false;
int ret2;
btrfs_get_range_bits(tree, start, end, &range_bits, &cached_state);
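	/*
	 * We can release the folio if it's locked only for ordered extent
	 * completion, since that doesn't require using the folio.
	 */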
if ((range_bits & EXTENT_LOCKED) &&
!(range_bits & EXTENT_FINISHING_ORDERED))
goto out;
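	/*
	 * At this point we can safely clear everything except the locked,
	 * nodatasum, delalloc new and finishing ordered bits.  The delalloc
	 * new bit will be cleared by ordered extent completion.
	 */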
clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW |
EXTENT_CTLBITS | EXTENT_QGROUP_RESERVED |
EXTENT_FINISHING_ORDERED);
ret2 = btrfs_clear_extent_bit(tree, start, end, clear_bits, &cached_state);
if (ret2 == 0)
ret = true;
out:
btrfs_free_extent_state(cached_state);
return ret;
}
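
/*
 * Helper for release_folio: as long as there are no locked extents in the
 * range corresponding to the folio, both the io tree state records and the
 * extent maps are removed.
 */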
bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
{
u64 start = folio_pos(folio);
u64 end = start + folio_size(folio) - 1;
struct btrfs_inode *inode = folio_to_inode(folio);
struct extent_io_tree *io_tree = &inode->io_tree;
while (start <= end) {
const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
const u64 len = end - start + 1;
struct extent_map_tree *extent_tree = &inode->extent_tree;
struct extent_map *em;
write_lock(&extent_tree->lock);
em = btrfs_lookup_extent_mapping(extent_tree, start, len);
if (!em) {
write_unlock(&extent_tree->lock);
break;
}
if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
write_unlock(&extent_tree->lock);
btrfs_free_extent_map(em);
break;
}
if (btrfs_test_range_bit_exists(io_tree, em->start,
btrfs_extent_map_end(em) - 1,
EXTENT_LOCKED))
goto next;
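		/*
		 * If the extent map is not in the list of modified extents, it
		 * is not needed by a fast fsync and can be removed; the same
		 * holds while it is being logged, since any future
		 * modification re-adds it to the list.  Otherwise only remove
		 * it if its generation is old enough that no ongoing fast
		 * fsync can still need it.
		 */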
if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
goto remove_em;
if (em->generation >= cur_gen)
goto next;
remove_em:
btrfs_remove_extent_mapping(inode, em);
btrfs_free_extent_map(em);
next:
start = btrfs_extent_map_end(em);
write_unlock(&extent_tree->lock);
btrfs_free_extent_map(em);
if (need_resched()) {
if (!gfpflags_allow_blocking(mask))
break;
cond_resched();
}
}
return try_release_extent_state(io_tree, folio);
}
static int extent_buffer_under_io(const struct extent_buffer *eb)
{
return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}
static bool folio_range_has_eb(struct folio *folio)
{
struct btrfs_folio_state *bfs;
lockdep_assert_held(&folio->mapping->i_private_lock);
if (folio_test_private(folio)) {
bfs = folio_get_private(folio);
if (atomic_read(&bfs->eb_refs))
return true;
}
return false;
}
static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
struct address_space *mapping = folio->mapping;
const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
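
	/*
	 * For a mapped eb, we're going to change the folio private, which
	 * should be done under the i_private_lock.
	 */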
if (mapped)
spin_lock(&mapping->i_private_lock);
if (!folio_test_private(folio)) {
if (mapped)
spin_unlock(&mapping->i_private_lock);
return;
}
if (!btrfs_meta_is_subpage(fs_info)) {
if (folio_test_private(folio) && folio_get_private(folio) == eb) {
BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
BUG_ON(folio_test_dirty(folio));
BUG_ON(folio_test_writeback(folio));
folio_detach_private(folio);
}
if (mapped)
spin_unlock(&mapping->i_private_lock);
return;
}
if (!mapped) {
btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
return;
}
btrfs_folio_dec_eb_refs(fs_info, folio);
if (!folio_range_has_eb(folio))
btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
spin_unlock(&mapping->i_private_lock);
}
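
/* Detach the extent buffer from all folios still attached to it. */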
static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb)
{
ASSERT(!extent_buffer_under_io(eb));
for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
struct folio *folio = eb->folios[i];
if (!folio)
continue;
detach_extent_buffer_folio(eb, folio);
}
}
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
btrfs_release_extent_buffer_folios(eb);
btrfs_leak_debug_del_eb(eb);
kmem_cache_free(extent_buffer_cache, eb);
}
static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
struct extent_buffer *eb = NULL;
eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
eb->start = start;
eb->len = fs_info->nodesize;
eb->fs_info = fs_info;
init_rwsem(&eb->lock);
btrfs_leak_debug_add_eb(eb);
spin_lock_init(&eb->refs_lock);
refcount_set(&eb->refs, 1);
ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE);
return eb;
}
static void cleanup_extent_buffer_folios(struct extent_buffer *eb)
{
const int num_folios = num_extent_folios(eb);
for (int i = 0; i < num_folios; i++) {
ASSERT(eb->folios[i]);
detach_extent_buffer_folio(eb, eb->folios[i]);
folio_put(eb->folios[i]);
eb->folios[i] = NULL;
}
}
struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
{
struct extent_buffer *new;
int num_folios;
int ret;
new = __alloc_extent_buffer(src->fs_info, src->start);
if (new == NULL)
return NULL;
set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
ret = alloc_eb_folio_array(new, false);
if (ret)
goto release_eb;
ASSERT(num_extent_folios(src) == num_extent_folios(new),
"%d != %d", num_extent_folios(src), num_extent_folios(new));
num_folios = num_extent_folios(src);
for (int i = 0; i < num_folios; i++) {
struct folio *folio = new->folios[i];
ret = attach_extent_buffer_folio(new, folio, NULL);
if (ret < 0)
goto cleanup_folios;
WARN_ON(folio_test_dirty(folio));
}
for (int i = 0; i < num_folios; i++)
folio_put(new->folios[i]);
copy_extent_buffer_full(new, src);
set_extent_buffer_uptodate(new);
return new;
cleanup_folios:
cleanup_extent_buffer_folios(new);
release_eb:
btrfs_release_extent_buffer(new);
return NULL;
}
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
struct extent_buffer *eb;
int ret;
eb = __alloc_extent_buffer(fs_info, start);
if (!eb)
return NULL;
ret = alloc_eb_folio_array(eb, false);
if (ret)
goto release_eb;
for (int i = 0; i < num_extent_folios(eb); i++) {
ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
if (ret < 0)
goto cleanup_folios;
}
for (int i = 0; i < num_extent_folios(eb); i++)
folio_put(eb->folios[i]);
set_extent_buffer_uptodate(eb);
btrfs_set_header_nritems(eb, 0);
set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
return eb;
cleanup_folios:
cleanup_extent_buffer_folios(eb);
release_eb:
btrfs_release_extent_buffer(eb);
return NULL;
}
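
/*
 * Ensure the buffer tree's own reference (TREE_REF) on this eb still exists,
 * re-taking it if it was dropped.  TREE_REF is set when the eb is inserted
 * into the buffer tree and cleared either when a stale eb drops its last
 * non-tree reference or when release_folio sees only the tree reference
 * remaining.
 */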
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
int refs;
refs = refcount_read(&eb->refs);
if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
return;
spin_lock(&eb->refs_lock);
if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
refcount_inc(&eb->refs);
spin_unlock(&eb->refs_lock);
}
static void mark_extent_buffer_accessed(struct extent_buffer *eb)
{
check_buffer_tree_ref(eb);
for (int i = 0; i < num_extent_folios(eb); i++)
folio_mark_accessed(eb->folios[i]);
}
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
struct extent_buffer *eb;
eb = find_extent_buffer_nolock(fs_info, start);
if (!eb)
return NULL;
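	/*
	 * Take and drop eb->refs_lock to synchronize with a concurrent
	 * free_extent_buffer() that may have seen EXTENT_BUFFER_STALE set and
	 * be in the middle of dropping the tree reference.  Without this we
	 * could grab a reference to an eb that is about to be freed.
	 */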
if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
spin_lock(&eb->refs_lock);
spin_unlock(&eb->refs_lock);
}
mark_extent_buffer_accessed(eb);
return eb;
}
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *eb, *exists = NULL;
int ret;
eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
eb = alloc_dummy_extent_buffer(fs_info, start);
if (!eb)
return ERR_PTR(-ENOMEM);
eb->fs_info = fs_info;
again:
xa_lock_irq(&fs_info->buffer_tree);
exists = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->nodesize_bits,
NULL, eb, GFP_NOFS);
if (xa_is_err(exists)) {
ret = xa_err(exists);
xa_unlock_irq(&fs_info->buffer_tree);
btrfs_release_extent_buffer(eb);
return ERR_PTR(ret);
}
if (exists) {
if (!refcount_inc_not_zero(&exists->refs)) {
xa_unlock_irq(&fs_info->buffer_tree);
goto again;
}
xa_unlock_irq(&fs_info->buffer_tree);
btrfs_release_extent_buffer(eb);
return exists;
}
xa_unlock_irq(&fs_info->buffer_tree);
check_buffer_tree_ref(eb);
return eb;
#else
return NULL;
#endif
}
static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
struct folio *folio)
{
struct extent_buffer *exists;
lockdep_assert_held(&folio->mapping->i_private_lock);
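
	/*
	 * For the subpage case we rely entirely on the buffer tree to ensure
	 * we don't insert two ebs for the same bytenr, so always return NULL
	 * here and let the caller continue.
	 */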
if (btrfs_meta_is_subpage(fs_info))
return NULL;
if (!folio_test_private(folio))
return NULL;
exists = folio_get_private(folio);
if (refcount_inc_not_zero(&exists->refs))
return exists;
WARN_ON(folio_test_dirty(folio));
folio_detach_private(folio);
return NULL;
}
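
/* Validate the alignment constraints of an eb at logical address @start. */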
static bool check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
{
if (!IS_ALIGNED(start, fs_info->sectorsize)) {
btrfs_err(fs_info, "bad tree block start %llu", start);
return true;
}
if (fs_info->nodesize < PAGE_SIZE && !IS_ALIGNED(start, fs_info->nodesize)) {
btrfs_err(fs_info,
"tree block is not nodesize aligned, start %llu nodesize %u",
start, fs_info->nodesize);
return true;
}
if (fs_info->nodesize >= PAGE_SIZE && !PAGE_ALIGNED(start)) {
btrfs_err(fs_info,
"tree block is not page aligned, start %llu nodesize %u",
start, fs_info->nodesize);
return true;
}
if (!IS_ALIGNED(start, fs_info->nodesize) &&
!test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
btrfs_warn(fs_info,
"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
start, fs_info->nodesize);
}
return false;
}
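/*
 * Insert eb->folios[i] into the btree inode's page cache, or reuse the folio
 * already cached at that index.
 *
 * Return 0 when the folio (new or existing) has been attached to @eb, 1 when
 * the existing folio already belongs to another live extent buffer (returned
 * via @found_eb_ret), and -EAGAIN when the cached folio's size does not match
 * eb->folio_size so the caller has to re-allocate the folio array.
 */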
static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
struct btrfs_folio_state *prealloc,
struct extent_buffer **found_eb_ret)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
const pgoff_t index = eb->start >> PAGE_SHIFT;
struct folio *existing_folio;
int ret;
ASSERT(found_eb_ret);
ASSERT(eb->folios[i]);
retry:
existing_folio = NULL;
ret = filemap_add_folio(mapping, eb->folios[i], index + i,
GFP_NOFS | __GFP_NOFAIL);
if (!ret)
goto finish;
existing_folio = filemap_lock_folio(mapping, index + i);
if (IS_ERR(existing_folio))
goto retry;
ASSERT(folio_nr_pages(existing_folio) == 1);
if (folio_size(existing_folio) != eb->folio_size) {
folio_unlock(existing_folio);
folio_put(existing_folio);
return -EAGAIN;
}
finish:
spin_lock(&mapping->i_private_lock);
if (existing_folio && btrfs_meta_is_subpage(fs_info)) {
__free_page(folio_page(eb->folios[i], 0));
eb->folios[i] = existing_folio;
} else if (existing_folio) {
struct extent_buffer *existing_eb;
existing_eb = grab_extent_buffer(fs_info, existing_folio);
if (existing_eb) {
*found_eb_ret = existing_eb;
spin_unlock(&mapping->i_private_lock);
folio_unlock(existing_folio);
folio_put(existing_folio);
return 1;
}
__free_page(folio_page(eb->folios[i], 0));
eb->folios[i] = existing_folio;
}
eb->folio_size = folio_size(eb->folios[i]);
eb->folio_shift = folio_shift(eb->folios[i]);
ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
ASSERT(!ret);
btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
spin_unlock(&mapping->i_private_lock);
return 0;
}
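/*
 * Allocate (or look up) the extent buffer for the tree block at @start and
 * insert it into fs_info->buffer_tree.  If another task wins the race and
 * inserts its own buffer first, that existing buffer is returned instead and
 * the local allocation is torn down in the out: path below.
 */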
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u64 owner_root, int level)
{
int attached = 0;
struct extent_buffer *eb;
struct extent_buffer *existing_eb = NULL;
struct btrfs_folio_state *prealloc = NULL;
u64 lockdep_owner = owner_root;
bool page_contig = true;
int uptodate = 1;
int ret;
if (check_eb_alignment(fs_info, start))
return ERR_PTR(-EINVAL);
#if BITS_PER_LONG == 32
if (start >= MAX_LFS_FILESIZE) {
btrfs_err_rl(fs_info,
"extent buffer %llu is beyond 32bit page cache limit", start);
btrfs_err_32bit_limit(fs_info);
return ERR_PTR(-EOVERFLOW);
}
if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
btrfs_warn_32bit_limit(fs_info);
#endif
eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
eb = __alloc_extent_buffer(fs_info, start);
if (!eb)
return ERR_PTR(-ENOMEM);
if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
lockdep_owner = BTRFS_FS_TREE_OBJECTID;
btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
if (btrfs_meta_is_subpage(fs_info)) {
prealloc = btrfs_alloc_folio_state(fs_info, PAGE_SIZE, BTRFS_SUBPAGE_METADATA);
if (IS_ERR(prealloc)) {
ret = PTR_ERR(prealloc);
goto out;
}
}
reallocate:
ret = alloc_eb_folio_array(eb, true);
if (ret < 0) {
btrfs_free_folio_state(prealloc);
goto out;
}
for (int i = 0; i < num_extent_folios(eb); i++) {
struct folio *folio;
ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
if (ret > 0) {
ASSERT(existing_eb);
goto out;
}
if (unlikely(ret == -EAGAIN)) {
DEBUG_WARN("folio order mismatch between new eb and filemap");
goto reallocate;
}
attached++;
folio = eb->folios[i];
WARN_ON(btrfs_meta_folio_test_dirty(folio, eb));
if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
page_contig = false;
if (!btrfs_meta_folio_test_uptodate(folio, eb))
uptodate = 0;
}
if (uptodate)
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
if (page_contig)
eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
again:
xa_lock_irq(&fs_info->buffer_tree);
existing_eb = __xa_cmpxchg(&fs_info->buffer_tree,
start >> fs_info->nodesize_bits, NULL, eb,
GFP_NOFS);
if (xa_is_err(existing_eb)) {
ret = xa_err(existing_eb);
xa_unlock_irq(&fs_info->buffer_tree);
goto out;
}
if (existing_eb) {
if (!refcount_inc_not_zero(&existing_eb->refs)) {
xa_unlock_irq(&fs_info->buffer_tree);
goto again;
}
xa_unlock_irq(&fs_info->buffer_tree);
goto out;
}
xa_unlock_irq(&fs_info->buffer_tree);
check_buffer_tree_ref(eb);
for (int i = 0; i < num_extent_folios(eb); i++) {
folio_unlock(eb->folios[i]);
folio_put(eb->folios[i]);
}
return eb;
out:
WARN_ON(!refcount_dec_and_test(&eb->refs));
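/*
 * Error/raced path: every folio that was attached to this eb has to be
 * detached before it is unlocked, so that a concurrent lookup of the same
 * page cache index cannot grab the half-constructed eb we are about to free.
 * Folios past @attached were never attached and only need the allocation
 * reference dropped.
 */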
for (int i = 0; i < num_extent_pages(eb); i++) {
struct folio *folio = eb->folios[i];
if (i < attached) {
ASSERT(folio);
detach_extent_buffer_folio(eb, folio);
folio_unlock(folio);
} else if (!folio) {
continue;
}
folio_put(folio);
eb->folios[i] = NULL;
}
btrfs_release_extent_buffer(eb);
if (ret < 0)
return ERR_PTR(ret);
ASSERT(existing_eb);
return existing_eb;
}
static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
struct extent_buffer *eb =
container_of(head, struct extent_buffer, rcu_head);
kmem_cache_free(extent_buffer_cache, eb);
}
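/*
 * Drop one reference from @eb with eb->refs_lock held.  If that was the last
 * reference, remove the buffer from fs_info->buffer_tree, release its folios
 * and free it (directly for unmapped test buffers, otherwise after an RCU
 * grace period).  Returns 1 if the buffer was freed, 0 otherwise; refs_lock
 * is always released.
 */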
static int release_extent_buffer(struct extent_buffer *eb)
__releases(&eb->refs_lock)
{
lockdep_assert_held(&eb->refs_lock);
if (refcount_dec_and_test(&eb->refs)) {
struct btrfs_fs_info *fs_info = eb->fs_info;
spin_unlock(&eb->refs_lock);
xa_cmpxchg_irq(&fs_info->buffer_tree,
eb->start >> fs_info->nodesize_bits, eb, NULL,
GFP_ATOMIC);
btrfs_leak_debug_del_eb(eb);
btrfs_release_extent_buffer_folios(eb);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
kmem_cache_free(extent_buffer_cache, eb);
return 1;
}
#endif
call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
return 1;
}
spin_unlock(&eb->refs_lock);
return 0;
}
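/*
 * Drop a reference to @eb.  While the reference count is clearly above the
 * implicit references (more than 3 for mapped buffers, more than 1 for
 * unmapped ones), the count is decremented with a lockless cmpxchg.  Only
 * near that threshold do we take refs_lock, where a stale buffer that is not
 * under IO also loses its tree reference before release_extent_buffer()
 * drops ours.
 */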
void free_extent_buffer(struct extent_buffer *eb)
{
int refs;
if (!eb)
return;
refs = refcount_read(&eb->refs);
while (1) {
if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) {
if (refs == 1)
break;
} else if (refs <= 3) {
break;
}
if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1))
return;
}
spin_lock(&eb->refs_lock);
if (refcount_read(&eb->refs) == 2 &&
test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
!extent_buffer_under_io(eb) &&
test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
refcount_dec(&eb->refs);
release_extent_buffer(eb);
}
void free_extent_buffer_stale(struct extent_buffer *eb)
{
if (!eb)
return;
spin_lock(&eb->refs_lock);
set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
refcount_dec(&eb->refs);
release_extent_buffer(eb);
}
static void btree_clear_folio_dirty_tag(struct folio *folio)
{
ASSERT(!folio_test_dirty(folio));
ASSERT(folio_test_locked(folio));
xa_lock_irq(&folio->mapping->i_pages);
if (!folio_test_dirty(folio))
__xa_clear_mark(&folio->mapping->i_pages, folio->index,
PAGECACHE_TAG_DIRTY);
xa_unlock_irq(&folio->mapping->i_pages);
}
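/*
 * Clear the dirty state of @eb and of its folios, adjusting the per-fs dirty
 * metadata counter and the page cache dirty tag.  On zoned filesystems the
 * buffer is only flagged ZONED_ZEROOUT instead of being undirtied, so it can
 * still be written out (zeroed) later and write ordering is preserved.
 */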
void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
btrfs_assert_tree_write_locked(eb);
if (trans && btrfs_header_generation(eb) != trans->transid)
return;
if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
return;
}
if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
return;
buffer_tree_clear_mark(eb, PAGECACHE_TAG_DIRTY);
percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
fs_info->dirty_metadata_batch);
for (int i = 0; i < num_extent_folios(eb); i++) {
struct folio *folio = eb->folios[i];
bool last;
if (!folio_test_dirty(folio))
continue;
folio_lock(folio);
last = btrfs_meta_folio_clear_and_test_dirty(folio, eb);
if (last)
btree_clear_folio_dirty_tag(folio);
folio_unlock(folio);
}
WARN_ON(refcount_read(&eb->refs) == 0);
}
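/*
 * Mark @eb and all of its folios dirty and account the bytes in
 * dirty_metadata_bytes.  For subpage metadata the first folio is locked
 * around the per-folio dirty updates, so setting our dirty bits cannot race
 * with btrfs_clear_buffer_dirty() clearing the folio dirty flag on behalf of
 * a neighbouring buffer in the same folio.
 */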
void set_extent_buffer_dirty(struct extent_buffer *eb)
{
bool was_dirty;
check_buffer_tree_ref(eb);
was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
WARN_ON(refcount_read(&eb->refs) == 0);
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
if (!was_dirty) {
bool subpage = btrfs_meta_is_subpage(eb->fs_info);
if (subpage)
folio_lock(eb->folios[0]);
for (int i = 0; i < num_extent_folios(eb); i++)
btrfs_meta_folio_set_dirty(eb->folios[i], eb);
buffer_tree_set_mark(eb, PAGECACHE_TAG_DIRTY);
if (subpage)
folio_unlock(eb->folios[0]);
percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
eb->len,
eb->fs_info->dirty_metadata_batch);
}
#ifdef CONFIG_BTRFS_DEBUG
for (int i = 0; i < num_extent_folios(eb); i++)
ASSERT(folio_test_dirty(eb->folios[i]));
#endif
}
void clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
for (int i = 0; i < num_extent_folios(eb); i++) {
struct folio *folio = eb->folios[i];
if (!folio)
continue;
btrfs_meta_folio_clear_uptodate(folio, eb);
}
}
void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
for (int i = 0; i < num_extent_folios(eb); i++)
btrfs_meta_folio_set_uptodate(eb->folios[i], eb);
}
static void clear_extent_buffer_reading(struct extent_buffer *eb)
{
clear_and_wake_up_bit(EXTENT_BUFFER_READING, &eb->bflags);
}
static void end_bbio_meta_read(struct btrfs_bio *bbio)
{
struct extent_buffer *eb = bbio->private;
bool uptodate = !bbio->bio.bi_status;
WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
eb->read_mirror = bbio->mirror_num;
if (uptodate &&
btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
uptodate = false;
if (uptodate)
set_extent_buffer_uptodate(eb);
else
clear_extent_buffer_uptodate(eb);
clear_extent_buffer_reading(eb);
free_extent_buffer(eb);
bio_put(&bbio->bio);
}
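/*
 * Kick off a read of @eb's folios without waiting for completion.  The
 * EXTENT_BUFFER_READING bit provides exclusion: if another task already owns
 * it we return 0, and callers that need the data wait for the bit in
 * read_extent_buffer_pages().  The extra reference taken here is dropped by
 * end_bbio_meta_read() when the bio completes.
 */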
int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
const struct btrfs_tree_parent_check *check)
{
struct btrfs_bio *bbio;
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
return -EIO;
if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
return 0;
if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
clear_extent_buffer_reading(eb);
return 0;
}
eb->read_mirror = 0;
check_buffer_tree_ref(eb);
refcount_inc(&eb->refs);
bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
REQ_OP_READ | REQ_META, eb->fs_info,
end_bbio_meta_read, eb);
bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
bbio->file_offset = eb->start;
memcpy(&bbio->parent_check, check, sizeof(*check));
for (int i = 0; i < num_extent_folios(eb); i++) {
struct folio *folio = eb->folios[i];
u64 range_start = max_t(u64, eb->start, folio_pos(folio));
u32 range_len = min_t(u64, folio_end(folio),
eb->start + eb->len) - range_start;
bio_add_folio_nofail(&bbio->bio, folio, range_len,
offset_in_folio(folio, range_start));
}
btrfs_submit_bbio(bbio, mirror_num);
return 0;
}
int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
const struct btrfs_tree_parent_check *check)
{
int ret;
ret = read_extent_buffer_pages_nowait(eb, mirror_num, check);
if (ret < 0)
return ret;
wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return -EIO;
return 0;
}
static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
unsigned long len)
{
btrfs_warn(eb->fs_info,
"access to eb bytenr %llu len %u out of range start %lu len %lu",
eb->start, eb->len, start, len);
DEBUG_WARN();
return true;
}
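/*
 * Check that [@start, @start + @len) stays inside the extent buffer.  @start
 * and @len are offsets within the buffer, not logical bytenrs.  Returns
 * non-zero and warns if the range is out of bounds.
 */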
static inline int check_eb_range(const struct extent_buffer *eb,
unsigned long start, unsigned long len)
{
unsigned long offset;
if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
return report_eb_range(eb, start, len);
return false;
}
void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
unsigned long start, unsigned long len)
{
const int unit_size = eb->folio_size;
size_t cur;
size_t offset;
char *dst = (char *)dstv;
unsigned long i = get_eb_folio_index(eb, start);
if (check_eb_range(eb, start, len)) {
memset(dstv, 0, len);
return;
}
if (eb->addr) {
memcpy(dstv, eb->addr + start, len);
return;
}
offset = get_eb_offset_in_folio(eb, start);
while (len > 0) {
char *kaddr;
cur = min(len, unit_size - offset);
kaddr = folio_address(eb->folios[i]);
memcpy(dst, kaddr + offset, cur);
dst += cur;
len -= cur;
offset = 0;
i++;
}
}
int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
void __user *dstv,
unsigned long start, unsigned long len)
{
const int unit_size = eb->folio_size;
size_t cur;
size_t offset;
char __user *dst = (char __user *)dstv;
unsigned long i = get_eb_folio_index(eb, start);
int ret = 0;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
if (eb->addr) {
if (copy_to_user_nofault(dstv, eb->addr + start, len))
ret = -EFAULT;
return ret;
}
offset = get_eb_offset_in_folio(eb, start);
while (len > 0) {
char *kaddr;
cur = min(len, unit_size - offset);
kaddr = folio_address(eb->folios[i]);
if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
ret = -EFAULT;
break;
}
dst += cur;
len -= cur;
offset = 0;
i++;
}
return ret;
}
int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
unsigned long start, unsigned long len)
{
const int unit_size = eb->folio_size;
size_t cur;
size_t offset;
char *kaddr;
char *ptr = (char *)ptrv;
unsigned long i = get_eb_folio_index(eb, start);
int ret = 0;
if (check_eb_range(eb, start, len))
return -EINVAL;
if (eb->addr)
return memcmp(ptrv, eb->addr + start, len);
offset = get_eb_offset_in_folio(eb, start);
while (len > 0) {
cur = min(len, unit_size - offset);
kaddr = folio_address(eb->folios[i]);
ret = memcmp(ptr, kaddr + offset, cur);
if (ret)
break;
ptr += cur;
len -= cur;
offset = 0;
i++;
}
return ret;
}
static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
struct folio *folio = eb->folios[i];
ASSERT(folio);
if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
return;
if (btrfs_meta_is_subpage(fs_info)) {
folio = eb->folios[0];
ASSERT(i == 0);
if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
eb->start, eb->len)))
btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
} else {
WARN_ON(!folio_test_uptodate(folio));
}
}
static void __write_extent_buffer(const struct extent_buffer *eb,
const void *srcv, unsigned long start,
unsigned long len, bool use_memmove)
{
const int unit_size = eb->folio_size;
size_t cur;
size_t offset;
char *kaddr;
const char *src = (const char *)srcv;
unsigned long i = get_eb_folio_index(eb, start);
const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
if (check_eb_range(eb, start, len))
return;
if (eb->addr) {
if (use_memmove)
memmove(eb->addr + start, srcv, len);
else
memcpy(eb->addr + start, srcv, len);
return;
}
offset = get_eb_offset_in_folio(eb, start);
while (len > 0) {
if (check_uptodate)
assert_eb_folio_uptodate(eb, i);
cur = min(len, unit_size - offset);
kaddr = folio_address(eb->folios[i]);
if (use_memmove)
memmove(kaddr + offset, src, cur);
else
memcpy(kaddr + offset, src, cur);
src += cur;
len -= cur;
offset = 0;
i++;
}
}
void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
unsigned long start, unsigned long len)
{
return __write_extent_buffer(eb, srcv, start, len, false);
}
static void memset_extent_buffer(const struct extent_buffer *eb, int c,
unsigned long start, unsigned long len)
{
const int unit_size = eb->folio_size;
unsigned long cur = start;
if (eb->addr) {
memset(eb->addr + start, c, len);
return;
}
while (cur < start + len) {
unsigned long index = get_eb_folio_index(eb, cur);
unsigned int offset = get_eb_offset_in_folio(eb, cur);
unsigned int cur_len = min(start + len - cur, unit_size - offset);
assert_eb_folio_uptodate(eb, index);
memset(folio_address(eb->folios[index]) + offset, c, cur_len);
cur += cur_len;
}
}
void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
unsigned long len)
{
if (check_eb_range(eb, start, len))
return;
return memset_extent_buffer(eb, 0, start, len);
}
void copy_extent_buffer_full(const struct extent_buffer *dst,
const struct extent_buffer *src)
{
const int unit_size = src->folio_size;
unsigned long cur = 0;
ASSERT(dst->len == src->len);
while (cur < src->len) {
unsigned long index = get_eb_folio_index(src, cur);
unsigned long offset = get_eb_offset_in_folio(src, cur);
unsigned long cur_len = min(src->len, unit_size - offset);
void *addr = folio_address(src->folios[index]) + offset;
write_extent_buffer(dst, addr, cur, cur_len);
cur += cur_len;
}
}
void copy_extent_buffer(const struct extent_buffer *dst,
const struct extent_buffer *src,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
const int unit_size = dst->folio_size;
u64 dst_len = dst->len;
size_t cur;
size_t offset;
char *kaddr;
unsigned long i = get_eb_folio_index(dst, dst_offset);
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(src, src_offset, len))
return;
WARN_ON(src->len != dst_len);
offset = get_eb_offset_in_folio(dst, dst_offset);
while (len > 0) {
assert_eb_folio_uptodate(dst, i);
cur = min(len, (unsigned long)(unit_size - offset));
kaddr = folio_address(dst->folios[i]);
read_extent_buffer(src, kaddr + offset, src_offset, cur);
src_offset += cur;
len -= cur;
offset = 0;
i++;
}
}
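/*
 * Translate bit number @nr of the bitmap that begins at eb offset @start into
 * a folio index and a byte offset within that folio, taking into account
 * where the buffer itself starts inside its first folio.
 */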
static inline void eb_bitmap_offset(const struct extent_buffer *eb,
unsigned long start, unsigned long nr,
unsigned long *folio_index,
size_t *folio_offset)
{
size_t byte_offset = BIT_BYTE(nr);
size_t offset;
offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
*folio_index = offset >> eb->folio_shift;
*folio_offset = offset_in_eb_folio(eb, offset);
}
bool extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
unsigned long nr)
{
unsigned long i;
size_t offset;
u8 *kaddr;
eb_bitmap_offset(eb, start, nr, &i, &offset);
assert_eb_folio_uptodate(eb, i);
kaddr = folio_address(eb->folios[i]);
return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}
static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
{
unsigned long index = get_eb_folio_index(eb, bytenr);
if (check_eb_range(eb, bytenr, 1))
return NULL;
return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
}
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len)
{
unsigned int first_byte = start + BIT_BYTE(pos);
unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
const bool same_byte = (first_byte == last_byte);
u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
u8 *kaddr;
if (same_byte)
mask &= BITMAP_LAST_BYTE_MASK(pos + len);
kaddr = extent_buffer_get_byte(eb, first_byte);
*kaddr |= mask;
if (same_byte)
return;
ASSERT(first_byte + 1 <= last_byte);
memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
kaddr = extent_buffer_get_byte(eb, last_byte);
*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
}
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
unsigned long start, unsigned long pos,
unsigned long len)
{
unsigned int first_byte = start + BIT_BYTE(pos);
unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
const bool same_byte = (first_byte == last_byte);
u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
u8 *kaddr;
if (same_byte)
mask &= BITMAP_LAST_BYTE_MASK(pos + len);
kaddr = extent_buffer_get_byte(eb, first_byte);
*kaddr &= ~mask;
if (same_byte)
return;
ASSERT(first_byte + 1 <= last_byte);
memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
kaddr = extent_buffer_get_byte(eb, last_byte);
*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
}
static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
unsigned long distance = (src > dst) ? src - dst : dst - src;
return distance < len;
}
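/*
 * Copy @len bytes inside a single extent buffer, from @src_offset to
 * @dst_offset.  The copy walks forward folio by folio and switches to
 * memmove() whenever the current chunk of source and destination overlap;
 * memmove_extent_buffer() below handles the overlapping backwards case.
 */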
void memcpy_extent_buffer(const struct extent_buffer *dst,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
const int unit_size = dst->folio_size;
unsigned long cur_off = 0;
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(dst, src_offset, len))
return;
if (dst->addr) {
const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
if (use_memmove)
memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
else
memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
return;
}
while (cur_off < len) {
unsigned long cur_src = cur_off + src_offset;
unsigned long folio_index = get_eb_folio_index(dst, cur_src);
unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
unsigned long cur_len = min(src_offset + len - cur_src,
unit_size - folio_off);
void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
const bool use_memmove = areas_overlap(src_offset + cur_off,
dst_offset + cur_off, cur_len);
__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
use_memmove);
cur_off += cur_len;
}
}
void memmove_extent_buffer(const struct extent_buffer *dst,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
unsigned long dst_end = dst_offset + len - 1;
unsigned long src_end = src_offset + len - 1;
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(dst, src_offset, len))
return;
if (dst_offset < src_offset) {
memcpy_extent_buffer(dst, dst_offset, src_offset, len);
return;
}
if (dst->addr) {
memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
return;
}
while (len > 0) {
unsigned long src_i;
size_t cur;
size_t dst_off_in_folio;
size_t src_off_in_folio;
void *src_addr;
bool use_memmove;
src_i = get_eb_folio_index(dst, src_end);
dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
cur = min_t(unsigned long, len, src_off_in_folio + 1);
cur = min(cur, dst_off_in_folio + 1);
src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
cur + 1;
use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
cur);
__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
use_memmove);
dst_end -= cur;
src_end -= cur;
len -= cur;
}
}
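/*
 * Subpage variant of try_release_extent_buffer(): walk every extent buffer
 * tracked in fs_info->buffer_tree for this folio's range and release those
 * that only hold their tree reference and are not under IO.  The folio can
 * only be freed if all of them went away, i.e. folio private was cleared.
 */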
static int try_release_subpage_extent_buffer(struct folio *folio)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
struct extent_buffer *eb;
unsigned long start = (folio_pos(folio) >> fs_info->nodesize_bits);
unsigned long index = start;
unsigned long end = index + (PAGE_SIZE >> fs_info->nodesize_bits) - 1;
int ret;
rcu_read_lock();
xa_for_each_range(&fs_info->buffer_tree, index, eb, start, end) {
spin_lock(&eb->refs_lock);
rcu_read_unlock();
if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
rcu_read_lock();
continue;
}
if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
spin_unlock(&eb->refs_lock);
break;
}
release_extent_buffer(eb);
rcu_read_lock();
}
rcu_read_unlock();
spin_lock(&folio->mapping->i_private_lock);
if (!folio_test_private(folio))
ret = 1;
else
ret = 0;
spin_unlock(&folio->mapping->i_private_lock);
return ret;
}
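/*
 * Called when the page cache wants to free @folio.  Returns 1 if the folio no
 * longer backs a live extent buffer and can be released, 0 if its buffer is
 * still referenced or under IO.
 */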
int try_release_extent_buffer(struct folio *folio)
{
struct extent_buffer *eb;
if (btrfs_meta_is_subpage(folio_to_fs_info(folio)))
return try_release_subpage_extent_buffer(folio);
spin_lock(&folio->mapping->i_private_lock);
if (!folio_test_private(folio)) {
spin_unlock(&folio->mapping->i_private_lock);
return 1;
}
eb = folio_get_private(folio);
BUG_ON(!eb);
spin_lock(&eb->refs_lock);
if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
spin_unlock(&folio->mapping->i_private_lock);
return 0;
}
spin_unlock(&folio->mapping->i_private_lock);
if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
spin_unlock(&eb->refs_lock);
return 0;
}
return release_extent_buffer(eb);
}
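/*
 * Start readahead of the tree block at @bytenr: create or find the extent
 * buffer and submit a non-blocking read unless it is already uptodate for
 * @gen.  The local reference is dropped right away; the readahead result
 * simply stays in the page cache for a future reader.
 */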
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 owner_root, u64 gen, int level)
{
struct btrfs_tree_parent_check check = {
.level = level,
.transid = gen
};
struct extent_buffer *eb;
int ret;
eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
if (IS_ERR(eb))
return;
if (btrfs_buffer_uptodate(eb, gen, 1)) {
free_extent_buffer(eb);
return;
}
ret = read_extent_buffer_pages_nowait(eb, 0, &check);
if (ret < 0)
free_extent_buffer_stale(eb);
else
free_extent_buffer(eb);
}
void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
{
btrfs_readahead_tree_block(node->fs_info,
btrfs_node_blockptr(node, slot),
btrfs_header_owner(node),
btrfs_node_ptr_generation(node, slot),
btrfs_header_level(node) - 1);
}