GitHub Repository: awilliam/linux-vfio
Path: blob/master/fs/btrfs/inode.c
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
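/*
 * The file type bits of i_mode (S_IFMT) live in bits 12-15, so shifting
 * a mode right by S_SHIFT turns each type into a small array index: for
 * example S_IFDIR is 0040000 octal, and 0040000 >> 12 == 4. The table
 * below uses that index to map VFS mode bits to btrfs dir-item types.
 */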
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
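/*
 * Note that an inline extent stores the file bytes themselves inside the
 * leaf item, so the write_extent_buffer() calls below copy page contents
 * directly into btree metadata rather than pointing at separate data
 * extents on disk.
 */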
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = btrfs_ino(inode);
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	BUG_ON(ret);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage, KM_USER0);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr, KM_USER0);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);

	return 0;
fail:
	btrfs_free_path(path);
	return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
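/*
 * A worked example of the aligned_end round-up below: with a 4096 byte
 * sectorsize and end == 5000, aligned_end = (5000 + 4095) & ~4095 = 8192,
 * so the btrfs_drop_extents call covers whole sectors.
 */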
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	     (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	BUG_ON(ret);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
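/*
 * In this file the two phases are compress_file_range(), the CPU-bound
 * step run from async_cow_start(), and submit_compressed_extents(),
 * which allocates extents and submits the bios and is called back in
 * queue order from async_cow_submit().
 */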
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;

	/* if this is a small write inside eof, kick off a defrag */
	if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that the amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		BUG_ON(!pages);

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		trans = btrfs_join_transaction(root);
		BUG_ON(IS_ERR(trans));
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret == 0) {
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
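/*
 * Note the retry path below: if btrfs_reserve_extent() fails for a
 * compressed extent, the compressed pages are freed, async_extent->pages
 * is cleared, and the range is resubmitted through cow_file_range() as
 * ordinary uncompressed IO.
 */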
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;


	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);

		trans = btrfs_join_transaction(root);
		BUG_ON(IS_ERR(trans));
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		btrfs_end_transaction(trans, root);

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1, GFP_NOFS);
			goto retry;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		BUG_ON(!em);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	return 0;
}

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

static inline bool is_free_space_inode(struct btrfs_root *root,
				       struct inode *inode)
{
	if (root == root->fs_info->tree_root ||
	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
		return true;
	return false;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
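/*
 * cow_file_range() may carve the range into several disk extents: each
 * btrfs_reserve_extent() call in the loop below can return less space
 * than was asked for, and the loop keeps allocating until disk_num_bytes
 * is used up.
 */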
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(is_free_space_inode(root, inode));
	trans = btrfs_join_transaction(root);
	BUG_ON(IS_ERR(trans));
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	/* if this is a small write inside eof, kick off defrag */
	if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024)
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map();
		BUG_ON(!em);
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}

/*
 * work queue call back to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * when the nocow writeback callback runs, this checks for snapshots or
 * COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
			      struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	nolock = is_free_space_inode(root, inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	BUG_ON(IS_ERR(trans));
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			BUG_ON(ret);
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	if (nolock) {
		ret = btrfs_end_transaction_nolock(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
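/*
 * The dispatch below: NODATACOW inodes always take the nocow path,
 * prealloc inodes take it best-effort (force == 0), inodes with no
 * compression anywhere go straight to cow_file_range(), and everything
 * else goes through the async compression path.
 */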
1270
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1271
u64 start, u64 end, int *page_started,
1272
unsigned long *nr_written)
1273
{
1274
int ret;
1275
struct btrfs_root *root = BTRFS_I(inode)->root;
1276
1277
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1278
ret = run_delalloc_nocow(inode, locked_page, start, end,
1279
page_started, 1, nr_written);
1280
else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1281
ret = run_delalloc_nocow(inode, locked_page, start, end,
1282
page_started, 0, nr_written);
1283
else if (!btrfs_test_opt(root, COMPRESS) &&
1284
!(BTRFS_I(inode)->force_compress) &&
1285
!(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
1286
ret = cow_file_range(inode, locked_page, start, end,
1287
page_started, nr_written, 1);
1288
else
1289
ret = cow_file_range_async(inode, locked_page, start, end,
1290
page_started, nr_written);
1291
return ret;
1292
}
1293
1294
static int btrfs_split_extent_hook(struct inode *inode,
1295
struct extent_state *orig, u64 split)
1296
{
1297
/* not delalloc, ignore it */
1298
if (!(orig->state & EXTENT_DELALLOC))
1299
return 0;
1300
1301
atomic_inc(&BTRFS_I(inode)->outstanding_extents);
1302
return 0;
1303
}
1304
1305
/*
1306
* extent_io.c merge_extent_hook, used to track merged delayed allocation
1307
* extents so we can keep track of new extents that are just merged onto old
1308
* extents, such as when we are doing sequential writes, so we can properly
1309
* account for the metadata space we'll need.
1310
*/
1311
static int btrfs_merge_extent_hook(struct inode *inode,
1312
struct extent_state *new,
1313
struct extent_state *other)
1314
{
1315
/* not delalloc, ignore it */
1316
if (!(other->state & EXTENT_DELALLOC))
1317
return 0;
1318
1319
atomic_dec(&BTRFS_I(inode)->outstanding_extents);
1320
return 0;
1321
}
1322
1323
/*
1324
* extent_io.c set_bit_hook, used to track delayed allocation
1325
* bytes in this file, and to maintain the list of inodes that
1326
* have pending delalloc work to be done.
1327
*/
1328
static int btrfs_set_bit_hook(struct inode *inode,
1329
struct extent_state *state, int *bits)
1330
{
1331
1332
/*
1333
* set_bit and clear bit hooks normally require _irqsave/restore
1334
* but in this case, we are only testing for the DELALLOC
1335
* bit, which is only set or cleared with irqs on
1336
*/
1337
if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1338
struct btrfs_root *root = BTRFS_I(inode)->root;
1339
u64 len = state->end + 1 - state->start;
1340
bool do_list = !is_free_space_inode(root, inode);
1341
1342
if (*bits & EXTENT_FIRST_DELALLOC)
1343
*bits &= ~EXTENT_FIRST_DELALLOC;
1344
else
1345
atomic_inc(&BTRFS_I(inode)->outstanding_extents);
1346
1347
spin_lock(&root->fs_info->delalloc_lock);
1348
BTRFS_I(inode)->delalloc_bytes += len;
1349
root->fs_info->delalloc_bytes += len;
1350
if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1351
list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1352
&root->fs_info->delalloc_inodes);
1353
}
1354
spin_unlock(&root->fs_info->delalloc_lock);
1355
}
1356
return 0;
1357
}
1358
1359
/*
1360
* extent_io.c clear_bit_hook, see set_bit_hook for why
1361
*/
1362
static int btrfs_clear_bit_hook(struct inode *inode,
1363
struct extent_state *state, int *bits)
1364
{
1365
/*
1366
* set_bit and clear bit hooks normally require _irqsave/restore
1367
* but in this case, we are only testing for the DELALLOC
1368
* bit, which is only set or cleared with irqs on
1369
*/
1370
if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1371
struct btrfs_root *root = BTRFS_I(inode)->root;
1372
u64 len = state->end + 1 - state->start;
1373
bool do_list = !is_free_space_inode(root, inode);
1374
1375
if (*bits & EXTENT_FIRST_DELALLOC)
1376
*bits &= ~EXTENT_FIRST_DELALLOC;
1377
else if (!(*bits & EXTENT_DO_ACCOUNTING))
1378
atomic_dec(&BTRFS_I(inode)->outstanding_extents);
1379
1380
if (*bits & EXTENT_DO_ACCOUNTING)
1381
btrfs_delalloc_release_metadata(inode, len);
1382
1383
if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1384
&& do_list)
1385
btrfs_free_reserved_data_space(inode, len);
1386
1387
spin_lock(&root->fs_info->delalloc_lock);
1388
root->fs_info->delalloc_bytes -= len;
1389
BTRFS_I(inode)->delalloc_bytes -= len;
1390
1391
if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1392
!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1393
list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1394
}
1395
spin_unlock(&root->fs_info->delalloc_lock);
1396
}
1397
return 0;
1398
}
1399
1400
/*
1401
* extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1402
* we don't create bios that span stripes or chunks
1403
*/
1404
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1405
size_t size, struct bio *bio,
1406
unsigned long bio_flags)
1407
{
1408
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1409
struct btrfs_mapping_tree *map_tree;
1410
u64 logical = (u64)bio->bi_sector << 9;
1411
u64 length = 0;
1412
u64 map_length;
1413
int ret;
1414
1415
if (bio_flags & EXTENT_BIO_COMPRESSED)
1416
return 0;
1417
1418
length = bio->bi_size;
1419
map_tree = &root->fs_info->mapping_tree;
1420
map_length = length;
1421
ret = btrfs_map_block(map_tree, READ, logical,
1422
&map_length, NULL, 0);
1423
1424
if (map_length < length + size)
1425
return 1;
1426
return ret;
1427
}
1428
1429
/*
1430
* in order to insert checksums into the metadata in large chunks,
1431
* we wait until bio submission time. All the pages in the bio are
1432
* checksummed and sums are attached onto the ordered extent record.
1433
*
1434
* At IO completion time the cums attached on the ordered extent record
1435
* are inserted into the btree
1436
*/
1437
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1438
struct bio *bio, int mirror_num,
1439
unsigned long bio_flags,
1440
u64 bio_offset)
1441
{
1442
struct btrfs_root *root = BTRFS_I(inode)->root;
1443
int ret = 0;
1444
1445
ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1446
BUG_ON(ret);
1447
return 0;
1448
}
1449
1450
/*
1451
* in order to insert checksums into the metadata in large chunks,
1452
* we wait until bio submission time. All the pages in the bio are
1453
* checksummed and sums are attached onto the ordered extent record.
1454
*
1455
* At IO completion time the cums attached on the ordered extent record
1456
* are inserted into the btree
1457
*/
1458
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1459
int mirror_num, unsigned long bio_flags,
1460
u64 bio_offset)
1461
{
1462
struct btrfs_root *root = BTRFS_I(inode)->root;
1463
return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1464
}
1465
1466
/*
1467
* extent_io.c submission hook. This does the right thing for csum calculation
1468
* on write, or reading the csums from the tree before a read
1469
*/
1470
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1471
int mirror_num, unsigned long bio_flags,
1472
u64 bio_offset)
1473
{
1474
struct btrfs_root *root = BTRFS_I(inode)->root;
1475
int ret = 0;
1476
int skip_sum;
1477
1478
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1479
1480
if (is_free_space_inode(root, inode))
1481
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
1482
else
1483
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1484
BUG_ON(ret);
1485
1486
if (!(rw & REQ_WRITE)) {
1487
if (bio_flags & EXTENT_BIO_COMPRESSED) {
1488
return btrfs_submit_compressed_read(inode, bio,
1489
mirror_num, bio_flags);
1490
} else if (!skip_sum) {
1491
ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1492
if (ret)
1493
return ret;
1494
}
1495
goto mapit;
1496
} else if (!skip_sum) {
1497
/* csum items have already been cloned */
1498
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1499
goto mapit;
1500
/* we're doing a write, do the async checksumming */
1501
return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1502
inode, rw, bio, mirror_num,
1503
bio_flags, bio_offset,
1504
__btrfs_submit_bio_start,
1505
__btrfs_submit_bio_done);
1506
}
1507
1508
mapit:
1509
return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1510
}
1511
1512
/*
1513
* given a list of ordered sums record them in the inode. This happens
1514
* at IO completion time based on sums calculated at bio submission time.
1515
*/
1516
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1517
struct inode *inode, u64 file_offset,
1518
struct list_head *list)
1519
{
1520
struct btrfs_ordered_sum *sum;
1521
1522
list_for_each_entry(sum, list, list) {
1523
btrfs_csum_file_blocks(trans,
1524
BTRFS_I(inode)->root->fs_info->csum_root, sum);
1525
}
1526
return 0;
1527
}
1528
1529
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1530
struct extent_state **cached_state)
1531
{
1532
if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1533
WARN_ON(1);
1534
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1535
cached_state, GFP_NOFS);
1536
}
1537
1538
/* see btrfs_writepage_start_hook for details on why this is required */
1539
struct btrfs_writepage_fixup {
1540
struct page *page;
1541
struct btrfs_work work;
1542
};
1543
1544
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1545
{
1546
struct btrfs_writepage_fixup *fixup;
1547
struct btrfs_ordered_extent *ordered;
1548
struct extent_state *cached_state = NULL;
1549
struct page *page;
1550
struct inode *inode;
1551
u64 page_start;
1552
u64 page_end;
1553
1554
fixup = container_of(work, struct btrfs_writepage_fixup, work);
1555
page = fixup->page;
1556
again:
1557
lock_page(page);
1558
if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1559
ClearPageChecked(page);
1560
goto out_page;
1561
}
1562
1563
inode = page->mapping->host;
1564
page_start = page_offset(page);
1565
page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1566
1567
lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1568
&cached_state, GFP_NOFS);
1569
1570
/* already ordered? We're done */
1571
if (PagePrivate2(page))
1572
goto out;
1573
1574
ordered = btrfs_lookup_ordered_extent(inode, page_start);
1575
if (ordered) {
1576
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1577
page_end, &cached_state, GFP_NOFS);
1578
unlock_page(page);
1579
btrfs_start_ordered_extent(inode, ordered, 1);
1580
goto again;
1581
}
1582
1583
BUG();
1584
btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1585
ClearPageChecked(page);
1586
out:
1587
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1588
&cached_state, GFP_NOFS);
1589
out_page:
1590
unlock_page(page);
1591
page_cache_release(page);
1592
kfree(fixup);
1593
}
1594
1595
/*
1596
* There are a few paths in the higher layers of the kernel that directly
1597
* set the page dirty bit without asking the filesystem if it is a
1598
* good idea. This causes problems because we want to make sure COW
1599
* properly happens and the data=ordered rules are followed.
1600
*
1601
* In our case any range that doesn't have the ORDERED bit set
1602
* hasn't been properly setup for IO. We kick off an async process
1603
* to fix it up. The async helper will wait for ordered extents, set
1604
* the delalloc bit and make it safe to write the page.
1605
*/
1606
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1607
{
1608
struct inode *inode = page->mapping->host;
1609
struct btrfs_writepage_fixup *fixup;
1610
struct btrfs_root *root = BTRFS_I(inode)->root;
1611
1612
/* this page is properly in the ordered list */
1613
if (TestClearPagePrivate2(page))
1614
return 0;
1615
1616
if (PageChecked(page))
1617
return -EAGAIN;
1618
1619
fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1620
if (!fixup)
1621
return -EAGAIN;
1622
1623
SetPageChecked(page);
1624
page_cache_get(page);
1625
fixup->work.func = btrfs_writepage_fixup_worker;
1626
fixup->page = page;
1627
btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1628
return -EAGAIN;
1629
}
1630
1631
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1632
struct inode *inode, u64 file_pos,
1633
u64 disk_bytenr, u64 disk_num_bytes,
1634
u64 num_bytes, u64 ram_bytes,
1635
u8 compression, u8 encryption,
1636
u16 other_encoding, int extent_type)
1637
{
1638
struct btrfs_root *root = BTRFS_I(inode)->root;
1639
struct btrfs_file_extent_item *fi;
1640
struct btrfs_path *path;
1641
struct extent_buffer *leaf;
1642
struct btrfs_key ins;
1643
u64 hint;
1644
int ret;
1645
1646
path = btrfs_alloc_path();
1647
BUG_ON(!path);
1648
1649
path->leave_spinning = 1;
1650
1651
/*
1652
* we may be replacing one extent in the tree with another.
1653
* The new extent is pinned in the extent map, and we don't want
1654
* to drop it from the cache until it is completely in the btree.
1655
*
1656
* So, tell btrfs_drop_extents to leave this extent in the cache.
1657
* the caller is expected to unpin it and allow it to be merged
1658
* with the others.
1659
*/
1660
ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1661
&hint, 0);
1662
BUG_ON(ret);
1663
1664
ins.objectid = btrfs_ino(inode);
1665
ins.offset = file_pos;
1666
ins.type = BTRFS_EXTENT_DATA_KEY;
1667
ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1668
BUG_ON(ret);
1669
leaf = path->nodes[0];
1670
fi = btrfs_item_ptr(leaf, path->slots[0],
1671
struct btrfs_file_extent_item);
1672
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1673
btrfs_set_file_extent_type(leaf, fi, extent_type);
1674
btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1675
btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1676
btrfs_set_file_extent_offset(leaf, fi, 0);
1677
btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1678
btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1679
btrfs_set_file_extent_compression(leaf, fi, compression);
1680
btrfs_set_file_extent_encryption(leaf, fi, encryption);
1681
btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1682
1683
btrfs_unlock_up_safe(path, 1);
1684
btrfs_set_lock_blocking(leaf);
1685
1686
btrfs_mark_buffer_dirty(leaf);
1687
1688
inode_add_bytes(inode, num_bytes);
1689
1690
ins.objectid = disk_bytenr;
1691
ins.offset = disk_num_bytes;
1692
ins.type = BTRFS_EXTENT_ITEM_KEY;
1693
ret = btrfs_alloc_reserved_file_extent(trans, root,
1694
root->root_key.objectid,
1695
btrfs_ino(inode), file_pos, &ins);
1696
BUG_ON(ret);
1697
btrfs_free_path(path);
1698
1699
return 0;
1700
}
1701
1702
/*
1703
* helper function for btrfs_finish_ordered_io, this
1704
* just reads in some of the csum leaves to prime them into ram
1705
* before we start the transaction. It limits the amount of btree
1706
* reads required while inside the transaction.
1707
*/
1708
/* as ordered data IO finishes, this gets called so we can finish
1709
* an ordered extent if the range of bytes in the file it covers are
1710
* fully written.
1711
*/
1712
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1713
{
1714
struct btrfs_root *root = BTRFS_I(inode)->root;
1715
struct btrfs_trans_handle *trans = NULL;
1716
struct btrfs_ordered_extent *ordered_extent = NULL;
1717
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1718
struct extent_state *cached_state = NULL;
1719
int compress_type = 0;
1720
int ret;
1721
bool nolock;
1722
1723
ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1724
end - start + 1);
1725
if (!ret)
1726
return 0;
1727
BUG_ON(!ordered_extent);
1728
1729
nolock = is_free_space_inode(root, inode);
1730
1731
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1732
BUG_ON(!list_empty(&ordered_extent->list));
1733
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1734
if (!ret) {
1735
if (nolock)
1736
trans = btrfs_join_transaction_nolock(root);
1737
else
1738
trans = btrfs_join_transaction(root);
1739
BUG_ON(IS_ERR(trans));
1740
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1741
ret = btrfs_update_inode(trans, root, inode);
1742
BUG_ON(ret);
1743
}
1744
goto out;
1745
}
1746
1747
lock_extent_bits(io_tree, ordered_extent->file_offset,
1748
ordered_extent->file_offset + ordered_extent->len - 1,
1749
0, &cached_state, GFP_NOFS);
1750
1751
if (nolock)
1752
trans = btrfs_join_transaction_nolock(root);
1753
else
1754
trans = btrfs_join_transaction(root);
1755
BUG_ON(IS_ERR(trans));
1756
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1757
1758
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1759
compress_type = ordered_extent->compress_type;
1760
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1761
BUG_ON(compress_type);
1762
ret = btrfs_mark_extent_written(trans, inode,
1763
ordered_extent->file_offset,
1764
ordered_extent->file_offset +
1765
ordered_extent->len);
1766
BUG_ON(ret);
1767
} else {
1768
BUG_ON(root == root->fs_info->tree_root);
1769
ret = insert_reserved_file_extent(trans, inode,
1770
ordered_extent->file_offset,
1771
ordered_extent->start,
1772
ordered_extent->disk_len,
1773
ordered_extent->len,
1774
ordered_extent->len,
1775
compress_type, 0, 0,
1776
BTRFS_FILE_EXTENT_REG);
1777
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1778
ordered_extent->file_offset,
1779
ordered_extent->len);
1780
BUG_ON(ret);
1781
}
1782
unlock_extent_cached(io_tree, ordered_extent->file_offset,
1783
ordered_extent->file_offset +
1784
ordered_extent->len - 1, &cached_state, GFP_NOFS);
1785
1786
add_pending_csums(trans, inode, ordered_extent->file_offset,
1787
&ordered_extent->list);
1788
1789
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1790
if (!ret) {
1791
ret = btrfs_update_inode(trans, root, inode);
1792
BUG_ON(ret);
1793
}
1794
ret = 0;
1795
out:
1796
if (nolock) {
1797
if (trans)
1798
btrfs_end_transaction_nolock(trans, root);
1799
} else {
1800
btrfs_delalloc_release_metadata(inode, ordered_extent->len);
1801
if (trans)
1802
btrfs_end_transaction(trans, root);
1803
}
1804
1805
/* once for us */
1806
btrfs_put_ordered_extent(ordered_extent);
1807
/* once for the tree */
1808
btrfs_put_ordered_extent(ordered_extent);
1809
1810
return 0;
1811
}
1812
1813
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);

	ClearPagePrivate2(page);
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * When IO fails, either with EIO or a csum verification failure, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};

static int btrfs_io_failed_hook(struct bio *failed_bio,
			 struct page *page, u64 start, u64 end,
			 struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;

		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		/* lookup_extent_mapping() may return NULL, check before
		 * dereferencing */
		if (!em || em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		read_unlock(&em_tree->lock);

		if (IS_ERR_OR_NULL(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
			extent_set_compress_type(&failrec->bio_flags,
						 em->compress_type);
		}
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				  (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & REQ_WRITE)
		rw = WRITE;
	else
		rw = READ;

	ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						      failrec->last_mirror,
						      failrec->bio_flags, 0);
	return ret;
}

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY, 0)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}

/*
 * when reads are done, we need to check csums to verify the data is correct.
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;

	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		goto good;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	kaddr = kmap_atomic(page, KM_USER0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr, KM_USER0);
good:
	/* if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;

zeroit:
	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
		       "private %llu\n",
		       (unsigned long long)btrfs_ino(page->mapping->host),
		       (unsigned long long)start, csum,
		       (unsigned long long)private);
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	if (private == 0)
		return 0;
	return -EIO;
}

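/*
 * the final iput on an inode can't always be run inline (for example
 * from IO completion context), so the reference is parked on a per-fs
 * list and dropped later by btrfs_run_delayed_iputs().
 */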
struct delayed_iput {
	struct list_head list;
	struct inode *inode;
};

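/*
 * drop a reference on an inode.  If ours was not the last reference
 * the count is simply decremented; otherwise the final iput is queued
 * on fs_info->delayed_iputs instead of being run here.
 */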
void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct delayed_iput *delayed;

	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
	delayed->inode = inode;

	spin_lock(&fs_info->delayed_iput_lock);
	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
}

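/*
 * run every iput queued by btrfs_add_delayed_iput().  The list is
 * spliced away under the lock, so new delayed iputs can still be
 * queued while the old ones are processed.
 */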
void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct delayed_iput *delayed;
	int empty;

	spin_lock(&fs_info->delayed_iput_lock);
	empty = list_empty(&fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (empty)
		return;

	down_read(&root->fs_info->cleanup_work_sem);
	spin_lock(&fs_info->delayed_iput_lock);
	list_splice_init(&fs_info->delayed_iputs, &list);
	spin_unlock(&fs_info->delayed_iput_lock);

	while (!list_empty(&list)) {
		delayed = list_entry(list.next, struct delayed_iput, list);
		list_del(&delayed->list);
		iput(delayed->inode);
		kfree(delayed);
	}
	up_read(&root->fs_info->cleanup_work_sem);
}

/*
 * calculate the extra metadata reservation needed when snapshotting a
 * subvolume that contains orphan files.
 */
void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending,
				u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	u64 num_bytes;
	int index;

	root = pending->root;
	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
		return;

	block_rsv = root->orphan_block_rsv;

	/* orphan block reservation for the snapshot */
	num_bytes = block_rsv->size;

	/*
	 * after the snapshot is created, COWing tree blocks may use more
	 * space than it frees.  So we should make sure there is enough
	 * reserved space.
	 */
	index = trans->transid & 0x1;
	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
		num_bytes += block_rsv->size -
			     (block_rsv->reserved + block_rsv->freed[index]);
	}

	*bytes_to_reserve += num_bytes;
}

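/*
 * after a snapshot of a subvolume with orphan files has been created,
 * refill the source subvolume's orphan block reservation and give the
 * new snapshot an orphan reservation of its own.
 */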
void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *snap = pending->snap;
	struct btrfs_block_rsv *block_rsv;
	u64 num_bytes;
	int index;
	int ret;

	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
		return;

	/* refill source subvolume's orphan block reservation */
	block_rsv = root->orphan_block_rsv;
	index = trans->transid & 0x1;
	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
		num_bytes = block_rsv->size -
			    (block_rsv->reserved + block_rsv->freed[index]);
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      root->orphan_block_rsv,
					      num_bytes);
		BUG_ON(ret);
	}

	/* setup orphan block reservation for the snapshot */
	block_rsv = btrfs_alloc_block_rsv(snap);
	BUG_ON(!block_rsv);

	btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
	snap->orphan_block_rsv = block_rsv;

	num_bytes = root->orphan_block_rsv->size;
	ret = btrfs_block_rsv_migrate(&pending->block_rsv,
				      block_rsv, num_bytes);
	BUG_ON(ret);

#if 0
	/* insert orphan item for the snapshot */
	WARN_ON(!root->orphan_item_inserted);
	ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
				       snap->root_key.objectid);
	BUG_ON(ret);
	snap->orphan_item_inserted = 1;
#endif
}

enum btrfs_orphan_cleanup_state {
	ORPHAN_CLEANUP_STARTED	= 1,
	ORPHAN_CLEANUP_DONE	= 2,
};

/*
 * This is called at transaction commit time.  If there are no orphan
 * files in the subvolume, it removes the orphan item and frees the
 * block_rsv structure.
 */
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	int ret;

	if (!list_empty(&root->orphan_list) ||
	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
		return;

	if (root->orphan_item_inserted &&
	    btrfs_root_refs(&root->root_item) > 0) {
		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
					    root->root_key.objectid);
		BUG_ON(ret);
		root->orphan_item_inserted = 0;
	}

	if (root->orphan_block_rsv) {
		WARN_ON(root->orphan_block_rsv->size > 0);
		btrfs_free_block_rsv(root, root->orphan_block_rsv);
		root->orphan_block_rsv = NULL;
	}
}

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 *
 * NOTE: the caller of this function should reserve 5 units of metadata for
 * this function.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = NULL;
	int reserve = 0;
	int insert = 0;
	int ret;

	if (!root->orphan_block_rsv) {
		block_rsv = btrfs_alloc_block_rsv(root);
		BUG_ON(!block_rsv);
	}

	spin_lock(&root->orphan_lock);
	if (!root->orphan_block_rsv) {
		root->orphan_block_rsv = block_rsv;
	} else if (block_rsv) {
		btrfs_free_block_rsv(root, block_rsv);
		block_rsv = NULL;
	}

	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
#if 0
		/*
		 * For proper ENOSPC handling, we should do orphan
		 * cleanup when mounting.  But this introduces a backward
		 * compatibility issue.
		 */
		if (!xchg(&root->orphan_item_inserted, 1))
			insert = 2;
		else
			insert = 1;
#endif
		insert = 1;
	}

	if (!BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 1;
		reserve = 1;
	}
	spin_unlock(&root->orphan_lock);

	if (block_rsv)
		btrfs_add_durable_block_rsv(root->fs_info, block_rsv);

	/* grab metadata reservation from transaction handle */
	if (reserve) {
		ret = btrfs_orphan_reserve_metadata(trans, inode);
		BUG_ON(ret);
	}

	/* insert an orphan item to track this unlinked/truncated file */
	if (insert >= 1) {
		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
		BUG_ON(ret);
	}

	/* insert an orphan item to track subvolumes that contain orphan files */
	if (insert >= 2) {
		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
					       root->root_key.objectid);
		BUG_ON(ret);
	}
	return 0;
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int delete_item = 0;
	int release_rsv = 0;
	int ret = 0;

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_del_init(&BTRFS_I(inode)->i_orphan);
		delete_item = 1;
	}

	if (BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 0;
		release_rsv = 1;
	}
	spin_unlock(&root->orphan_lock);

	if (trans && delete_item) {
		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
		BUG_ON(ret);
	}

	if (release_rsv)
		btrfs_orphan_release_metadata(inode);

	return 0;
}

/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = -1;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			goto out;
		}

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->orphan_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->orphan_lock);

		/*
		 * if this is a bad inode, it means we actually succeeded in
		 * removing the inode, but not the orphan record, which means
		 * we need to manually delete the orphan since iput will just
		 * do a destroy_inode
		 */
		if (is_bad_inode(inode)) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_orphan_del(trans, inode);
			btrfs_end_transaction(trans, root);
			iput(inode);
			continue;
		}

		/* if we have links, this was a truncate, let's do that */
		if (inode->i_nlink) {
			if (!S_ISREG(inode->i_mode)) {
				WARN_ON(1);
				iput(inode);
				continue;
			}
			nr_truncate++;
			ret = btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
		if (ret)
			goto out;
	}
	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;

	if (root->orphan_block_rsv)
		btrfs_block_rsv_release(root, root->orphan_block_rsv,
					(u64)-1);

	if (root->orphan_block_rsv || root->orphan_item_inserted) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans, root);
	}

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);

out:
	if (ret)
		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
	btrfs_free_path(path);
	return ret;
}

/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	int scanned = 0;

	slot++;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
			return 1;

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs.  Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr.  We have to assume the inode
	 * has acls
	 */
	return 1;
}

/*
 * read an inode from the btree into the in-memory inode
 */
static void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	path->leave_spinning = 1;
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];

	if (filled)
		goto cache_acl;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	if (!leaf->map_token)
		map_private_extent_buffer(leaf, (unsigned long)inode_item,
					  sizeof(struct btrfs_inode_item),
					  &leaf->map_token, &leaf->kaddr,
					  &leaf->map_start, &leaf->map_len,
					  KM_USER1);

	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
					   btrfs_ino(inode));
	if (!maybe_acls)
		cache_no_acl(inode);

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	btrfs_free_path(path);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_update_iflags(inode);
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}

/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	if (!leaf->map_token)
		map_private_extent_buffer(leaf, (unsigned long)item,
					  sizeof(struct btrfs_inode_item),
					  &leaf->map_token, &leaf->kaddr,
					  &leaf->map_start, &leaf->map_len,
					  KM_USER1);

	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	btrfs_set_inode_block_group(leaf, item, 0);

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}
}

/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay
	 */
	if (!is_free_space_inode(root, inode)
	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
				 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}

/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *dir, struct inode *inode,
				const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->leave_spinning = 1;
	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				    name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
				  dir_ino, &index);
	if (ret) {
		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
		       "inode %llu parent %llu\n", name_len, name,
		       (unsigned long long)ino, (unsigned long long)dir_ino);
		goto err;
	}

	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret)
		goto err;

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir_ino);
	BUG_ON(ret != 0 && ret != -ENOENT);

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
	if (ret == -ENOENT)
		ret = 0;
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	btrfs_update_inode(trans, root, dir);
out:
	return ret;
}

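/*
 * remove one directory entry for the inode and drop its link count,
 * writing the inode item back out if the unlink succeeded.
 */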
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	int ret;
	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (!ret) {
		btrfs_drop_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	}
	return ret;
}


/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
			     struct btrfs_path *path)
{
	struct extent_buffer *eb;
	int level;
	u64 refs = 1;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		int ret;

		if (!path->nodes[level])
			break;
		eb = path->nodes[level];
		if (!btrfs_block_can_be_shared(root, eb))
			continue;
		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
					       &refs, NULL);
		if (refs > 1)
			return 1;
	}
	return 0;
}

/*
 * helper to start a transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space.
 * so in the enospc case, we should make sure they will free space before
 * allowing them to use the global metadata reservation.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
						       struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_dir_item *di;
	struct inode *inode = dentry->d_inode;
	u64 index;
	int check_link = 1;
	int err = -ENOSPC;
	int ret;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	trans = btrfs_start_transaction(root, 10);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;

	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return ERR_PTR(-ENOSPC);

	/* check if someone else holds a reference */
	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
		return ERR_PTR(-ENOSPC);

	if (atomic_read(&inode->i_count) > 2)
		return ERR_PTR(-ENOSPC);

	if (xchg(&root->fs_info->enospc_unlink, 1))
		return ERR_PTR(-ENOSPC);

	path = btrfs_alloc_path();
	if (!path) {
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(-ENOMEM);
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		root->fs_info->enospc_unlink = 0;
		return trans;
	}

	path->skip_locking = 1;
	path->search_commit_root = 1;

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(dir)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(path);

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(inode)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(path);

	if (ret == 0 && S_ISREG(inode->i_mode)) {
		ret = btrfs_lookup_file_extent(trans, root, path,
					       ino, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		BUG_ON(ret == 0);
		if (check_path_shared(root, path))
			goto out;
		btrfs_release_path(path);
	}

	if (!check_link) {
		err = 0;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	if (di) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		err = 0;
		goto out;
	}
	btrfs_release_path(path);

	ref = btrfs_lookup_inode_ref(trans, root, path,
				dentry->d_name.name, dentry->d_name.len,
				ino, dir_ino, 0);
	if (IS_ERR(ref)) {
		err = PTR_ERR(ref);
		goto out;
	}
	BUG_ON(!ref);
	if (check_path_shared(root, path))
		goto out;
	index = btrfs_inode_ref_index(path->nodes[0], ref);
	btrfs_release_path(path);

	/*
	 * This is a commit root search, if we can lookup the inode item and
	 * other related items in the commit root, it means the transaction of
	 * dir/file creation has been committed, and the dir index item that we
	 * delay to insert has also been inserted into the commit root.  So
	 * we needn't worry about the delayed insertion of the dir index item
	 * here.
	 */
	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	BUG_ON(ret == -ENOENT);
	if (check_path_shared(root, path))
		goto out;

	err = 0;
out:
	btrfs_free_path(path);
	if (err) {
		btrfs_end_transaction(trans, root);
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(err);
	}

	trans->block_rsv = &root->fs_info->global_block_rsv;
	return trans;
}

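/*
 * counterpart to __unlink_start_trans(): clear the enospc_unlink flag
 * if the global reservation was in use, then end the transaction.
 */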
static void __unlink_end_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
		BUG_ON(!root->fs_info->enospc_unlink);
		root->fs_info->enospc_unlink = 0;
	}
	btrfs_end_transaction_throttle(trans, root);
}

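/*
 * the ->unlink directory operation: drop one name, and if that was the
 * last link, put the inode on the orphan list so a crash before the
 * final iput can't leak it.
 */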
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);

	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, inode);
		BUG_ON(ret);
	}

	nr = trans->blocks_used;
	__unlink_end_trans(trans, root);
	btrfs_btree_balance_dirty(root, nr);
	return ret;
}

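/*
 * remove the directory entries that point at a subvolume.  A subvolume
 * is referenced through the tree of tree roots, so instead of touching
 * an inode this deletes the dir item, the root ref and the dir index.
 */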
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	BUG_ON(IS_ERR_OR_NULL(di));

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	BUG_ON(ret);
	btrfs_release_path(path);

	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir_ino, &index, name, name_len);
	if (ret < 0) {
		BUG_ON(ret != -ENOENT);
		di = btrfs_search_dir_index_item(root, path, dir_ino,
						 name, name_len);
		BUG_ON(IS_ERR_OR_NULL(di));

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);
		index = key.offset;
	}
	btrfs_release_path(path);

	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	BUG_ON(ret);

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

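/*
 * the ->rmdir directory operation.  Empty subvolume placeholder
 * directories go through btrfs_unlink_subvol(); normal directories are
 * added to the orphan list before the unlink so a crash can't leave
 * them half removed.
 */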
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	unsigned long nr = 0;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
	    btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
		return -ENOTEMPTY;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, root, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
		goto out;
	}

	err = btrfs_orphan_add(trans, inode);
	if (err)
		goto out;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
		btrfs_i_size_write(inode, 0);
out:
	nr = trans->blocks_used;
	__unlink_end_trans(trans, root);
	btrfs_btree_balance_dirty(root, nr);

	return err;
}

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
 */
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode,
			       u64 new_size, u32 min_type)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u64 mask = root->sectorsize - 1;
	u32 found_type = (u8)-1;
	int found_extent;
	int del_item;
	int pending_del_nr = 0;
	int pending_del_slot = 0;
	int extent_type = -1;
	int encoding;
	int ret;
	int err = 0;
	u64 ino = btrfs_ino(inode);

	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);

	if (root->ref_cows || root == root->fs_info->tree_root)
		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);

	/*
	 * This function is also used to drop the items in the log tree before
	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the logged items.  So we shouldn't kill the
	 * delayed items.
	 */
	if (min_type == 0 && root == BTRFS_I(inode)->root)
		btrfs_kill_delayed_inode_items(inode);

	path = btrfs_alloc_path();
	BUG_ON(!path);
	path->reada = -1;

	key.objectid = ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

search_again:
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret > 0) {
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (1) {
		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);
		encoding = 0;

		if (found_key.objectid != ino)
			break;

		if (found_type < min_type)
			break;

		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			encoding = btrfs_file_extent_compression(leaf, fi);
			encoding |= btrfs_file_extent_encryption(leaf, fi);
			encoding |= btrfs_file_extent_other_encoding(leaf, fi);

			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
									 fi);
			}
			item_end--;
		}
		if (found_type > min_type) {
			del_item = 1;
		} else {
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item && !encoding) {
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = new_size -
					found_key.offset + root->sectorsize - 1;
				extent_num_bytes = extent_num_bytes &
					~((u64)root->sectorsize - 1);
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes);
				if (root->ref_cows && extent_start != 0)
					inode_sub_bytes(inode, num_dec);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				if (extent_start != 0) {
					found_extent = 1;
					if (root->ref_cows)
						inode_sub_bytes(inode, num_dec);
				}
			}
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_compression(leaf, fi) == 0 &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
				u32 size = new_size - found_key.offset;

				if (root->ref_cows) {
					inode_sub_bytes(inode, item_end + 1 -
							new_size);
				}
				size =
				    btrfs_file_extent_calc_inline_size(size);
				ret = btrfs_truncate_item(trans, root, path,
							  size, 1);
			} else if (root->ref_cows) {
				inode_sub_bytes(inode, item_end + 1 -
						found_key.offset);
			}
		}
delete:
		if (del_item) {
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
				BUG();
			}
		} else {
			break;
		}
		if (found_extent && (root->ref_cows ||
				     root == root->fs_info->tree_root)) {
			btrfs_set_path_blocking(path);
			ret = btrfs_free_extent(trans, root, extent_start,
						extent_num_bytes, 0,
						btrfs_header_owner(leaf),
						ino, extent_offset);
			BUG_ON(ret);
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot) {
			if (root->ref_cows &&
			    BTRFS_I(inode)->location.objectid !=
						BTRFS_FREE_INO_OBJECTID) {
				err = -EAGAIN;
				goto out;
			}
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				BUG_ON(ret);
				pending_del_nr = 0;
			}
			btrfs_release_path(path);
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	if (pending_del_nr) {
		ret = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return err;
}

/*
 * taken from block_truncate_page, but does cow as it zeros out
 * any bytes left in the last page in the file.
 */
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	int ret = 0;
	u64 page_start;
	u64 page_end;

	if ((offset & (blocksize - 1)) == 0)
		goto out;
	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret)
		goto out;

	ret = -ENOMEM;
again:
	page = grab_cache_page(mapping, index);
	if (!page) {
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
		goto out;
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	set_page_extent_mapped(page);

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		goto out_unlock;
	}

	ret = 0;
	if (offset != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
			     GFP_NOFS);

out_unlock:
	if (ret)
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}

/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
 * for the range between oldsize and size
 */
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 mask = root->sectorsize - 1;
	u64 hole_start = (oldsize + mask) & ~mask;
	u64 block_end = (size + mask) & ~mask;
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	if (size <= hole_start)
		return 0;

	while (1) {
		struct btrfs_ordered_extent *ordered;
		btrfs_wait_ordered_range(inode, hole_start,
					 block_end - hole_start);
		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
				 &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
		btrfs_put_ordered_extent(ordered);
	}

	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				block_end - cur_offset, 0);
		BUG_ON(IS_ERR_OR_NULL(em));
		last_byte = min(extent_map_end(em), block_end);
		last_byte = (last_byte + mask) & ~mask;
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			u64 hint_byte = 0;
			hole_size = last_byte - cur_offset;

			trans = btrfs_start_transaction(root, 2);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				break;
			}

			err = btrfs_drop_extents(trans, inode, cur_offset,
						 cur_offset + hole_size,
						 &hint_byte, 1);
			if (err)
				break;

			err = btrfs_insert_file_extent(trans, root,
					btrfs_ino(inode), cur_offset, 0,
					0, hole_size, 0, hole_size,
					0, 0, 0);
			if (err)
				break;

			btrfs_drop_extent_cache(inode, hole_start,
					last_byte - 1, 0);

			btrfs_end_transaction(trans, root);
		}
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}

	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
	return err;
}

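/*
 * change i_size.  Growing the file only extends i_size and fills the
 * new range with hole extents; shrinking goes through the full
 * btrfs_truncate() path.
 */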
static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);
	int ret;

	if (newsize == oldsize)
		return 0;

	if (newsize > oldsize) {
		i_size_write(inode, newsize);
		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
		truncate_pagecache(inode, oldsize, newsize);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
		if (ret) {
			btrfs_setsize(inode, oldsize);
			return ret;
		}

		mark_inode_dirty(inode);
	} else {

		/*
		 * We're truncating a file that used to have good data down to
		 * zero.  Make sure it gets into the ordered flush list so that
		 * any new writes get down to disk quickly.
		 */
		if (newsize == 0)
			BTRFS_I(inode)->ordered_data_close = 1;

		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);
		ret = btrfs_truncate(inode);
	}

	return ret;
}

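/*
 * the ->setattr inode operation: apply size changes through
 * btrfs_setsize(), copy the remaining attributes into the inode and
 * update the ACLs if the mode changed.
 */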
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr->ia_size);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);

		if (attr->ia_valid & ATTR_MODE)
			err = btrfs_acl_chmod(inode);
	}

	return err;
}

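/*
 * called when the VFS is done with the inode.  For unlinked inodes this
 * truncates away every item, retrying with a fresh transaction whenever
 * the reservation runs out (-EAGAIN), and then drops the orphan item.
 */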
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr;
	int ret;

	trace_btrfs_inode_evict(inode);

	truncate_inode_pages(&inode->i_data, 0);
	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
			       is_free_space_inode(root, inode)))
		goto no_delete;

	if (is_bad_inode(inode)) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	if (root->fs_info->log_root_recovering) {
		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
		goto no_delete;
	}

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
		goto no_delete;
	}

	btrfs_i_size_write(inode, 0);

	while (1) {
		trans = btrfs_join_transaction(root);
		BUG_ON(IS_ERR(trans));
		trans->block_rsv = root->orphan_block_rsv;

		ret = btrfs_block_rsv_check(trans, root,
					    root->orphan_block_rsv, 0, 5);
		if (ret) {
			BUG_ON(ret != -EAGAIN);
			ret = btrfs_commit_transaction(trans, root);
			BUG_ON(ret);
			continue;
		}

		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
		if (ret != -EAGAIN)
			break;

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);

	}

	if (ret == 0) {
		ret = btrfs_orphan_del(trans, inode);
		BUG_ON(ret);
	}

	if (!(root == root->fs_info->tree_root ||
	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
		btrfs_return_ino(root, btrfs_ino(inode));

	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);
no_delete:
	end_writeback(inode);
	return;
}

/*
 * this returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
				    namelen, 0);
	if (IS_ERR(di))
		ret = PTR_ERR(di);

	if (IS_ERR_OR_NULL(di))
		goto out_err;

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
	btrfs_free_path(path);
	return ret;
out_err:
	location->objectid = 0;
	goto out;
}

/*
3736
* when we hit a tree root in a directory, the btrfs part of the inode
3737
* needs to be changed to reflect the root directory of the tree root. This
3738
* is kind of like crossing a mount point.
3739
*/
3740
static int fixup_tree_root_location(struct btrfs_root *root,
3741
struct inode *dir,
3742
struct dentry *dentry,
3743
struct btrfs_key *location,
3744
struct btrfs_root **sub_root)
3745
{
3746
struct btrfs_path *path;
3747
struct btrfs_root *new_root;
3748
struct btrfs_root_ref *ref;
3749
struct extent_buffer *leaf;
3750
int ret;
3751
int err = 0;
3752
3753
path = btrfs_alloc_path();
3754
if (!path) {
3755
err = -ENOMEM;
3756
goto out;
3757
}
3758
3759
err = -ENOENT;
3760
ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3761
BTRFS_I(dir)->root->root_key.objectid,
3762
location->objectid);
3763
if (ret) {
3764
if (ret < 0)
3765
err = ret;
3766
goto out;
3767
}
3768
3769
leaf = path->nodes[0];
3770
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3771
if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
3772
btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3773
goto out;
3774
3775
ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3776
(unsigned long)(ref + 1),
3777
dentry->d_name.len);
3778
if (ret)
3779
goto out;
3780
3781
btrfs_release_path(path);
3782
3783
new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3784
if (IS_ERR(new_root)) {
3785
err = PTR_ERR(new_root);
3786
goto out;
3787
}
3788
3789
if (btrfs_root_refs(&new_root->root_item) == 0) {
3790
err = -ENOENT;
3791
goto out;
3792
}
3793
3794
*sub_root = new_root;
3795
location->objectid = btrfs_root_dirid(&new_root->root_item);
3796
location->type = BTRFS_INODE_ITEM_KEY;
3797
location->offset = 0;
3798
err = 0;
3799
out:
3800
btrfs_free_path(path);
3801
return err;
3802
}
3803
3804
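/*
 * add a newly created or read inode to the root's red-black tree of
 * in-memory inodes, keyed by inode number.  If a stale entry with the
 * same inode number is still waiting to be freed, drop it and retry.
 */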
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	u64 ino = btrfs_ino(inode);
again:
	p = &root->inode_tree.rb_node;
	parent = NULL;

	if (inode_unhashed(inode))
		return;

	spin_lock(&root->inode_lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(&entry->vfs_inode))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(&entry->vfs_inode))
			p = &parent->rb_right;
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_erase(parent, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			goto again;
		}
	}
	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}

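/*
 * remove an inode from the root's red-black tree.  If this empties the
 * tree and the root has no references left, queue the root for cleanup
 * as a dead root.
 */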
static void inode_tree_del(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	/*
	 * Free space cache has inodes in the tree root, but the tree root has a
	 * root_refs of 0, so this could end up dropping the tree root as a
	 * snapshot, so we need the extra !root->fs_info->tree_root check to
	 * make sure we don't drop it.
	 */
	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root) {
		synchronize_srcu(&root->fs_info->subvol_srcu);
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}

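/*
 * walk the inode tree of a dead root and drop every cached inode:
 * prune the dentry aliases and iput each inode so btrfs_drop_inode can
 * remove it from the inode cache once its usage count hits zero.
 */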
int btrfs_invalidate_inodes(struct btrfs_root *root)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(&entry->vfs_inode))
			node = node->rb_left;
		else if (objectid > btrfs_ino(&entry->vfs_inode))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = btrfs_ino(&entry->vfs_inode) + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from
			 * the inode cache when its usage count
			 * hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return 0;
}

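/*
 * callbacks for iget5_locked: btrfs_init_locked_inode fills in a
 * freshly allocated inode, btrfs_find_actor matches cached inodes by
 * inode number and root.
 */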
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;
	inode->i_ino = args->ino;
	BTRFS_I(inode)->root = args->root;
	btrfs_set_inode_space_info(args->root, inode);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;
	return args->ino == btrfs_ino(inode) &&
		args->root == BTRFS_I(inode)->root;
}

static struct inode *btrfs_iget_locked(struct super_block *s,
				       u64 objectid,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	args.ino = objectid;
	args.root = root;

	inode = iget5_locked(s, objectid, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}

/* Get an inode object given its location and corresponding root.
 * Sets *new to 1 if the inode was read from disk
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *new)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location->objectid, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
		btrfs_read_locked_inode(inode);
		inode_tree_add(inode);
		unlock_new_inode(inode);
		if (new)
			*new = 1;
	}

	return inode;
}

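/*
 * build a dummy in-memory directory to stand in for a subvolume root
 * that cannot be read (for example because it has been deleted), so
 * lookups still return a sane inode.
 */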
static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	BTRFS_I(inode)->dummy_inode = 1;

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	return inode;
}

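/*
 * look up a name in a directory and return the matching inode.  If the
 * name refers to a subvolume root, cross into that subvolume and run
 * orphan cleanup on it when the fs is writable.
 */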
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	int index;
	int ret;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(dir, dentry, &location);

	if (ret < 0)
		return ERR_PTR(ret);

	if (location.objectid == 0)
		return NULL;

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
		return inode;
	}

	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);

	index = srcu_read_lock(&root->fs_info->subvol_srcu);
	ret = fixup_tree_root_location(root, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
	}
	srcu_read_unlock(&root->fs_info->subvol_srcu, index);

	if (!IS_ERR(inode) && root != sub_root) {
		down_read(&root->fs_info->cleanup_work_sem);
		if (!(inode->i_sb->s_flags & MS_RDONLY))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&root->fs_info->cleanup_work_sem);
		if (ret)
			inode = ERR_PTR(ret);
	}

	return inode;
}

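/*
 * tell the dcache that a dentry can be discarded as soon as it is
 * unused if it belongs to a subvolume whose root has been deleted.
 */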
static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;

	if (!dentry->d_inode && !IS_ROOT(dentry))
		dentry = dentry->d_parent;

	if (dentry->d_inode) {
		root = BTRFS_I(dentry->d_inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;
	}
	return 0;
}

static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode *inode;

	inode = btrfs_lookup_dentry(dir, dentry);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	return d_splice_alias(inode, dentry);
}

unsigned char btrfs_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};

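/*
 * readdir: emit "." and ".." by hand, then walk the directory keys in
 * the btree, merging in delayed insertions and skipping delayed
 * deletions that have not been committed yet.
 */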
static int btrfs_real_readdir(struct file *filp, void *dirent,
			      filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct list_head ins_list;
	struct list_head del_list;
	int ret;
	struct extent_buffer *leaf;
	int slot;
	unsigned char d_type;
	int over = 0;
	u32 di_cur;
	u32 di_total;
	u32 di_len;
	int key_type = BTRFS_DIR_INDEX_KEY;
	char tmp_name[32];
	char *name_ptr;
	int name_len;
	int is_curr = 0;	/* filp->f_pos points to the current index? */

	/* FIXME, use a real flag for deciding about the key type */
	if (root->fs_info->tree_root == root)
		key_type = BTRFS_DIR_ITEM_KEY;

	/* special case for "." */
	if (filp->f_pos == 0) {
		over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 1;
	}
	/* special case for .., just use the back ref */
	if (filp->f_pos == 1) {
		u64 pino = parent_ino(filp->f_path.dentry);
		over = filldir(dirent, "..", 2,
			       2, pino, DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 2;
	}
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;

	if (key_type == BTRFS_DIR_INDEX_KEY) {
		INIT_LIST_HEAD(&ins_list);
		INIT_LIST_HEAD(&del_list);
		btrfs_get_delayed_items(inode, &ins_list, &del_list);
	}

	btrfs_set_key_type(&key, key_type);
	key.offset = filp->f_pos;
	key.objectid = btrfs_ino(inode);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			else if (ret > 0)
				break;
			continue;
		}

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;
		if (btrfs_key_type(&found_key) != key_type)
			break;
		if (found_key.offset < filp->f_pos)
			goto next;
		if (key_type == BTRFS_DIR_INDEX_KEY &&
		    btrfs_should_delete_dir_index(&del_list,
						  found_key.offset))
			goto next;

		filp->f_pos = found_key.offset;
		is_curr = 1;

		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		di_cur = 0;
		di_total = btrfs_item_size(leaf, item);

		while (di_cur < di_total) {
			struct btrfs_key location;

			if (verify_dir_item(root, leaf, di))
				break;

			name_len = btrfs_dir_name_len(leaf, di);
			if (name_len <= sizeof(tmp_name)) {
				name_ptr = tmp_name;
			} else {
				name_ptr = kmalloc(name_len, GFP_NOFS);
				if (!name_ptr) {
					ret = -ENOMEM;
					goto err;
				}
			}
			read_extent_buffer(leaf, name_ptr,
					   (unsigned long)(di + 1), name_len);

			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);

			/* is this a reference to our own snapshot? If so
			 * skip it
			 */
			if (location.type == BTRFS_ROOT_ITEM_KEY &&
			    location.objectid == root->root_key.objectid) {
				over = 0;
				goto skip;
			}
			over = filldir(dirent, name_ptr, name_len,
				       found_key.offset, location.objectid,
				       d_type);

skip:
			if (name_ptr != tmp_name)
				kfree(name_ptr);

			if (over)
				goto nopos;
			di_len = btrfs_dir_name_len(leaf, di) +
				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
next:
		path->slots[0]++;
	}

	if (key_type == BTRFS_DIR_INDEX_KEY) {
		if (is_curr)
			filp->f_pos++;
		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
						      &ins_list);
		if (ret)
			goto nopos;
	}

	/* Reached end of directory/root. Bump pos past the last item. */
	if (key_type == BTRFS_DIR_INDEX_KEY)
		/*
		 * 32-bit glibc will use getdents64, but then strtol -
		 * so the last number we can serve is this.
		 */
		filp->f_pos = 0x7fffffff;
	else
		filp->f_pos++;
nopos:
	ret = 0;
err:
	if (key_type == BTRFS_DIR_INDEX_KEY)
		btrfs_put_delayed_items(&ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}

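/*
 * inode updates are handled through the transaction machinery, so for
 * a data integrity sync (WB_SYNC_ALL) we just make sure the
 * transaction that contains the inode gets committed.
 */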
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret = 0;
	bool nolock = false;

	if (BTRFS_I(inode)->dummy_inode)
		return 0;

	if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode))
		nolock = true;

	if (wbc->sync_mode == WB_SYNC_ALL) {
		if (nolock)
			trans = btrfs_join_transaction_nolock(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		if (nolock)
			ret = btrfs_end_transaction_nolock(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
	}
	return ret;
}

/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes. But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
void btrfs_dirty_inode(struct inode *inode, int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	if (BTRFS_I(inode)->dummy_inode)
		return;

	trans = btrfs_join_transaction(root);
	BUG_ON(IS_ERR(trans));

	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret == -ENOSPC) {
		/* whoops, let's try again with the full transaction */
		btrfs_end_transaction(trans, root);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			printk_ratelimited(KERN_ERR "btrfs: fail to "
				       "dirty inode %llu error %ld\n",
				       (unsigned long long)btrfs_ino(inode),
				       PTR_ERR(trans));
			return;
		}

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			printk_ratelimited(KERN_ERR "btrfs: fail to "
				       "dirty inode %llu error %d\n",
				       (unsigned long long)btrfs_ino(inode),
				       ret);
		}
	}
	btrfs_end_transaction(trans, root);
	if (BTRFS_I(inode)->delayed_node)
		btrfs_balance_delayed_items(root);
}

/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
static int btrfs_set_inode_index_count(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to find a free sequence number in a given directory. This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
	int ret = 0;

	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = BTRFS_I(dir)->index_cnt;
	BTRFS_I(dir)->index_cnt++;

	return ret;
}

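/*
 * allocate a new inode and insert its inode item and inode ref into
 * the btree in a single batch.  'dir' may be NULL for inodes created
 * without a parent directory.
 */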
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     const char *name, int name_len,
				     u64 ref_objectid, u64 objectid, int mode,
				     u64 *index)
{
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	unsigned long ptr;
	int ret;
	int owner;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	inode = new_inode(root->fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * we have to initialize this early, so we can reclaim the inode
	 * number if we fail afterwards in this function.
	 */
	inode->i_ino = objectid;

	if (dir) {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(dir, index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_set_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;
	btrfs_set_inode_space_info(root, inode);

	if (mode & S_IFDIR)
		owner = 0;
	else
		owner = 1;

	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
		goto fail;

	inode_init_owner(inode, dir, mode);
	inode_set_bytes(inode, 0);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

	btrfs_inherit_iflags(inode, dir);

	if ((mode & S_IFREG)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(root, NODATACOW) ||
		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
	}

	insert_inode_hash(inode);
	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, inode);

	return inode;
fail:
	if (dir)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	iput(inode);
	return ERR_PTR(ret);
}

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}

/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_ino, index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
					     parent_ino, index);
	}

	if (ret == 0) {
		ret = btrfs_insert_dir_item(trans, root, name, name_len,
					    parent_inode, &key,
					    btrfs_inode_type(inode), index);
		BUG_ON(ret);

		btrfs_i_size_write(parent_inode, parent_inode->i_size +
				   name_len * 2);
		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
		ret = btrfs_update_inode(trans, root, parent_inode);
	}
	return ret;
}

static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
			    struct inode *dir, struct dentry *dentry,
			    struct inode *inode, int backref, u64 index)
{
	int err = btrfs_add_link(trans, dir, inode,
				 dentry->d_name.name, dentry->d_name.len,
				 backref, index);
	if (!err) {
		d_instantiate(dentry, inode);
		return 0;
	}
	if (err > 0)
		err = -EEXIST;
	return err;
}

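/* create a special file (device node, fifo or socket) in 'dir' */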
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
		       int mode, dev_t rdev)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	unsigned long nr = 0;
	u64 index = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		btrfs_update_inode(trans, root, inode);
	}
out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	btrfs_btree_balance_dirty(root, nr);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	return err;
}

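/* create a regular file in 'dir' and wire up the file operations */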
static int btrfs_create(struct inode *dir, struct dentry *dentry,
			int mode, struct nameidata *nd)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int drop_inode = 0;
	int err;
	unsigned long nr = 0;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

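/*
 * add a hard link to an existing inode.  Links that would cross
 * subvolumes are rejected with -EXDEV.
 */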
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = old_dentry->d_inode;
	u64 index;
	unsigned long nr = 0;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->objectid != BTRFS_I(inode)->root->objectid)
		return -EXDEV;

	if (inode->i_nlink == ~0U)
		return -EMLINK;

	err = btrfs_set_inode_index(dir, &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto fail;
	}

	btrfs_inc_nlink(inode);
	inode->i_ctime = CURRENT_TIME;
	ihold(inode);

	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dget_parent(dentry);
		err = btrfs_update_inode(trans, root, inode);
		BUG_ON(err);
		btrfs_log_new_name(trans, inode, NULL, parent);
		dput(parent);
	}

	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
fail:
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

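/* create a new directory in 'dir' and link it in */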
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;
	u64 objectid = 0;
	u64 index = 0;
	unsigned long nr = 1;

	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_fail;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFDIR | mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}

	drop_on_err = 1;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_fail;

	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	btrfs_i_size_write(inode, 0);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail;

	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, 0, index);
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
	drop_on_err = 0;

out_fail:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	if (drop_on_err)
		iput(inode);
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

/* helper for btrfs_get_extent.  Given an existing extent in the tree,
 * and an extent that you want to insert, deal with overlap and insert
 * the new extent into the tree.
 */
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
				struct extent_map *em,
				u64 map_start, u64 map_len)
{
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
	start_diff = map_start - em->start;
	em->start = map_start;
	em->len = map_len;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len -= start_diff;
	}
	return add_extent_mapping(em_tree, em);
}

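/*
 * decompress an inline extent straight into a page, zeroing the part
 * of the page that could not be filled if the decompression comes up
 * short.
 */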
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		char *kaddr = kmap_atomic(page, KM_USER0);
		unsigned long copy_size = min_t(u64,
				  PAGE_CACHE_SIZE - pg_offset,
				  max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	kfree(tmp);
	return 0;
}

/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 */

struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				    size_t pg_offset, u64 start, u64 len,
				    int create)
{
	int ret;
	int err = 0;
	u64 bytenr;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	u32 found_type;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_trans_handle *trans = NULL;
	int compress_type;

again:
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		err = -ENOMEM;
		goto out;
	}
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * Chances are we'll be called again, so go ahead and do
		 * readahead
		 */
		path->reada = 1;
	}

	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != objectid ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		goto not_found;
	}

	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	compress_type = btrfs_file_extent_compression(leaf, item);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
		       btrfs_file_extent_num_bytes(leaf, item);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, item);
		extent_end = (extent_start + size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	}

	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		em->start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
				 btrfs_file_extent_offset(leaf, item);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			goto insert;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
									 item);
		} else {
			bytenr += btrfs_file_extent_offset(leaf, item);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		em->block_start = EXTENT_MAP_INLINE;
		if (!page || create) {
			em->start = extent_start;
			em->len = extent_end - extent_start;
			goto out;
		}

		size = btrfs_file_extent_inline_len(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
				size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = (copy_size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		em->orig_start = EXTENT_MAP_INLINE;
		if (compress_type) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
		if (create == 0 && !PageUptodate(page)) {
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, inode, page,
							pg_offset,
							extent_offset, item);
				BUG_ON(ret);
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_CACHE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap(page);
			}
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
			WARN_ON(1);
			if (!trans) {
				kunmap(page);
				free_extent_map(em);
				em = NULL;

				btrfs_release_path(path);
				trans = btrfs_join_transaction(root);

				if (IS_ERR(trans))
					return ERR_CAST(trans);
				goto again;
			}
			map = kmap(page);
			write_extent_buffer(leaf, map + pg_offset, ptr,
					    copy_size);
			kunmap(page);
			btrfs_mark_buffer_dirty(leaf);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
		goto insert;
	} else {
		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
		WARN_ON(1);
	}
not_found:
	em->start = start;
	em->len = len;
not_found_em:
	em->block_start = EXTENT_MAP_HOLE;
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
		       "[%llu %llu]\n", (unsigned long long)em->start,
		       (unsigned long long)em->len,
		       (unsigned long long)start,
		       (unsigned long long)len);
		err = -EIO;
		goto out;
	}

	err = 0;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = lookup_extent_mapping(em_tree, start, len);
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			free_extent_map(existing);
			existing = NULL;
		}
		if (!existing) {
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
							   em, start,
							   root->sectorsize);
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			free_extent_map(em);
			em = existing;
			err = 0;
		}
	}
	write_unlock(&em_tree->lock);
out:

	trace_btrfs_get_extent(root, em);

	if (path)
		btrfs_free_path(path);
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
		if (!err)
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

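/*
 * like btrfs_get_extent, but for fiemap: when the mapping comes back
 * as a hole, check for delalloc ranges hiding behind it and report
 * those instead, since they are data that has not been written out yet.
 */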
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 range_start = start;
	u64 end;
	u64 found;
	u64 found_end;
	int err = 0;

	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
	if (IS_ERR(em))
		return em;
	if (em) {
		/*
		 * if our em maps to a hole, there might
		 * actually be delalloc bytes behind it
		 */
		if (em->block_start != EXTENT_MAP_HOLE)
			return em;
		else
			hole_em = em;
	}

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, let's look for delalloc */
	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start, range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
		} else {
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

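/*
 * allocate a new extent on disk for a direct IO write and insert the
 * matching pinned extent map and ordered extent.
 */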
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
						  struct extent_map *em,
						  u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;
	bool insert = false;

	/*
	 * Ok if the extent map we looked up is a hole and is for the exact
	 * range we want, there is no reason to allocate a new one, however if
	 * it is not right then we need to free this one and drop the cache for
	 * our range.
	 */
	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
	    em->len != len) {
		free_extent_map(em);
		em = NULL;
		insert = true;
		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
		btrfs_add_inode_defrag(trans, inode);

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
				   alloc_hint, (u64)-1, &ins, 1);
	if (ret) {
		em = ERR_PTR(ret);
		goto out;
	}

	if (!em) {
		em = alloc_extent_map();
		if (!em) {
			em = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	em->start = start;
	em->orig_start = em->start;
	em->len = ins.offset;

	em->block_start = ins.objectid;
	em->block_len = ins.offset;
	em->bdev = root->fs_info->fs_devices->latest_bdev;

	/*
	 * We need to do this because if we're using the original em we searched
	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
	 */
	em->flags = 0;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	while (insert) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST)
			break;
		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
	}

	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
					   ins.offset, ins.offset, 0);
	if (ret) {
		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
		em = ERR_PTR(ret);
	}
out:
	btrfs_end_transaction(trans, root);
	return em;
}

/*
 * returns 1 when the nocow is safe, < 0 on error, 0 if the
 * block must be cow'd
 */
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 offset, u64 len)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}
	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	backref_offset = btrfs_file_extent_offset(leaf, fi);

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end < offset + len) {
		/* extent doesn't include our full range, must cow */
		goto out;
	}

	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
				  key.offset - backref_offset, disk_bytenr))
		goto out;

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

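/*
 * get_block callback for direct IO: map a file range to disk blocks,
 * reusing existing extents for nocow and prealloc writes and
 * allocating (cowing) new extents otherwise.
 */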
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start = iblock << inode->i_blkbits;
	u64 len = bh_result->b_size;
	struct btrfs_trans_handle *trans;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fall back to
	 * buffered io.  INLINE is special, and we could probably kludge it in
	 * here, but it's still buffered so for safety let's just fall back to
	 * the generic buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fall back to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		return -ENOTBLK;
	}

	/* Just a good old fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		/* DIO will do one hole at a time, so just unlock a sector */
		unlock_extent(&BTRFS_I(inode)->io_tree, start,
			      start + root->sectorsize - 1, GFP_NOFS);
		return 0;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (!create) {
		len = em->len - (start - em->start);
		goto map;
	}

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		int type;
		int ret;
		u64 block_start;

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		/*
		 * we're not going to log anything, but we do need
		 * to make sure the current transaction stays open
		 * while we look for nocow cross refs
		 */
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans))
			goto must_cow;

		if (can_nocow_odirect(trans, inode, start, len) == 1) {
			ret = btrfs_add_ordered_extent_dio(inode, start,
					   block_start, len, len, type);
			btrfs_end_transaction(trans, root);
			if (ret) {
				free_extent_map(em);
				return ret;
			}
			goto unlock;
		}
		btrfs_end_transaction(trans, root);
	}
must_cow:
	/*
	 * this will cow the extent, reset the len in case we changed
	 * it above
	 */
	len = bh_result->b_size;
	em = btrfs_new_extent_direct(inode, em, start, len);
	if (IS_ERR(em))
		return PTR_ERR(em);
	len = min(len, em->len - (start - em->start));
unlock:
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
			 EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
			 0, NULL, GFP_NOFS);
map:
	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
		inode->i_blkbits;
	bh_result->b_size = len;
	bh_result->b_bdev = em->bdev;
	set_buffer_mapped(bh_result);
	if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		set_buffer_new(bh_result);

	free_extent_map(em);

	return 0;
}

struct btrfs_dio_private {
	struct inode *inode;
	u64 logical_offset;
	u64 disk_bytenr;
	u64 bytes;
	u32 *csums;
	void *private;

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	struct bio *orig_bio;
};

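/*
 * end_io handler for direct reads: verify each page against the csums
 * collected at submit time, then unlock the range and complete the dio.
 */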
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start;
	u32 *private = dip->csums;

	start = dip->logical_offset;
	do {
		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
			struct page *page = bvec->bv_page;
			char *kaddr;
			u32 csum = ~(u32)0;
			unsigned long flags;

			local_irq_save(flags);
			kaddr = kmap_atomic(page, KM_IRQ0);
			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
					       csum, bvec->bv_len);
			btrfs_csum_final(csum, (char *)&csum);
			kunmap_atomic(kaddr, KM_IRQ0);
			local_irq_restore(flags);

			flush_dcache_page(bvec->bv_page);
			if (csum != *private) {
				printk(KERN_ERR "btrfs csum failed ino %llu off"
				      " %llu csum %u private %u\n",
				      (unsigned long long)btrfs_ino(inode),
				      (unsigned long long)start,
				      csum, *private);
				err = -EIO;
			}
		}

		start += bvec->bv_len;
		private++;
		bvec++;
	} while (bvec <= bvec_end);

	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
		      dip->logical_offset + dip->bytes - 1, GFP_NOFS);
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had a csum failure make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}

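/*
 * end_io handler for direct writes: finish every ordered extent the
 * bio spans, inserting file extent items (or marking prealloc extents
 * written) and updating the on-disk i_size.
 */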
static void btrfs_endio_direct_write(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered = NULL;
	struct extent_state *cached_state = NULL;
	u64 ordered_offset = dip->logical_offset;
	u64 ordered_bytes = dip->bytes;
	int ret;

	if (err)
		goto out_done;
again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes);
	if (!ret)
		goto out_test;

	BUG_ON(!ordered);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		err = -ENOMEM;
		goto out;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
		if (!ret)
			ret = btrfs_update_inode(trans, root, inode);
		err = ret;
		goto out;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			 ordered->file_offset + ordered->len - 1, 0,
			 &cached_state, GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
		ret = btrfs_mark_extent_written(trans, inode,
						ordered->file_offset,
						ordered->file_offset +
						ordered->len);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						  ordered->file_offset,
						  ordered->start,
						  ordered->disk_len,
						  ordered->len,
						  ordered->len,
						  0, 0, 0,
						  BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered->file_offset, ordered->len);
		if (ret) {
			err = ret;
			WARN_ON(1);
			goto out_unlock;
		}
	}

	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
	if (!ret)
		btrfs_update_inode(trans, root, inode);
	ret = 0;
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			     ordered->file_offset + ordered->len - 1,
			     &cached_state, GFP_NOFS);
out:
	btrfs_delalloc_release_metadata(inode, ordered->len);
	btrfs_end_transaction(trans, root);
	ordered_offset = ordered->file_offset + ordered->len;
	/* once for our lookup reference, once for the pending-ordered ref */
	btrfs_put_ordered_extent(ordered);
	btrfs_put_ordered_extent(ordered);

out_test:
	/*
	 * our bio might span multiple ordered extents.  If we haven't
	 * completed the accounting for the whole dio, go back and try again
	 */
	if (ordered_offset < dip->logical_offset + dip->bytes) {
		ordered_bytes = dip->logical_offset + dip->bytes -
			ordered_offset;
		goto again;
	}
out_done:
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had an error make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}

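/*
 * Async-submit "start" hook for direct-IO writes: computes the data
 * checksums for the bio before it is mapped to the underlying device.
 */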
static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
					struct bio *bio, int mirror_num,
					unsigned long bio_flags, u64 offset)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
	BUG_ON(ret);
	return 0;
}

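/*
 * Per-split-bio completion handler.  Records any error in the dio
 * private struct and, when the last pending bio finishes, ends the
 * original bio with success or failure as appropriate.
 */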
static void btrfs_end_dio_bio(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;

	if (err) {
		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
		       "sector %#Lx len %u err no %d\n",
		       (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
		       (unsigned long long)bio->bi_sector, bio->bi_size, err);
		dip->errors = 1;

		/*
		 * before the atomic variable goes to zero, we must make sure
		 * dip->errors is perceived to be set.
		 */
		smp_mb__before_atomic_dec();
	}

	/* if there are more bios still pending for this dio, just exit */
	if (!atomic_dec_and_test(&dip->pending_bios))
		goto out;

	if (dip->errors)
		bio_io_error(dip->orig_bio);
	else {
		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
		bio_endio(dip->orig_bio, 0);
	}
out:
	bio_put(bio);
}

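/* allocate a bio sized to the maximum number of vecs the device allows */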
static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
				       u64 first_sector, gfp_t gfp_flags)
{
	int nr_vecs = bio_get_nr_vecs(bdev);
	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
}

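/*
 * Submit one direct-IO bio: hook up the end_io workqueue, then either
 * hand writes to the async-submit machinery (csum done in the "start"
 * hook above), csum the write inline, or look up the expected csums
 * for a read, before mapping the bio to the right device.
 */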
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
					 int rw, u64 file_offset, int skip_sum,
					 u32 *csums, int async_submit)
{
	int write = rw & REQ_WRITE;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	bio_get(bio);
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	if (ret)
		goto err;

	if (skip_sum)
		goto map;

	if (write && async_submit) {
		ret = btrfs_wq_submit_bio(root->fs_info,
				   inode, rw, bio, 0, 0,
				   file_offset,
				   __btrfs_submit_bio_start_direct_io,
				   __btrfs_submit_bio_done);
		goto err;
	} else if (write) {
		/*
		 * If we aren't doing async submit, calculate the csum of the
		 * bio now.
		 */
		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
		if (ret)
			goto err;
	} else if (!skip_sum) {
		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
						file_offset, csums);
		if (ret)
			goto err;
	}

map:
	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
	bio_put(bio);
	return ret;
}

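/*
 * Split the original dio bio along device-stripe boundaries.  Each
 * chunk is cloned into its own bio and submitted with
 * __btrfs_submit_dio_bio(); pending_bios tracks how many are still in
 * flight so the final completion can end the original bio exactly once.
 */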
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
				    int skip_sum)
{
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct bio *bio;
	struct bio *orig_bio = dip->orig_bio;
	struct bio_vec *bvec = orig_bio->bi_io_vec;
	u64 start_sector = orig_bio->bi_sector;
	u64 file_offset = dip->logical_offset;
	u64 submit_len = 0;
	u64 map_length;
	int nr_pages = 0;
	u32 *csums = dip->csums;
	int ret = 0;
	int async_submit = 0;
	int write = rw & REQ_WRITE;

	map_length = orig_bio->bi_size;
	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
			      &map_length, NULL, 0);
	if (ret) {
		bio_put(orig_bio);
		return -EIO;
	}

	if (map_length >= orig_bio->bi_size) {
		bio = orig_bio;
		goto submit;
	}

	async_submit = 1;
	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
	if (!bio)
		return -ENOMEM;
	bio->bi_private = dip;
	bio->bi_end_io = btrfs_end_dio_bio;
	atomic_inc(&dip->pending_bios);

	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
		if (unlikely(map_length < submit_len + bvec->bv_len ||
		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len)) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the dip might get freed
			 * before we're done setting it up
			 */
			atomic_inc(&dip->pending_bios);
			ret = __btrfs_submit_dio_bio(bio, inode, rw,
						     file_offset, skip_sum,
						     csums, async_submit);
			if (ret) {
				bio_put(bio);
				atomic_dec(&dip->pending_bios);
				goto out_err;
			}

			/* writes use the ordered csums */
			if (!write && !skip_sum)
				csums = csums + nr_pages;
			start_sector += submit_len >> 9;
			file_offset += submit_len;

			submit_len = 0;
			nr_pages = 0;

			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
						  start_sector, GFP_NOFS);
			if (!bio)
				goto out_err;
			bio->bi_private = dip;
			bio->bi_end_io = btrfs_end_dio_bio;

			map_length = orig_bio->bi_size;
			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
					      &map_length, NULL, 0);
			if (ret) {
				bio_put(bio);
				goto out_err;
			}
		} else {
			submit_len += bvec->bv_len;
			nr_pages++;
			bvec++;
		}
	}

submit:
	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
				     csums, async_submit);
	if (!ret)
		return 0;

	bio_put(bio);
out_err:
	dip->errors = 1;
	/*
	 * before the atomic variable goes to zero, we must
	 * make sure dip->errors is perceived to be set.
	 */
	smp_mb__before_atomic_dec();
	if (atomic_dec_and_test(&dip->pending_bios))
		bio_io_error(dip->orig_bio);

	/* bio_end_io() will handle error, so we needn't return it */
	return 0;
}

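/*
 * Entry point handed to __blockdev_direct_IO().  Builds the per-dio
 * private struct, allocates the csum array for reads, picks the right
 * end_io handler and submits the bio.  On failure the ordered extent
 * created for a write is torn down before the bio is ended.
 */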
static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
				loff_t file_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_dio_private *dip;
	struct bio_vec *bvec = bio->bi_io_vec;
	int skip_sum;
	int write = rw & REQ_WRITE;
	int ret = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	dip = kmalloc(sizeof(*dip), GFP_NOFS);
	if (!dip) {
		ret = -ENOMEM;
		goto free_ordered;
	}
	dip->csums = NULL;

	/* writes use the ordered csum stuff, so we don't need dip->csums */
	if (!write && !skip_sum) {
		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
		if (!dip->csums) {
			kfree(dip);
			ret = -ENOMEM;
			goto free_ordered;
		}
	}

	dip->private = bio->bi_private;
	dip->inode = inode;
	dip->logical_offset = file_offset;

	dip->bytes = 0;
	do {
		dip->bytes += bvec->bv_len;
		bvec++;
	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));

	dip->disk_bytenr = (u64)bio->bi_sector << 9;
	bio->bi_private = dip;
	dip->errors = 0;
	dip->orig_bio = bio;
	atomic_set(&dip->pending_bios, 0);

	if (write)
		bio->bi_end_io = btrfs_endio_direct_write;
	else
		bio->bi_end_io = btrfs_endio_direct_read;

	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
	if (!ret)
		return;
free_ordered:
	/*
	 * If this is a write, we need to clean up the reserved space and kill
	 * the ordered extent.
	 */
	if (write) {
		struct btrfs_ordered_extent *ordered;
		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
			btrfs_free_reserved_extent(root, ordered->start,
						   ordered->disk_len);
		/* once for the lookup reference, once for the ordered extent */
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	bio_endio(bio, ret);
}

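/*
 * Validate alignment for direct IO: the file offset, every iovec base
 * address and every iovec length must be sector aligned, and reads may
 * not reuse the same iov_base twice (that would break csum checking on
 * the way back).  Returns 0 if the request is acceptable.
 */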
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	int seg;
	int i;
	size_t size;
	unsigned long addr;
	unsigned blocksize_mask = root->sectorsize - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;

	if (offset & blocksize_mask)
		goto out;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))
			goto out;

		/* If this is a write we don't need to check anymore */
		if (rw & WRITE)
			continue;

		/*
		 * Check to make sure we don't have duplicate iov_base's in this
		 * iovec, if so return EINVAL, otherwise we'll get csum errors
		 * when reading back.
		 */
		for (i = seg + 1; i < nr_segs; i++) {
			if (iov[seg].iov_base == iov[i].iov_base)
				goto out;
		}
	}
	retval = 0;
out:
	return retval;
}

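/*
 * ->direct_IO for btrfs.  After the alignment checks this reserves
 * delalloc space for writes, locks the extent range and waits out any
 * ordered extents that overlap it, then calls __blockdev_direct_IO()
 * with btrfs_get_blocks_direct/btrfs_submit_direct.  If the direct IO
 * is short or fails, the locked/delalloc bits on the untouched tail
 * are cleared so a buffered fallback can proceed.
 */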
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	ssize_t ret;
	int writing = rw & WRITE;
	int write_bits = 0;
	size_t count = iov_length(iov, nr_segs);

	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
			    offset, nr_segs)) {
		return 0;
	}

	lockstart = offset;
	lockend = offset + count - 1;

	if (writing) {
		ret = btrfs_delalloc_reserve_space(inode, count);
		if (ret)
			goto out;
	}

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state, GFP_NOFS);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there are no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered)
			break;
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     &cached_state, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}

	/*
	 * we don't use btrfs_set_extent_delalloc because we don't want
	 * the dirty or uptodate bits
	 */
	if (writing) {
		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     EXTENT_DELALLOC, 0, NULL, &cached_state,
				     GFP_NOFS);
		if (ret) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
					 lockend, EXTENT_LOCKED | write_bits,
					 1, 0, &cached_state, GFP_NOFS);
			goto out;
		}
	}

	free_extent_state(cached_state);
	cached_state = NULL;

	ret = __blockdev_direct_IO(rw, iocb, inode,
		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
		   btrfs_submit_direct, 0);

	if (ret < 0 && ret != -EIOCBQUEUED) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
			      offset + iov_length(iov, nr_segs) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
		/*
		 * We're falling back to buffered, unlock the section we didn't
		 * do IO on.
		 */
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
			      offset + iov_length(iov, nr_segs) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	}
out:
	free_extent_state(cached_state);
	return ret;
}

static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}

int btrfs_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btrfs_get_extent);
}

static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	/*
	 * don't start IO from direct reclaim; redirty the page and let
	 * regular writeback handle it
	 */
	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}

int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}

static int
btrfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}

static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}

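/*
 * Called when a page is being dropped from the page cache.  Waits for
 * writeback, then clears the extent-state bits for the page's range
 * and finishes any ordered extent whose IO will now never happen,
 * before stripping the page's private state.
 */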
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;

	/*
	 * we have the page locked, so new writeback can't start,
	 * and the dirty bit won't be cleared while we are here.
	 *
	 * Wait for IO on this page so that we can safely clear
	 * the PagePrivate2 bit and do ordered accounting
	 */
	wait_on_page_writeback(page);

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}
	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					      page_offset(page));
	if (ordered) {
		/*
		 * IO on this page will never be started, so we need
		 * to account for any ordered extents now
		 */
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
				 &cached_state, GFP_NOFS);
		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
		 */
		if (TestClearPagePrivate2(page)) {
			btrfs_finish_ordered_io(page->mapping->host,
						page_start, page_end);
		}
		btrfs_put_ordered_extent(ordered);
		cached_state = NULL;
		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
				 GFP_NOFS);
	}
	clear_extent_bit(tree, page_start, page_end,
		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
	__btrfs_releasepage(page, GFP_NOFS);

	ClearPageChecked(page);
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = fdentry(vma->vm_file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	unsigned long zero_start;
	loff_t size;
	int ret;
	u64 page_start;
	u64 page_end;

	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
		goto out;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	lock_page(page);
	size = i_size_read(inode);
	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
			 GFP_NOFS);
	set_page_extent_mapped(page);

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/*
	 * XXX - page_mkwrite gets called every time the page is dirtied, even
	 * if it was already dirty, so for space accounting reasons we need to
	 * clear any delalloc bits for the range we are fixing to save.  There
	 * is probably a better way to do this, but for now keep consistent with
	 * prepare_pages in the normal write path.
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			 0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);

out_unlock:
	if (!ret)
		return VM_FAULT_LOCKED;
	unlock_page(page);
	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out:
	return ret;
}

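/*
 * Truncate the inode down to i_size: zero the tail page, wait for
 * ordered IO past the new size, then drop extent items in batches,
 * restarting the transaction as needed.  See the long comment below
 * for why three separate reservations are involved.
 */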
static int btrfs_truncate(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv;
	int ret;
	int err = 0;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 mask = root->sectorsize - 1;

	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
	if (ret)
		return ret;

	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

	/*
	 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
	 * 3 things going on here
	 *
	 * 1) We need to reserve space for our orphan item and the space to
	 * delete our orphan item.  Lord knows we don't want to have a dangling
	 * orphan item because we didn't reserve space to remove it.
	 *
	 * 2) We need to reserve space to update our inode.
	 *
	 * 3) We need to have something to cache all the space that is going to
	 * be freed up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to all be separate.  The fact is we can use a lot
	 * of space doing the truncate, and we have no earthly idea how much
	 * space we will use, so we need the truncate reservation to be
	 * separate so it doesn't end up using space reserved for updating the
	 * inode or removing the orphan item.  We also need to be able to stop
	 * the transaction and start a new one, which means we need to be able
	 * to update the inode several times, and we have no way of knowing
	 * how many times that will be, so we can't just reserve 1 item for
	 * the entirety of the operation, so that has to be done separately as
	 * well.  Then there is the orphan item, which does indeed need to be
	 * held on to for the whole operation, and we need nobody to touch
	 * this reserved space except the orphan code.
	 *
	 * So that leaves us with
	 *
	 * 1) root->orphan_block_rsv - for the orphan deletion.
	 * 2) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(root);
	if (!rsv)
		return -ENOMEM;
	btrfs_add_durable_block_rsv(root->fs_info, rsv);

	trans = btrfs_start_transaction(root, 4);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out;
	}

	/*
	 * Reserve space for the truncate process.  Truncate should be adding
	 * space, but if there are snapshots it may end up using space.
	 */
	ret = btrfs_truncate_reserve_metadata(trans, root, rsv);
	BUG_ON(ret);

	ret = btrfs_orphan_add(trans, inode);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto out;
	}

	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);

	/*
	 * Ok so we've already migrated our bytes over for the truncate, so
	 * here just reserve the one slot we need for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out;
	}
	trans->block_rsv = rsv;

	/*
	 * setattr is responsible for setting the ordered_data_close flag,
	 * but that is only tested during the last file release.  That
	 * could happen well after the next commit, leaving a great big
	 * window where new writes may get lost if someone chooses to write
	 * to this file after truncating to zero
	 *
	 * The inode doesn't have any dirty data here, and so if we commit
	 * this is a noop.  If someone immediately starts writing to the inode
	 * it is very likely we'll catch some of their writes in this
	 * transaction, and the commit will find this file on the ordered
	 * data list with good things to send down.
	 *
	 * This is a best effort solution, there is still a window where
	 * using truncate to replace the contents of the file will
	 * end up with a zero length file after a crash.
	 */
	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
		btrfs_add_ordered_operation(trans, root, inode);

	while (1) {
		if (!trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out;
			}

			ret = btrfs_truncate_reserve_metadata(trans, root,
							      rsv);
			BUG_ON(ret);

			trans->block_rsv = rsv;
		}

		ret = btrfs_truncate_inode_items(trans, root, inode,
						 inode->i_size,
						 BTRFS_EXTENT_DATA_KEY);
		if (ret != -EAGAIN) {
			err = ret;
			break;
		}

		trans->block_rsv = &root->fs_info->trans_block_rsv;
		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);
	}

	if (ret == 0 && inode->i_nlink > 0) {
		trans->block_rsv = root->orphan_block_rsv;
		ret = btrfs_orphan_del(trans, inode);
		if (ret)
			err = ret;
	} else if (ret && inode->i_nlink > 0) {
		/*
		 * Failed to do the truncate, remove us from the in memory
		 * orphan list.
		 */
		ret = btrfs_orphan_del(NULL, inode);
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && !err)
		err = ret;

	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
	btrfs_btree_balance_dirty(root, nr);

out:
	btrfs_free_block_rsv(root, rsv);

	if (ret && !err)
		err = ret;

	return err;
}

/*
 * create a new subvolume directory/inode (helper for the ioctl).
 */
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root, u64 new_dirid)
{
	struct inode *inode;
	int err;
	u64 index = 0;

	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
				new_dirid, S_IFDIR | 0700, &index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	inode->i_nlink = 1;
	btrfs_i_size_write(inode, 0);

	err = btrfs_update_inode(trans, new_root, inode);
	BUG_ON(err);

	iput(inode);
	return 0;
}

/* helper function for file defrag and space balancing.  This
 * forces readahead on a given range of bytes in an inode
 */
unsigned long btrfs_force_ra(struct address_space *mapping,
			     struct file_ra_state *ra, struct file *file,
			     pgoff_t offset, pgoff_t last_index)
{
	pgoff_t req_size = last_index - offset + 1;

	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
	return offset + req_size;
}

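/*
 * allocate an in-memory btrfs inode from the slab cache and initialize
 * all of its bookkeeping (extent trees, ordered tree, lists) to empty.
 */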
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->space_info = NULL;
	ei->generation = 0;
	ei->sequence = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->reserved_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->index_cnt = (u64)-1;
	ei->last_unlink_trans = 0;

	atomic_set(&ei->outstanding_extents, 0);
	atomic_set(&ei->reserved_extents, 0);

	ei->ordered_data_close = 0;
	ei->orphan_meta_reserved = 0;
	ei->dummy_inode = 0;
	ei->in_defrag = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(&ei->io_tree, &inode->i_data);
	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
	mutex_init(&ei->log_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->i_orphan);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->ordered_operations);
	RB_CLEAR_NODE(&ei->rb_node);

	return inode;
}

static void btrfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}

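/*
 * Tear down an in-memory inode: warn about anything that should
 * already be gone (pages, outstanding extents), drop it from the
 * ordered operation and orphan lists, clean up any leftover ordered
 * extents and free the structure via RCU.
 */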
void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
	WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));

	/*
	 * This can happen when we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
		       (unsigned long long)btrfs_ino(inode));
		list_del_init(&BTRFS_I(inode)->i_orphan);
	}
	spin_unlock(&root->orphan_lock);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			printk(KERN_ERR "btrfs found ordered "
			       "extent %llu %llu on inode cleanup\n",
			       (unsigned long long)ordered->file_offset,
			       (unsigned long long)ordered->len);
			btrfs_remove_ordered_extent(inode, ordered);
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	btrfs_remove_delayed_node(inode);
	call_rcu(&inode->i_rcu, btrfs_i_callback);
}

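/*
 * drop the inode immediately if its root is being deleted (and it is
 * not the free-space inode); otherwise defer to the generic VFS logic.
 */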
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_refs(&root->root_item) == 0 &&
	    !is_free_space_inode(root, inode))
		return 1;
	else
		return generic_drop_inode(inode);
}

static void init_once(void *foo)
{
	struct btrfs_inode *ei = (struct btrfs_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
	if (btrfs_free_space_cachep)
		kmem_cache_destroy(btrfs_free_space_cachep);
}

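/*
 * create the slab caches used throughout btrfs; on any failure,
 * btrfs_destroy_cachep() safely tears down whatever was created.
 */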
int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
			sizeof(struct btrfs_free_space), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}

static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (inode_get_bytes(inode) +
			BTRFS_I(inode)->delalloc_bytes) >> 9;
	return 0;
}

/*
 * If a file is moved, it will inherit the cow and compression flags of the new
 * directory.
 */
static void fixup_inode_flags(struct inode *dir, struct inode *inode)
{
	struct btrfs_inode *b_dir = BTRFS_I(dir);
	struct btrfs_inode *b_inode = BTRFS_I(inode);

	if (b_dir->flags & BTRFS_INODE_NODATACOW)
		b_inode->flags |= BTRFS_INODE_NODATACOW;
	else
		b_inode->flags &= ~BTRFS_INODE_NODATACOW;

	if (b_dir->flags & BTRFS_INODE_COMPRESS)
		b_inode->flags |= BTRFS_INODE_COMPRESS;
	else
		b_inode->flags &= ~BTRFS_INODE_COMPRESS;
}

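/*
 * Rename old_dentry in old_dir to new_dentry in new_dir.  Handles the
 * extra bookkeeping a subvolume rename needs (full log commit, unlink
 * via the root item) and pins the log so a crash leaves the inode
 * reachable under either the old or the new name, never neither.
 */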
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;
	u64 old_ino = btrfs_ino(old_inode);

	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	/*
	 * we're using rename to replace one file with another.
	 * and the replacement file is large.  Start IO on it now so
	 * we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);
	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they're normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 20);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		root->fs_info->last_trans_log_full_commit = trans->transid;
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_ino,
					     btrfs_ino(new_dir), index);
		if (ret)
			goto out_fail;
		/*
		 * this is an ugly little race, but the rename is required
		 * to make sure that if we crash, the inode is either at the
		 * old name or the new one.  pinning the log transaction lets
		 * us make sure we don't allow a log commit to come in after
		 * we unlink the name but before we add the new name back in.
		 */
		btrfs_pin_log_trans(root);
	}
	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
		btrfs_add_ordered_operation(trans, root, old_inode);

	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
					  old_dentry->d_name.name,
					  old_dentry->d_name.len);
	} else {
		ret = __btrfs_unlink_inode(trans, root, old_dir,
					   old_dentry->d_inode,
					   old_dentry->d_name.name,
					   old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, old_inode);
	}
	BUG_ON(ret);

	if (new_inode) {
		new_inode->i_ctime = CURRENT_TIME;
		if (unlikely(btrfs_ino(new_inode) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			root_objectid = BTRFS_I(new_inode)->location.objectid;
			ret = btrfs_unlink_subvol(trans, dest, new_dir,
						root_objectid,
						new_dentry->d_name.name,
						new_dentry->d_name.len);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, new_dir,
						 new_dentry->d_inode,
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		BUG_ON(ret);
		if (new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			BUG_ON(ret);
		}
	}

	fixup_inode_flags(new_dir, old_inode);

	ret = btrfs_add_link(trans, new_dir, old_inode,
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	BUG_ON(ret);

	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct dentry *parent = dget_parent(new_dentry);
		btrfs_log_new_name(trans, old_inode, old_dir, parent);
		dput(parent);
		btrfs_end_log_trans(root);
	}
out_fail:
	btrfs_end_transaction_throttle(trans, root);
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&root->fs_info->subvol_sem);

	return ret;
}

/*
 * some fairly slow code that needs optimization.  This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	       atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
			    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}

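/*
 * Create a symlink: the target string is stored as an inline file
 * extent item, so its length is limited to what fits in one leaf
 * (BTRFS_MAX_INLINE_DATA_SIZE).
 */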
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFLNK|S_IRWXUGO, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = btrfs_ino(inode);
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		btrfs_free_path(path);
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

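/*
 * Allocate extents for fallocate().  Reserves and inserts PREALLOC
 * file extents one chunk at a time until num_bytes is covered,
 * updating i_size along the way unless FALLOC_FL_KEEP_SIZE was given.
 * If the caller passed in a transaction handle it is reused; otherwise
 * a small transaction is started per chunk.
 */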
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	int ret = 0;
	bool own_trans = true;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
					   0, *alloc_hint, (u64)-1, &ins, 1);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset - 1, 0);

		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);

		if (own_trans)
			btrfs_end_transaction(trans, root);
	}
	return ret;
}

int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   trans);
}

static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

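/*
 * permission check: deny writes on read-only roots and on inodes
 * flagged read-only, then fall through to the generic checks
 * (including ACLs via btrfs_check_acl).
 */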
static int btrfs_permission(struct inode *inode, int mask, unsigned int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_readonly(root) && (mask & MAY_WRITE))
		return -EROFS;
	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, flags, btrfs_check_acl);
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};

static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= btrfs_getattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};