// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include "affs.h"

static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);

static int
affs_file_open(struct inode *inode, struct file *filp)
{
	pr_debug("open(%lu,%d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
	atomic_inc(&AFFS_I(inode)->i_opencnt);
	return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
	pr_debug("release(%lu, %d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));

	if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
		inode_lock(inode);
		if (inode->i_size != AFFS_I(inode)->mmu_private)
			affs_truncate(inode);
		affs_free_prealloc(inode);
		inode_unlock(inode);
	}

	return 0;
}
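
/*
 * AFFS chains file data through extension blocks: the file header holds
 * the first s_hashsize data-block pointers, and each extension block
 * holds the next batch plus the key of the block after it.  To avoid
 * walking that singly linked chain from the start on every lookup, each
 * inode keeps two caches in one zeroed page (sizes per affs.h): a
 * linear cache i_lc remembering the key of every 2^i_lc_shift'th
 * extension block, and a small associative cache i_ac for recently used
 * ones.  When the chain outgrows the linear cache, the cache is not
 * enlarged; i_lc_shift grows instead, so every slot covers a coarser
 * stride, and refilling a slot costs at most i_lc_mask + 1 chain reads
 * from its predecessor.
 */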
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
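
/*
 * Allocate a fresh extension block, initialise it, and chain it after
 * *bh, the current tail of the chain.  AFFS block checksums are plain
 * 32-bit sums, so updating one field of the old tail only needs the
 * difference (blocknr - tmp) folded into its stored checksum via
 * affs_adjust_checksum().
 */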
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);

	mark_buffer_dirty_inode(new_bh, inode);

	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		get_bh(bh);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}
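
/*
 * Slow path of affs_get_extblock().  Lookup strategy, cheapest first:
 * the block directly after the cached one (sequential I/O), the file
 * header itself for ext == 0, allocation of a brand-new chain tail,
 * then the linear cache, the associative cache, and finally a walk
 * along the on-disk chain from the nearest cached predecessor.
 */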
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
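
/*
 * Map a file-relative block onto a device block for the generic I/O
 * paths.  The logical block number splits into an extension block
 * index and a slot in that block's table: e.g. with s_hashsize == 72
 * (a 512-byte-block volume), file block 100 is slot 28 of extension
 * block 1.  With create != 0, exactly one new block may be appended.
 */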
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *ext_bh;
	u32 ext;

	pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino,
		 (unsigned long long)block);

	BUG_ON(block > (sector_t)0x7fffffffUL);

	if (block >= AFFS_I(inode)->i_blkcnt) {
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	//lock cache
	affs_lock_ext(inode);

	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block",
				     "block already set (%llx)",
				     (unsigned long long)bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %llu",
		   (unsigned long long)block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}
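
/*
 * On FFS volumes data blocks contain nothing but data, so the folio
 * I/O paths below can simply be wired up to affs_get_block() through
 * the generic mpage/buffer helpers.
 */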
static int affs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, affs_get_block);
}

static int affs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, affs_get_block);
}

static void affs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		affs_truncate(inode);
	}
}

static ssize_t
affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (iov_iter_rw(iter) == WRITE) {
		loff_t size = offset + count;

		/*
		 * Writes that would extend the allocated area cannot be
		 * done directly; returning 0 makes the generic code fall
		 * back to a buffered write.
		 */
		if (AFFS_I(inode)->mmu_private < size)
			return 0;
	}

	ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		affs_write_failed(mapping, offset + count);
	return ret;
}
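
/*
 * AFFS has no sparse files, so write_begin goes through
 * cont_write_begin() with ->mmu_private as the zeroed-up-to-here
 * watermark: any gap between the current allocation and the write
 * position is filled with zeroed blocks before the write proceeds.
 */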
static int affs_write_begin(const struct kiocb *iocb,
			    struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct folio **foliop, void **fsdata)
{
	int ret;

	ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
			       affs_get_block,
			       &AFFS_I(mapping->host)->mmu_private);
	if (unlikely(ret))
		affs_write_failed(mapping, pos + len);

	return ret;
}

static int affs_write_end(const struct kiocb *iocb,
			  struct address_space *mapping, loff_t pos,
			  unsigned int len, unsigned int copied,
			  struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret;

	ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);

	/* Clear Archived bit on file writes, as AmigaOS would do */
	if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
		AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
		mark_inode_dirty(inode);
	}

	return ret;
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, affs_get_block);
}

const struct address_space_operations affs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = affs_read_folio,
	.writepages = affs_writepages,
	.write_begin = affs_write_begin,
	.write_end = affs_write_end,
	.direct_IO = affs_direct_IO,
	.migrate_folio = buffer_migrate_folio,
	.bmap = _affs_bmap
};
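
/*
 * The helpers below turn "logical block of this inode" into a
 * buffer_head by running affs_get_block() on a throwaway buffer_head
 * and then grabbing the resulting device block.  They differ only in
 * how that block is brought in: read from disk, zeroed, or left
 * uninitialised.
 */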
static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, create);
	if (!err) {
		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}
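
/*
 * OFS data blocks carry a small header (type, key, sequence number,
 * size, next pointer, checksum) inside every block, so file data on
 * disk is neither contiguous nor page-aligned and the generic block
 * helpers cannot map it.  The _ofs paths below copy between folios and
 * AFFS_DATA(bh) by hand, s_data_blksize payload bytes per block.
 */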
static int affs_do_read_folio_ofs(struct folio *folio, size_t to, int create)
{
	struct inode *inode = folio->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	size_t pos = 0;
	size_t bidx, boff, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %ld, 0, %zu)\n", __func__, inode->i_ino,
		 folio->index, to);
	BUG_ON(to > folio_size(folio));
	bsize = AFFS_SB(sb)->s_data_blksize;
	tmp = folio_pos(folio);
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (pos < to) {
		bh = affs_bread_ino(inode, bidx, create);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - pos);
		BUG_ON(pos + tmp > to || tmp > bsize);
		memcpy_to_folio(folio, pos, AFFS_DATA(bh) + boff, tmp);
		affs_brelse(bh);
		bidx++;
		pos += tmp;
		boff = 0;
	}
	return 0;
}
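
/*
 * Grow an OFS file to newsize: top up the partially used last data
 * block with zeroes, then keep appending zeroed data blocks, linking
 * each to its predecessor through the per-block next pointer, until
 * the requested size is reached.
 */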
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		BUG_ON(tmp > bsize);
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

			if (tmp_next)
				affs_warning(sb, "extent_file_ofs",
					     "next block already set for %d (%d)",
					     bidx, tmp_next);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}

static int affs_read_folio_ofs(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	size_t to;
	int err;

	pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, folio->index);
	to = folio_size(folio);
	if (folio_pos(folio) + to > inode->i_size) {
		to = inode->i_size - folio_pos(folio);
		folio_zero_segment(folio, to, folio_size(folio));
	}

	err = affs_do_read_folio_ofs(folio, to, 0);
	if (!err)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	return err;
}
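
/*
 * write_begin for OFS: if the write starts beyond the current
 * allocation, extend the file first, then make sure the folio is fully
 * read in, because affs_write_end_ofs() copies whole block payloads
 * back out of the folio.
 */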
static int affs_write_begin_ofs(const struct kiocb *iocb,
				struct address_space *mapping,
				loff_t pos, unsigned len,
				struct folio **foliop, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct folio *folio;
	pgoff_t index;
	int err = 0;

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_SHIFT;
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*foliop = folio;

	if (folio_test_uptodate(folio))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_read_folio_ofs(folio, folio_size(folio), 1);
	if (err) {
		folio_unlock(folio);
		folio_put(folio);
	}
	return err;
}

static int affs_write_end_ofs(const struct kiocb *iocb,
			      struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned copied,
			      struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	unsigned from, to;
	u32 tmp;
	int written;

	from = pos & (PAGE_SIZE - 1);
	to = from + len;
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
	 * we don't have to, because the folio should always be uptodate here,
	 * due to write_begin.
	 */

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = folio_address(folio);

	bh = NULL;
	written = 0;
	tmp = (folio->index << PAGE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
		tmp = min(bsize - boff, to - from);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(
			max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size)));
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
	}
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto err_bh;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto err_bh;
		tmp = min(bsize, to - from);
		BUG_ON(tmp > bsize);
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	folio_mark_uptodate(folio);

done:
	affs_brelse(bh);
	tmp = (folio->index << PAGE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

	/* Clear Archived bit on file writes, as AmigaOS would do */
	if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
		AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
		mark_inode_dirty(inode);
	}

err_first_bh:
	folio_unlock(folio);
	folio_put(folio);

	return written;

err_bh:
	/* capture the error from the failed bh before dropping it */
	if (!written)
		written = PTR_ERR(bh);
	bh = prev_bh;
	goto done;
}

const struct address_space_operations affs_aops_ofs = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = affs_read_folio_ofs,
	//.writepages = affs_writepages_ofs,
	.write_begin = affs_write_begin_ofs,
	.write_end = affs_write_end_ofs,
	.migrate_folio = filemap_migrate_folio,
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);

	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}

/* Truncate (or enlarge) a file to the requested size. */
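
/*
 * Enlarging is delegated to the write path: a zero-length write at the
 * new EOF makes write_begin/write_end allocate and zero everything up
 * to i_size.  Shrinking frees the data blocks hanging off the new last
 * extension block and then releases the rest of the extension chain.
 */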
void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
		 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size);

	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		struct address_space *mapping = inode->i_mapping;
		struct folio *folio;
		void *fsdata = NULL;
		loff_t isize = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &folio, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, folio, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate",
			     "unexpected read error for ext block %u (%ld)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) {
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate",
					     "unexpected read error for last block %u (%ld)",
					     last_blk, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}
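
/*
 * fsync: flush dirty folios in the range, then push the inode and the
 * underlying block device out as well, so the extension and header
 * metadata reach the disk together with the data.
 */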
int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int ret, err;

	err = file_write_and_wait_range(filp, start, end);
	if (err)
		return err;

	inode_lock(inode);
	ret = write_inode_now(inode, 0);
	err = sync_blockdev(inode->i_sb->s_bdev);
	if (!ret)
		ret = err;
	inode_unlock(inode);
	return ret;
}

const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap_prepare	= generic_file_mmap_prepare,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= affs_file_fsync,
	.splice_read	= filemap_splice_read,
};

const struct inode_operations affs_file_inode_operations = {
	.setattr	= affs_notify_change,
};