Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/fs/cachefiles/io.c
26278 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/* kiocb-using read/write
3
*
4
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
5
* Written by David Howells ([email protected])
6
*/
7
8
#include <linux/mount.h>
9
#include <linux/slab.h>
10
#include <linux/file.h>
11
#include <linux/uio.h>
12
#include <linux/bio.h>
13
#include <linux/falloc.h>
14
#include <linux/sched/mm.h>
15
#include <trace/events/fscache.h>
16
#include <trace/events/netfs.h>
17
#include "internal.h"
18
19
/*
 * State wrapped around a kiocb issued against the backing cache file,
 * carrying the bookkeeping needed to complete a cachefiles read or write.
 */
struct cachefiles_kiocb {
	struct kiocb		iocb;		/* The I/O control block handed to the VFS */
	refcount_t		ki_refcnt;	/* Held by submitter and completion handler (set to 2) */
	loff_t			start;		/* File position the write started at */
	union {
		size_t		skipped;	/* Read: bytes zero-filled at the front, plus bytes read on completion */
		size_t		len;		/* Write: length of the I/O */
	};
	struct cachefiles_object *object;	/* Cache object being read/written */
	netfs_io_terminated_t	term_func;	/* Caller's termination function (may be NULL) */
	void			*term_func_priv; /* Opaque data for term_func */
	bool			was_async;	/* T if completion ran asynchronously */
	unsigned int		inval_counter;	/* Copy of cookie->inval_counter */
	u64			b_writing;	/* Number of cache blocks accounted in cache->b_writing */
};
34
35
/*
 * Drop a reference on a cachefiles kiocb.  On the final put, release the
 * object and file references taken at submission time and free the wrapper.
 */
static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
{
	if (refcount_dec_and_test(&ki->ki_refcnt)) {
		cachefiles_put_object(ki->object, cachefiles_obj_put_ioreq);
		fput(ki->iocb.ki_filp);
		kfree(ki);
	}
}
43
44
/*
 * Handle completion of a read from the cache.
 *
 * Runs either directly from cachefiles_read() for a synchronous result or as
 * the kiocb ki_complete callback for async DIO.  Passes the outcome to the
 * caller's termination function, then drops the completion ref on the kiocb.
 */
static void cachefiles_read_complete(struct kiocb *iocb, long ret)
{
	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
	struct inode *inode = file_inode(ki->iocb.ki_filp);

	_enter("%ld", ret);

	if (ret < 0)
		trace_cachefiles_io_error(ki->object, inode, ret,
					  cachefiles_trace_read_error);

	if (ki->term_func) {
		if (ret >= 0) {
			/* If the cookie got invalidated whilst the I/O was in
			 * flight, the data we read is out of date - report
			 * -ESTALE instead; otherwise fold the bytes read into
			 * the running total alongside the zero-filled bytes.
			 */
			if (ki->object->cookie->inval_counter == ki->inval_counter)
				ki->skipped += ret;
			else
				ret = -ESTALE;
		}

		ki->term_func(ki->term_func_priv, ret);
	}

	cachefiles_put_kiocb(ki);
}
71
72
/*
 * Initiate a read from the cache.
 *
 * Read from the backing file of @cres at @start_pos into @iter.  Unless
 * @read_hole is NETFS_READ_HOLE_IGNORE, a hole at the front of the range is
 * first probed with SEEK_DATA and either zero-filled or, for
 * NETFS_READ_HOLE_FAIL, causes the read to fail with -ENODATA.  @term_func
 * (if given) is called with @term_func_priv and the result on completion.
 */
static int cachefiles_read(struct netfs_cache_resources *cres,
			   loff_t start_pos,
			   struct iov_iter *iter,
			   enum netfs_read_from_hole read_hole,
			   netfs_io_terminated_t term_func,
			   void *term_func_priv)
{
	struct cachefiles_object *object;
	struct cachefiles_kiocb *ki;
	struct file *file;
	unsigned int old_nofs;
	ssize_t ret = -ENOBUFS;
	size_t len = iov_iter_count(iter), skipped = 0;

	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
		goto presubmission_error;

	fscache_count_read();
	object = cachefiles_cres_object(cres);
	file = cachefiles_cres_file(cres);

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start_pos, len,
	       i_size_read(file_inode(file)));

	/* If the caller asked us to seek for data before doing the read, then
	 * we should do that now. If we find a gap, we fill it with zeros.
	 */
	if (read_hole != NETFS_READ_HOLE_IGNORE) {
		loff_t off = start_pos, off2;

		off2 = cachefiles_inject_read_error();
		if (off2 == 0)
			off2 = vfs_llseek(file, off, SEEK_DATA);
		if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO && off2 != -ENXIO) {
			/* Seek failed with a real error (not "no more data") */
			skipped = 0;
			ret = off2;
			goto presubmission_error;
		}

		if (off2 == -ENXIO || off2 >= start_pos + len) {
			/* The region is beyond the EOF or there's no more data
			 * in the region, so clear the rest of the buffer and
			 * return success.
			 */
			ret = -ENODATA;
			if (read_hole == NETFS_READ_HOLE_FAIL)
				goto presubmission_error;

			iov_iter_zero(len, iter);
			skipped = len;
			ret = 0;
			goto presubmission_error;
		}

		/* Zero-fill the hole up to where the data starts */
		skipped = off2 - off;
		iov_iter_zero(skipped, iter);
	}

	ret = -ENOMEM;
	ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
	if (!ki)
		goto presubmission_error;

	/* One ref for this function, one for the completion handler */
	refcount_set(&ki->ki_refcnt, 2);
	ki->iocb.ki_filp	= file;
	ki->iocb.ki_pos		= start_pos + skipped;
	ki->iocb.ki_flags	= IOCB_DIRECT;
	ki->iocb.ki_ioprio	= get_current_ioprio();
	ki->skipped		= skipped;
	ki->object		= object;
	ki->inval_counter	= cres->inval_counter;
	ki->term_func		= term_func;
	ki->term_func_priv	= term_func_priv;
	ki->was_async		= true;

	if (ki->term_func)
		ki->iocb.ki_complete = cachefiles_read_complete;

	get_file(ki->iocb.ki_filp);
	cachefiles_grab_object(object, cachefiles_obj_get_ioreq);

	trace_cachefiles_read(object, file_inode(file), ki->iocb.ki_pos, len - skipped);
	/* Submit with fs-reclaim masked off (GFP_NOFS allocation context) */
	old_nofs = memalloc_nofs_save();
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
	memalloc_nofs_restore(old_nofs);
	switch (ret) {
	case -EIOCBQUEUED:
		/* Async I/O queued; completion handler will finish up */
		goto in_progress;

	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/* There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		/* Completed synchronously: run the completion here */
		ki->was_async = false;
		cachefiles_read_complete(&ki->iocb, ret);
		if (ret > 0)
			ret = 0;
		break;
	}

in_progress:
	cachefiles_put_kiocb(ki);
	_leave(" = %zd", ret);
	return ret;

presubmission_error:
	/* Nothing was submitted; report the prefilled/skipped amount or the
	 * error directly to the caller's termination function.
	 */
	if (term_func)
		term_func(term_func_priv, ret < 0 ? ret : skipped);
	return ret;
}
194
195
/*
 * Query the occupancy of the cache in a region, returning where the next chunk
 * of data starts and how long it is.
 *
 * Probes [start, start+len) with SEEK_DATA then SEEK_HOLE, rounding partial
 * blocks away so that only whole @granularity-sized chunks are reported.
 * Returns 0 with *_data_start/*_data_len filled in, -ENODATA if there is no
 * usable data in the range, or -ENOBUFS on failure.
 */
static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
				      loff_t start, size_t len, size_t granularity,
				      loff_t *_data_start, size_t *_data_len)
{
	struct cachefiles_object *object;
	struct file *file;
	loff_t off, off2;

	*_data_start = -1;
	*_data_len = 0;

	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
		return -ENOBUFS;

	object = cachefiles_cres_object(cres);
	file = cachefiles_cres_file(cres);
	/* Never report at finer grain than the cache's block size */
	granularity = max_t(size_t, object->volume->cache->bsize, granularity);

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start, len,
	       i_size_read(file_inode(file)));

	/* Find where the data begins */
	off = cachefiles_inject_read_error();
	if (off == 0)
		off = vfs_llseek(file, start, SEEK_DATA);
	if (off == -ENXIO)
		return -ENODATA; /* Beyond EOF */
	if (off < 0 && off >= (loff_t)-MAX_ERRNO)
		return -ENOBUFS; /* Error. */
	if (round_up(off, granularity) >= start + len)
		return -ENODATA; /* No data in range */

	/* Find where the data ends */
	off2 = cachefiles_inject_read_error();
	if (off2 == 0)
		off2 = vfs_llseek(file, off, SEEK_HOLE);
	if (off2 == -ENXIO)
		return -ENODATA; /* Beyond EOF */
	if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
		return -ENOBUFS; /* Error. */

	/* Round away partial blocks */
	off = round_up(off, granularity);
	off2 = round_down(off2, granularity);
	if (off2 <= off)
		return -ENODATA;

	*_data_start = off;
	if (off2 > start + len)
		*_data_len = len;
	else
		*_data_len = off2 - off;
	return 0;
}
252
253
/*
 * Handle completion of a write to the cache.
 *
 * Runs either directly from __cachefiles_write() for a synchronous result or
 * as the kiocb ki_complete callback for async DIO.
 */
static void cachefiles_write_complete(struct kiocb *iocb, long ret)
{
	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
	struct cachefiles_object *object = ki->object;
	struct inode *inode = file_inode(ki->iocb.ki_filp);

	_enter("%ld", ret);

	/* NOTE(review): only the async path ends the kiocb write state here;
	 * presumably the matching start happens inside the async write
	 * submission - confirm against vfs_iocb_iter_write().
	 */
	if (ki->was_async)
		kiocb_end_write(iocb);

	if (ret < 0)
		trace_cachefiles_io_error(object, inode, ret,
					  cachefiles_trace_write_error);

	/* Retire the in-flight block count and mark the cookie as having data */
	atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
	set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
	if (ki->term_func)
		ki->term_func(ki->term_func_priv, ret);
	cachefiles_put_kiocb(ki);
}
277
278
/*
 * Initiate a write to the cache.
 *
 * Write the contents of @iter to @file (the file backing @object) at
 * @start_pos using direct I/O.  @term_func (if given) is called with
 * @term_func_priv and the result on completion.
 */
int __cachefiles_write(struct cachefiles_object *object,
		       struct file *file,
		       loff_t start_pos,
		       struct iov_iter *iter,
		       netfs_io_terminated_t term_func,
		       void *term_func_priv)
{
	struct cachefiles_cache *cache;
	struct cachefiles_kiocb *ki;
	unsigned int old_nofs;
	ssize_t ret;
	size_t len = iov_iter_count(iter);

	fscache_count_write();
	cache = object->volume->cache;

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start_pos, len,
	       i_size_read(file_inode(file)));

	ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
	if (!ki) {
		if (term_func)
			term_func(term_func_priv, -ENOMEM);
		return -ENOMEM;
	}

	/* One ref for this function, one for the completion handler */
	refcount_set(&ki->ki_refcnt, 2);
	ki->iocb.ki_filp	= file;
	ki->iocb.ki_pos		= start_pos;
	ki->iocb.ki_flags	= IOCB_DIRECT | IOCB_WRITE;
	ki->iocb.ki_ioprio	= get_current_ioprio();
	ki->object		= object;
	ki->start		= start_pos;
	ki->len			= len;
	ki->term_func		= term_func;
	ki->term_func_priv	= term_func_priv;
	ki->was_async		= true;
	/* Number of cache blocks covered by this write, rounded up */
	ki->b_writing		= (len + (1 << cache->bshift) - 1) >> cache->bshift;

	if (ki->term_func)
		ki->iocb.ki_complete = cachefiles_write_complete;
	atomic_long_add(ki->b_writing, &cache->b_writing);

	get_file(ki->iocb.ki_filp);
	cachefiles_grab_object(object, cachefiles_obj_get_ioreq);

	trace_cachefiles_write(object, file_inode(file), ki->iocb.ki_pos, len);
	/* Submit with fs-reclaim masked off (GFP_NOFS allocation context) */
	old_nofs = memalloc_nofs_save();
	ret = cachefiles_inject_write_error();
	if (ret == 0)
		ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
	memalloc_nofs_restore(old_nofs);
	switch (ret) {
	case -EIOCBQUEUED:
		/* Async I/O queued; completion handler will finish up */
		goto in_progress;

	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/* There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		/* Completed synchronously: run the completion here */
		ki->was_async = false;
		cachefiles_write_complete(&ki->iocb, ret);
		break;
	}

in_progress:
	cachefiles_put_kiocb(ki);
	_leave(" = %zd", ret);
	return ret;
}
358
359
static int cachefiles_write(struct netfs_cache_resources *cres,
360
loff_t start_pos,
361
struct iov_iter *iter,
362
netfs_io_terminated_t term_func,
363
void *term_func_priv)
364
{
365
if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
366
if (term_func)
367
term_func(term_func_priv, -ENOBUFS);
368
trace_netfs_sreq(term_func_priv, netfs_sreq_trace_cache_nowrite);
369
return -ENOBUFS;
370
}
371
372
return __cachefiles_write(cachefiles_cres_object(cres),
373
cachefiles_cres_file(cres),
374
start_pos, iter,
375
term_func, term_func_priv);
376
}
377
378
/*
 * Work out how a read subrequest should be served: from the cache, from the
 * server (optionally copying to the cache afterwards), or by zero-filling.
 * *_len may be shrunk to stop at a cached/uncached boundary.
 */
static inline enum netfs_io_source
cachefiles_do_prepare_read(struct netfs_cache_resources *cres,
			   loff_t start, size_t *_len, loff_t i_size,
			   unsigned long *_flags, ino_t netfs_ino)
{
	enum cachefiles_prepare_read_trace why;
	struct cachefiles_object *object = NULL;
	struct cachefiles_cache *cache;
	struct fscache_cookie *cookie = fscache_cres_cookie(cres);
	const struct cred *saved_cred;
	struct file *file = cachefiles_cres_file(cres);
	enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
	size_t len = *_len;
	loff_t off, to;
	ino_t ino = file ? file_inode(file)->i_ino : 0;
	int rc;

	_enter("%zx @%llx/%llx", len, start, i_size);

	/* A read entirely beyond EOF can just be zero-filled */
	if (start >= i_size) {
		ret = NETFS_FILL_WITH_ZEROES;
		why = cachefiles_trace_read_after_eof;
		goto out_no_object;
	}

	/* The cookie is marked as holding no data: download from the server
	 * and copy into the cache, unless an ondemand read may repopulate it.
	 */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
		__set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
		why = cachefiles_trace_read_no_data;
		if (!test_bit(NETFS_SREQ_ONDEMAND, _flags))
			goto out_no_object;
	}

	/* The object and the file may be being created in the background. */
	if (!file) {
		why = cachefiles_trace_read_no_file;
		if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
			goto out_no_object;
		file = cachefiles_cres_file(cres);
		if (!file)
			goto out_no_object;
		ino = file_inode(file)->i_ino;
	}

	object = cachefiles_cres_object(cres);
	cache = object->volume->cache;
	cachefiles_begin_secure(cache, &saved_cred);
retry:
	/* Probe for the presence of data at the start of the range */
	off = cachefiles_inject_read_error();
	if (off == 0)
		off = vfs_llseek(file, start, SEEK_DATA);
	if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
		if (off == (loff_t)-ENXIO) {
			/* No data past this point in the file */
			why = cachefiles_trace_read_seek_nxio;
			goto download_and_store;
		}
		trace_cachefiles_io_error(object, file_inode(file), off,
					  cachefiles_trace_seek_error);
		why = cachefiles_trace_read_seek_error;
		goto out;
	}

	/* Data begins beyond the requested range: the whole range is a hole */
	if (off >= start + len) {
		why = cachefiles_trace_read_found_hole;
		goto download_and_store;
	}

	/* Data begins partway in: shorten the request to cover only the hole
	 * (rounded up to the cache block size) and download that part.
	 */
	if (off > start) {
		off = round_up(off, cache->bsize);
		len = off - start;
		*_len = len;
		why = cachefiles_trace_read_found_part;
		goto download_and_store;
	}

	/* Data starts at 'start'; find where it ends */
	to = cachefiles_inject_read_error();
	if (to == 0)
		to = vfs_llseek(file, start, SEEK_HOLE);
	if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
		trace_cachefiles_io_error(object, file_inode(file), to,
					  cachefiles_trace_seek_error);
		why = cachefiles_trace_read_seek_error;
		goto out;
	}

	/* Data ends inside the range: shorten to the data extent, rounding up
	 * over a partial final block only at EOF, otherwise rounding down.
	 */
	if (to < start + len) {
		if (start + len >= i_size)
			to = round_up(to, cache->bsize);
		else
			to = round_down(to, cache->bsize);
		len = to - start;
		*_len = len;
	}

	why = cachefiles_trace_read_have_data;
	ret = NETFS_READ_FROM_CACHE;
	goto out;

download_and_store:
	__set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
	if (test_bit(NETFS_SREQ_ONDEMAND, _flags)) {
		/* Ask the ondemand daemon to fetch the data; on success,
		 * re-probe the cache file.
		 */
		rc = cachefiles_ondemand_read(object, start, len);
		if (!rc) {
			__clear_bit(NETFS_SREQ_ONDEMAND, _flags);
			goto retry;
		}
		ret = NETFS_INVALID_READ;
	}
out:
	cachefiles_end_secure(cache, saved_cred);
out_no_object:
	trace_cachefiles_prep_read(object, start, len, *_flags, ret, why, ino, netfs_ino);
	return ret;
}
491
492
/*
493
* Prepare a read operation, shortening it to a cached/uncached
494
* boundary as appropriate.
495
*/
496
static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
497
unsigned long long i_size)
498
{
499
return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
500
subreq->start, &subreq->len, i_size,
501
&subreq->flags, subreq->rreq->inode->i_ino);
502
}
503
504
/*
505
* Prepare an on-demand read operation, shortening it to a cached/uncached
506
* boundary as appropriate.
507
*/
508
static enum netfs_io_source
509
cachefiles_prepare_ondemand_read(struct netfs_cache_resources *cres,
510
loff_t start, size_t *_len, loff_t i_size,
511
unsigned long *_flags, ino_t ino)
512
{
513
return cachefiles_do_prepare_read(cres, start, _len, i_size, _flags, ino);
514
}
515
516
/*
 * Prepare for a write to occur.
 *
 * Rounds *_start/*_len to DIO (page) granularity, then checks that the write
 * can go ahead: either there is free space in the cache, or the target region
 * is already fully allocated in the backing file, or - if partially allocated
 * and space is short - the region is punched out so it can be rewritten.
 * Returns 0 if the write may proceed or a negative error code.
 */
int __cachefiles_prepare_write(struct cachefiles_object *object,
			       struct file *file,
			       loff_t *_start, size_t *_len, size_t upper_len,
			       bool no_space_allocated_yet)
{
	struct cachefiles_cache *cache = object->volume->cache;
	loff_t start = *_start, pos;
	size_t len = *_len;
	int ret;

	/* Round to DIO size */
	start = round_down(*_start, PAGE_SIZE);
	if (start != *_start || *_len > upper_len) {
		/* Probably asked to cache a streaming write written into the
		 * pagecache when the cookie was temporarily out of service to
		 * culling.
		 */
		fscache_count_dio_misfit();
		return -ENOBUFS;
	}

	*_len = round_up(len, PAGE_SIZE);

	/* We need to work out whether there's sufficient disk space to perform
	 * the write - but we can skip that check if we have space already
	 * allocated.
	 */
	if (no_space_allocated_yet)
		goto check_space;

	/* See whether the target region is already allocated in the file */
	pos = cachefiles_inject_read_error();
	if (pos == 0)
		pos = vfs_llseek(file, start, SEEK_DATA);
	if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
		if (pos == -ENXIO)
			goto check_space; /* Unallocated tail */
		trace_cachefiles_io_error(object, file_inode(file), pos,
					  cachefiles_trace_seek_error);
		return pos;
	}
	if ((u64)pos >= (u64)start + *_len)
		goto check_space; /* Unallocated region */

	/* We have a block that's at least partially filled - if we're low on
	 * space, we need to see if it's fully allocated. If it's not, we may
	 * want to cull it.
	 */
	if (cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
				 cachefiles_has_space_check) == 0)
		return 0; /* Enough space to simply overwrite the whole block */

	/* Low on space: check whether the region is fully allocated */
	pos = cachefiles_inject_read_error();
	if (pos == 0)
		pos = vfs_llseek(file, start, SEEK_HOLE);
	if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
		trace_cachefiles_io_error(object, file_inode(file), pos,
					  cachefiles_trace_seek_error);
		return pos;
	}
	if ((u64)pos >= (u64)start + *_len)
		return 0; /* Fully allocated */

	/* Partially allocated, but insufficient space: cull. */
	fscache_count_no_write_space();
	ret = cachefiles_inject_remove_error();
	if (ret == 0)
		ret = vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				    start, *_len);
	if (ret < 0) {
		trace_cachefiles_io_error(object, file_inode(file), ret,
					  cachefiles_trace_fallocate_error);
		cachefiles_io_error_obj(object,
					"CacheFiles: fallocate failed (%d)\n", ret);
		ret = -EIO;
	}

	return ret;

check_space:
	return cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
				    cachefiles_has_space_for_write);
}
601
602
/*
 * Prepare for a write, waiting for the backing file if it isn't available yet
 * and running the preparation with the cache's security credentials.
 */
static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
				    loff_t *_start, size_t *_len, size_t upper_len,
				    loff_t i_size, bool no_space_allocated_yet)
{
	struct cachefiles_object *object = cachefiles_cres_object(cres);
	struct cachefiles_cache *cache = object->volume->cache;
	const struct cred *saved_cred;
	int ret;

	/* The backing file may still be being set up; wait for it and recheck */
	if (!cachefiles_cres_file(cres)) {
		if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
			return -ENOBUFS;
		if (!cachefiles_cres_file(cres))
			return -ENOBUFS;
	}

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
					 _start, _len, upper_len,
					 no_space_allocated_yet);
	cachefiles_end_secure(cache, saved_cred);
	return ret;
}
625
626
/*
 * Prepare a write subrequest: set the stream's size limits and make sure the
 * backing cache file is available, failing the subrequest if it isn't.
 */
static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *wreq = subreq->rreq;
	struct netfs_cache_resources *cres = &wreq->cache_resources;
	struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];

	_enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start);

	/* Cap each subrequest at the VFS's R/W limit and BIO vector count */
	stream->sreq_max_len	= MAX_RW_COUNT;
	stream->sreq_max_segs	= BIO_MAX_VECS;

	if (!cachefiles_cres_file(cres)) {
		if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
			return netfs_prepare_write_failed(subreq);
		if (!cachefiles_cres_file(cres))
			return netfs_prepare_write_failed(subreq);
	}
}
644
645
/*
 * Issue a write subrequest to the cache, trimming the range to DIO block
 * alignment first.  Unaligned leading/trailing fragments are dropped (counted
 * as transferred without being written); a trailing fragment at EOF may
 * instead be extended to a full block if the stream allows it.
 */
static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *wreq = subreq->rreq;
	struct netfs_cache_resources *cres = &wreq->cache_resources;
	struct cachefiles_object *object = cachefiles_cres_object(cres);
	struct cachefiles_cache *cache = object->volume->cache;
	struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
	const struct cred *saved_cred;
	size_t off, pre, post, len = subreq->len;
	loff_t start = subreq->start;
	int ret;

	_enter("W=%x[%x] %llx-%llx",
	       wreq->debug_id, subreq->debug_index, start, start + len - 1);

	/* We need to start on the cache granularity boundary */
	off = start & (CACHEFILES_DIO_BLOCK_SIZE - 1);
	if (off) {
		pre = CACHEFILES_DIO_BLOCK_SIZE - off;
		if (pre >= len) {
			/* Entirely within one block - nothing writable */
			fscache_count_dio_misfit();
			netfs_write_subrequest_terminated(subreq, len);
			return;
		}
		/* Skip the unaligned leading fragment */
		subreq->transferred += pre;
		start += pre;
		len -= pre;
		iov_iter_advance(&subreq->io_iter, pre);
	}

	/* We also need to end on the cache granularity boundary */
	if (start + len == wreq->i_size) {
		size_t part = len % CACHEFILES_DIO_BLOCK_SIZE;
		size_t need = CACHEFILES_DIO_BLOCK_SIZE - part;

		/* At EOF, pad out to a whole block if the stream permits */
		if (part && stream->submit_extendable_to >= need) {
			len += need;
			subreq->len += need;
			subreq->io_iter.count += need;
		}
	}

	/* Trim off any remaining unaligned tail */
	post = len & (CACHEFILES_DIO_BLOCK_SIZE - 1);
	if (post) {
		len -= post;
		if (len == 0) {
			fscache_count_dio_misfit();
			netfs_write_subrequest_terminated(subreq, post);
			return;
		}
		iov_iter_truncate(&subreq->io_iter, len);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_cache_prepare);
	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
					 &start, &len, len, true);
	cachefiles_end_secure(cache, saved_cred);
	if (ret < 0) {
		netfs_write_subrequest_terminated(subreq, ret);
		return;
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_cache_write);
	cachefiles_write(&subreq->rreq->cache_resources,
			 subreq->start, &subreq->io_iter,
			 netfs_write_subrequest_terminated, subreq);
}
713
714
/*
715
* Clean up an operation.
716
*/
717
static void cachefiles_end_operation(struct netfs_cache_resources *cres)
718
{
719
struct file *file = cachefiles_cres_file(cres);
720
721
if (file)
722
fput(file);
723
fscache_end_cookie_access(fscache_cres_cookie(cres), fscache_access_io_end);
724
}
725
726
/*
 * Cache operations table handed to netfslib via cres->ops in
 * cachefiles_begin_operation().
 */
static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
	.end_operation		= cachefiles_end_operation,
	.read			= cachefiles_read,
	.write			= cachefiles_write,
	.issue_write		= cachefiles_issue_write,
	.prepare_read		= cachefiles_prepare_read,
	.prepare_write		= cachefiles_prepare_write,
	.prepare_write_subreq	= cachefiles_prepare_write_subreq,
	.prepare_ondemand_read	= cachefiles_prepare_ondemand_read,
	.query_occupancy	= cachefiles_query_occupancy,
};
737
738
/*
 * Open the cache file when beginning a cache operation.
 *
 * Installs the cachefiles ops on @cres and pins a ref on the object's backing
 * file into cres->cache_priv2 if one exists.  Returns false if no file could
 * be obtained and the caller wanted more than just the parameters.
 */
bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
				enum fscache_want_state want_state)
{
	struct cachefiles_object *object = cachefiles_cres_object(cres);

	if (!cachefiles_cres_file(cres)) {
		cres->ops = &cachefiles_netfs_cache_ops;
		if (object->file) {
			/* Recheck object->file under the lock before pinning
			 * so we don't race with it being cleared or replaced.
			 */
			spin_lock(&object->lock);
			if (!cres->cache_priv2 && object->file)
				cres->cache_priv2 = get_file(object->file);
			spin_unlock(&object->lock);
		}
	}

	if (!cachefiles_cres_file(cres) && want_state != FSCACHE_WANT_PARAMS) {
		pr_err("failed to get cres->file\n");
		return false;
	}

	return true;
}
763
764