GitHub Repository: torvalds/linux
Path: blob/master/fs/cachefiles/ondemand.c
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"

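/*
 * Pairs the anonymous file created for an OPEN request with the fd slot it
 * will occupy in the daemon's file table (see cachefiles_ondemand_get_fd()
 * and the fd_install() in cachefiles_ondemand_daemon_read()).
 */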
struct ondemand_anon_file {
	struct file *file;
	int fd;
};

static inline void cachefiles_req_put(struct cachefiles_req *req)
{
	if (refcount_dec_and_test(&req->ref))
		kfree(req);
}

static int cachefiles_ondemand_fd_release(struct inode *inode,
					  struct file *file)
{
	struct cachefiles_object *object = file->private_data;
	struct cachefiles_cache *cache;
	struct cachefiles_ondemand_info *info;
	int object_id;
	struct cachefiles_req *req;
	XA_STATE(xas, NULL, 0);

	if (!object)
		return 0;

	info = object->ondemand;
	cache = object->volume->cache;
	xas.xa = &cache->reqs;

	xa_lock(&cache->reqs);
	spin_lock(&info->lock);
	object_id = info->ondemand_id;
	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
	cachefiles_ondemand_set_object_close(object);
	spin_unlock(&info->lock);

	/* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
		if (req->msg.object_id == object_id &&
		    req->msg.opcode == CACHEFILES_OP_CLOSE) {
			complete(&req->done);
			xas_store(&xas, NULL);
		}
	}
	xa_unlock(&cache->reqs);

	xa_erase(&cache->ondemand_ids, object_id);
	trace_cachefiles_ondemand_fd_release(object, object_id);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	cachefiles_put_unbind_pincount(cache);
	return 0;
}

static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
						 struct iov_iter *iter)
{
	struct cachefiles_object *object = kiocb->ki_filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file;
	size_t len = iter->count, aligned_len = len;
	loff_t pos = kiocb->ki_pos;
	const struct cred *saved_cred;
	int ret;

	spin_lock(&object->lock);
	file = object->file;
	if (!file) {
		spin_unlock(&object->lock);
		return -ENOBUFS;
	}
	get_file(file);
	spin_unlock(&object->lock);

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, file, &pos, &aligned_len, len, true);
	cachefiles_end_secure(cache, saved_cred);
	if (ret < 0)
		goto out;

	trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
	ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
	if (ret > 0)
		kiocb->ki_pos += ret;

out:
	fput(file);
	return ret;
}

static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
					    int whence)
{
	struct cachefiles_object *object = filp->private_data;
	struct file *file;
	loff_t ret;

	spin_lock(&object->lock);
	file = object->file;
	if (!file) {
		spin_unlock(&object->lock);
		return -ENOBUFS;
	}
	get_file(file);
	spin_unlock(&object->lock);

	ret = vfs_llseek(file, pos, whence);
	fput(file);

	return ret;
}

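/*
 * Handle the CACHEFILES_IOC_READ_COMPLETE ("cread") ioctl on the anonymous
 * fd: @id is the msg_id of the READ request the daemon has just fulfilled,
 * so look it up, drop it from the xarray and complete the waiter.
 */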
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
					 unsigned long id)
{
	struct cachefiles_object *object = filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	XA_STATE(xas, &cache->reqs, id);

	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
		return -EINVAL;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	xa_lock(&cache->reqs);
	req = xas_load(&xas);
	if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
	    req->object != object) {
		xa_unlock(&cache->reqs);
		return -EINVAL;
	}
	xas_store(&xas, NULL);
	xa_unlock(&cache->reqs);

	trace_cachefiles_ondemand_cread(object, id);
	complete(&req->done);
	return 0;
}

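/*
 * Operations backing the anonymous fd handed to the user daemon for each
 * opened object: the daemon writes fetched data into the cache file, may
 * llseek() the cache file, and acknowledges reads via the ioctl above.
 */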
static const struct file_operations cachefiles_ondemand_fd_fops = {
	.owner		= THIS_MODULE,
	.release	= cachefiles_ondemand_fd_release,
	.write_iter	= cachefiles_ondemand_fd_write_iter,
	.llseek		= cachefiles_ondemand_fd_llseek,
	.unlocked_ioctl	= cachefiles_ondemand_fd_ioctl,
};

/*
 * OPEN request Completion (copen)
 * - command: "copen <id>,<cache_size>"
 *   <cache_size> indicates the object size if >=0, error code if negative
 */
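/*
 * For example, "copen 3,1048576" would complete OPEN request 3 with a
 * 1 MiB object, while "copen 3,-2" would fail it with -ENOENT.
 */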
int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;
	struct fscache_cookie *cookie;
	struct cachefiles_ondemand_info *info;
	char *pid, *psize;
	unsigned long id;
	long size;
	int ret;
	XA_STATE(xas, &cache->reqs, 0);

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	if (!*args) {
		pr_err("Empty id specified\n");
		return -EINVAL;
	}

	pid = args;
	psize = strchr(args, ',');
	if (!psize) {
		pr_err("Cache size is not specified\n");
		return -EINVAL;
	}

	*psize = 0;
	psize++;

	ret = kstrtoul(pid, 0, &id);
	if (ret)
		return ret;

	xa_lock(&cache->reqs);
	xas.xa_index = id;
	req = xas_load(&xas);
	if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
	    !req->object->ondemand->ondemand_id) {
		xa_unlock(&cache->reqs);
		return -EINVAL;
	}
	xas_store(&xas, NULL);
	xa_unlock(&cache->reqs);

	info = req->object->ondemand;
	/* fail OPEN request if copen format is invalid */
	ret = kstrtol(psize, 0, &size);
	if (ret) {
		req->error = ret;
		goto out;
	}

	/* fail OPEN request if daemon reports an error */
	if (size < 0) {
		if (!IS_ERR_VALUE(size)) {
			req->error = -EINVAL;
			ret = -EINVAL;
		} else {
			req->error = size;
			ret = 0;
		}
		goto out;
	}

	spin_lock(&info->lock);
	/*
	 * The anonymous fd was closed before copen?  Fail the request.
	 *
	 *             t1             |             t2
	 * ---------------------------------------------------------
	 *                             cachefiles_ondemand_copen
	 *                             req = xa_erase(&cache->reqs, id)
	 * // Anon fd is maliciously closed.
	 * cachefiles_ondemand_fd_release
	 * xa_lock(&cache->reqs)
	 * cachefiles_ondemand_set_object_close(object)
	 * xa_unlock(&cache->reqs)
	 *                             cachefiles_ondemand_set_object_open
	 *                             // No one will ever close it again.
	 * cachefiles_ondemand_daemon_read
	 * cachefiles_ondemand_select_req
	 *
	 * Get a read req but its fd is already closed. The daemon can't
	 * issue a cread ioctl with a closed fd, so it would hang.
	 */
	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
		spin_unlock(&info->lock);
		req->error = -EBADFD;
		goto out;
	}
	cookie = req->object->cookie;
	cookie->object_size = size;
	if (size)
		clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	else
		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	trace_cachefiles_ondemand_copen(req->object, id, size);

	cachefiles_ondemand_set_object_open(req->object);
	spin_unlock(&info->lock);
	wake_up_all(&cache->daemon_pollwq);

out:
	spin_lock(&info->lock);
	/* Need to set the object to close state to keep the reopening state from persisting */
	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
		cachefiles_ondemand_set_object_close(req->object);
	spin_unlock(&info->lock);
	complete(&req->done);
	return ret;
}

int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;

	XA_STATE(xas, &cache->reqs, 0);

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	/*
	 * Reset the requests to CACHEFILES_REQ_NEW state, so that requests
	 * that were only partially processed before the user daemon crashed
	 * can be reprocessed after recovery.
	 */
	xas_lock(&xas);
	xas_for_each(&xas, req, ULONG_MAX)
		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
	xas_unlock(&xas);

	wake_up_all(&cache->daemon_pollwq);
	return 0;
}

static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
				      struct ondemand_anon_file *anon_file)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct cachefiles_open *load;
	u32 object_id;
	int ret;

	object = cachefiles_grab_object(req->object,
					cachefiles_obj_get_ondemand_fd);
	cache = object->volume->cache;

	ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
			      XA_LIMIT(1, INT_MAX),
			      &cache->ondemand_id_next, GFP_KERNEL);
	if (ret < 0)
		goto err;

	anon_file->fd = get_unused_fd_flags(O_WRONLY);
	if (anon_file->fd < 0) {
		ret = anon_file->fd;
		goto err_free_id;
	}

	anon_file->file = anon_inode_getfile_fmode("[cachefiles]",
				&cachefiles_ondemand_fd_fops, object,
				O_WRONLY, FMODE_PWRITE | FMODE_LSEEK);
	if (IS_ERR(anon_file->file)) {
		ret = PTR_ERR(anon_file->file);
		goto err_put_fd;
	}

	spin_lock(&object->ondemand->lock);
	if (object->ondemand->ondemand_id > 0) {
		spin_unlock(&object->ondemand->lock);
		/* Pair with check in cachefiles_ondemand_fd_release(). */
		anon_file->file->private_data = NULL;
		ret = -EEXIST;
		goto err_put_file;
	}

	load = (void *)req->msg.data;
	load->fd = anon_file->fd;
	object->ondemand->ondemand_id = object_id;
	spin_unlock(&object->ondemand->lock);

	cachefiles_get_unbind_pincount(cache);
	trace_cachefiles_ondemand_open(object, &req->msg, load);
	return 0;

err_put_file:
	fput(anon_file->file);
	anon_file->file = NULL;
err_put_fd:
	put_unused_fd(anon_file->fd);
	anon_file->fd = ret;
err_free_id:
	xa_erase(&cache->ondemand_ids, object_id);
err:
	spin_lock(&object->ondemand->lock);
	/* Avoid marking an opened object as closed. */
	if (object->ondemand->ondemand_id <= 0)
		cachefiles_ondemand_set_object_close(object);
	spin_unlock(&object->ondemand->lock);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	return ret;
}

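/*
 * Work item used to reopen a closed object: re-sends an OPEN request for
 * the object so the daemon can hand back a fresh anonymous fd.
 */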
static void ondemand_object_worker(struct work_struct *work)
{
	struct cachefiles_ondemand_info *info =
		container_of(work, struct cachefiles_ondemand_info, ondemand_work);

	cachefiles_ondemand_init_object(info->object);
}

/*
 * If there are any inflight or subsequent READ requests on the
 * closed object, reopen it.
 * Skip read requests whose related object is reopening.
 */
static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
							     unsigned long xa_max)
{
	struct cachefiles_req *req;
	struct cachefiles_object *object;
	struct cachefiles_ondemand_info *info;

	xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
		if (req->msg.opcode != CACHEFILES_OP_READ)
			return req;
		object = req->object;
		info = object->ondemand;
		if (cachefiles_ondemand_object_is_close(object)) {
			cachefiles_ondemand_set_object_reopening(object);
			queue_work(fscache_wq, &info->ondemand_work);
			continue;
		}
		if (cachefiles_ondemand_object_is_reopening(object))
			continue;
		return req;
	}
	return NULL;
}

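/*
 * Atomically take @req out of the xarray and complete it with @err.
 * Returns false if another path (e.g. copen or the cread ioctl) already
 * removed it, in which case that path is responsible for completing it.
 */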
static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
						  struct xa_state *xas, int err)
{
	if (unlikely(!xas || !req))
		return false;

	if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
		return false;

	req->error = err;
	complete(&req->done);
	return true;
}

ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
					char __user *_buffer, size_t buflen)
{
	struct cachefiles_req *req;
	struct cachefiles_msg *msg;
	size_t n;
	int ret = 0;
	struct ondemand_anon_file anon_file;
	XA_STATE(xas, &cache->reqs, cache->req_id_next);

	xa_lock(&cache->reqs);
	/*
	 * Cyclically search for a request that has not yet been processed,
	 * to prevent requests from being processed repeatedly, and make
	 * request distribution fair.
	 */
	req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
	if (!req && cache->req_id_next > 0) {
		xas_set(&xas, 0);
		req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
	}
	if (!req) {
		xa_unlock(&cache->reqs);
		return 0;
	}

	msg = &req->msg;
	n = msg->len;

	if (n > buflen) {
		xa_unlock(&cache->reqs);
		return -EMSGSIZE;
	}

	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
	cache->req_id_next = xas.xa_index + 1;
	refcount_inc(&req->ref);
	cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
	xa_unlock(&cache->reqs);

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		ret = cachefiles_ondemand_get_fd(req, &anon_file);
		if (ret)
			goto out;
	}

	msg->msg_id = xas.xa_index;
	msg->object_id = req->object->ondemand->ondemand_id;

	if (copy_to_user(_buffer, msg, n) != 0)
		ret = -EFAULT;

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		if (ret < 0) {
			fput(anon_file.file);
			put_unused_fd(anon_file.fd);
			goto out;
		}
		fd_install(anon_file.fd, anon_file.file);
	}
out:
	cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
	/*
	 * Remove the request on error; a CLOSE request expects no reply, so
	 * finish it here as well.
	 */
	if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
		cachefiles_ondemand_finish_req(req, &xas, ret);
	cachefiles_req_put(req);
	return ret ? ret : n;
}

typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);

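/*
 * Allocate a request for @object, let @init_req fill in the opcode-specific
 * payload, publish it in cache->reqs with the CACHEFILES_REQ_NEW mark so the
 * daemon can read it, then wait (killably) for the daemon to complete it.
 */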
static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
					enum cachefiles_opcode opcode,
					size_t data_len,
					init_req_fn init_req,
					void *private)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req = NULL;
	XA_STATE(xas, &cache->reqs, 0);
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return 0;

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		ret = -EIO;
		goto out;
	}

	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&req->ref, 1);
	req->object = object;
	init_completion(&req->done);
	req->msg.opcode = opcode;
	req->msg.len = sizeof(struct cachefiles_msg) + data_len;

	ret = init_req(req, private);
	if (ret)
		goto out;

	do {
		/*
		 * Stop enqueuing the request when the daemon is dying. The
		 * following two operations need to be atomic as a whole:
		 *   1) check cache state, and
		 *   2) enqueue request if cache is alive.
		 * Otherwise the request may be enqueued after the xarray has
		 * been flushed, leaving an orphan request that is never
		 * completed.
		 *
		 * CPU 1				CPU 2
		 * =====				=====
		 * test CACHEFILES_DEAD bit
		 *					set CACHEFILES_DEAD bit
		 *					flush requests in the xarray
		 * enqueue the request
		 */
		xas_lock(&xas);

		if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
		    cachefiles_ondemand_object_is_dropping(object)) {
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/* coupled with the barrier in cachefiles_flush_reqs() */
		smp_mb();

		if (opcode == CACHEFILES_OP_CLOSE &&
		    !cachefiles_ondemand_object_is_open(object)) {
			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/*
		 * Cyclically find a free index to avoid msg_id reuse that
		 * would cause the daemon to successfully copen a stale msg_id.
		 */
		xas.xa_index = cache->msg_id_next;
		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART) {
			xas.xa_index = 0;
			xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
		}
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);

		xas_store(&xas, req);
		if (xas_valid(&xas)) {
			cache->msg_id_next = xas.xa_index + 1;
			xas_clear_mark(&xas, XA_FREE_MARK);
			xas_set_mark(&xas, CACHEFILES_REQ_NEW);
		}
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	ret = xas_error(&xas);
	if (ret)
		goto out;

	wake_up_all(&cache->daemon_pollwq);
wait:
	ret = wait_for_completion_killable(&req->done);
	if (!ret) {
		ret = req->error;
	} else {
		ret = -EINTR;
		if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
			/* Someone will complete it soon. */
			cpu_relax();
			goto wait;
		}
	}
	cachefiles_req_put(req);
	return ret;
out:
	/*
	 * Reset the object to close state in the error handling path. If the
	 * error occurs after creating the anonymous fd,
	 * cachefiles_ondemand_fd_release() will set the object to close.
	 */
	if (opcode == CACHEFILES_OP_OPEN &&
	    !cachefiles_ondemand_object_is_dropping(object))
		cachefiles_ondemand_set_object_close(object);
	kfree(req);
	return ret;
}

static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	struct cachefiles_open *load = (void *)req->msg.data;
	size_t volume_key_size, cookie_key_size;
	void *volume_key, *cookie_key;

	/*
	 * Volume key is a NUL-terminated string. key[0] stores strlen() of the
	 * string, followed by the content of the string (excluding '\0').
	 */
	volume_key_size = volume->key[0] + 1;
	volume_key = volume->key + 1;

	/* Cookie key is binary data, which is netfs specific. */
	cookie_key_size = cookie->key_len;
	cookie_key = fscache_get_key(cookie);

	if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
		pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
		return -EINVAL;
	}

	load->volume_key_size = volume_key_size;
	load->cookie_key_size = cookie_key_size;
	memcpy(load->data, volume_key, volume_key_size);
	memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);

	return 0;
}

static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
					      void *private)
{
	struct cachefiles_object *object = req->object;

	if (!cachefiles_ondemand_object_is_open(object))
		return -ENOENT;

	trace_cachefiles_ondemand_close(object, &req->msg);
	return 0;
}

struct cachefiles_read_ctx {
	loff_t off;
	size_t len;
};

static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct cachefiles_read *load = (void *)req->msg.data;
	struct cachefiles_read_ctx *read_ctx = private;

	load->off = read_ctx->off;
	load->len = read_ctx->len;
	trace_cachefiles_ondemand_read(object, &req->msg, load);
	return 0;
}

int cachefiles_ondemand_init_object(struct cachefiles_object *object)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	size_t volume_key_size, cookie_key_size, data_len;

	if (!object->ondemand)
		return 0;

	/*
	 * CacheFiles will first check the cache file under the root cache
	 * directory. If the coherency check fails, it will fall back to
	 * creating a new tmpfile as the cache file. Reuse the previously
	 * allocated object ID, if any.
	 */
	if (cachefiles_ondemand_object_is_open(object))
		return 0;

	volume_key_size = volume->key[0] + 1;
	cookie_key_size = cookie->key_len;
	data_len = sizeof(struct cachefiles_open) +
		   volume_key_size + cookie_key_size;

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
			data_len, cachefiles_ondemand_init_open_req, NULL);
}

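/*
 * Tear down the on-demand state of an object that is being withdrawn: send
 * a CLOSE request, fail any requests still queued for the object, and wait
 * for a pending reopen worker to finish to avoid a use-after-free.
 */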
void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
	unsigned long index;
	struct cachefiles_req *req;
	struct cachefiles_cache *cache;

	if (!object->ondemand)
		return;

	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
			cachefiles_ondemand_init_close_req, NULL);

	if (!object->ondemand->ondemand_id)
		return;

	/* Cancel all requests for the object that is being dropped. */
	cache = object->volume->cache;
	xa_lock(&cache->reqs);
	cachefiles_ondemand_set_object_dropping(object);
	xa_for_each(&cache->reqs, index, req) {
		if (req->object == object) {
			req->error = -EIO;
			complete(&req->done);
			__xa_erase(&cache->reqs, index);
		}
	}
	xa_unlock(&cache->reqs);

	/* Wait for ondemand_object_worker() to finish to avoid UAF. */
	cancel_work_sync(&object->ondemand->ondemand_work);
}

int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
				      struct cachefiles_volume *volume)
{
	if (!cachefiles_in_ondemand_mode(volume->cache))
		return 0;

	object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info),
				   GFP_KERNEL);
	if (!object->ondemand)
		return -ENOMEM;

	object->ondemand->object = object;
	spin_lock_init(&object->ondemand->lock);
	INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
	return 0;
}

void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object)
{
	kfree(object->ondemand);
	object->ondemand = NULL;
}

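/*
 * Ask the user daemon to fetch [pos, pos + len) of the object's backing
 * data by queueing a READ request; returns once the daemon acknowledges it
 * with the CACHEFILES_IOC_READ_COMPLETE ioctl.
 */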
int cachefiles_ondemand_read(struct cachefiles_object *object,
			     loff_t pos, size_t len)
{
	struct cachefiles_read_ctx read_ctx = {pos, len};

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
			sizeof(struct cachefiles_read),
			cachefiles_ondemand_init_read_req, &read_ctx);
}