// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include "crypto.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page. This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode. In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress. In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER. So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_. Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
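
/*
 * Illustrative example (editor's note, not from the original source):
 * an inode has 4 dirty pages, all dirtied in the head context, so
 * i_wrbuffer_ref == i_wrbuffer_ref_head == 4. When a snapshot
 * notification arrives, a ceph_cap_snap is appended to i_cap_snaps,
 * capsnap->dirty becomes 4, and i_wrbuffer_ref_head drops to 0 while
 * i_wrbuffer_ref stays 4. Those 4 pages must be written back with the
 * capsnap's snap context before any page dirtied after the snapshot.
 */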

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb) \
	(CONGESTION_ON_THRESH(congestion_kb) - \
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
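
/*
 * Worked example (editor's note, assuming 4 KiB pages, PAGE_SHIFT == 12):
 * with congestion_kb == 8192, writeback counts as congested once more
 * than 8192 >> 2 == 2048 pages are in flight, and as uncongested again
 * once the in-flight count falls below 2048 - (2048 >> 2) == 1536 pages.
 */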

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata);

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page. Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate. If we do, readjust.
 */
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	if (folio_test_dirty(folio)) {
		doutc(cl, "%llx.%llx %p idx %lu -- already dirty\n",
		      ceph_vinop(inode), folio, folio->index);
		VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
		return false;
	}

	atomic64_inc(&mdsc->dirty_folios);

	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	doutc(cl, "%llx.%llx %p idx %lu head %d/%d -> %d/%d "
	      "snapc %p seq %lld (%d snaps)\n",
	      ceph_vinop(inode), folio, folio->index,
	      ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	      ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	      snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in folio->private. Also set
	 * PagePrivate so that we get invalidate_folio callback.
	 */
	VM_WARN_ON_FOLIO(folio->private, folio);
	folio_attach_private(folio, snapc);

	return ceph_fscache_dirty_folio(mapping, folio);
}

/*
 * If we are truncating the full folio (i.e. offset == 0), adjust the
 * dirty folio counters appropriately. Only called if there is private
 * data on the folio.
 */
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	if (offset != 0 || length != folio_size(folio)) {
		doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n",
		      ceph_vinop(inode), folio->index, offset, length);
		return;
	}

	WARN_ON(!folio_test_locked(folio));
	if (folio_test_private(folio)) {
		doutc(cl, "%llx.%llx idx %lu full dirty page\n",
		      ceph_vinop(inode), folio->index);

		snapc = folio_detach_private(folio);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
	}

	netfs_invalidate_folio(folio, offset, length);
}

static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
{
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_layout *lo = &ci->i_layout;
	unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
	loff_t end = rreq->start + rreq->len, new_end;
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;
	unsigned long max_len;
	u32 blockoff;

	if (priv) {
		/* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
		if (priv->file_ra_disabled)
			max_pages = 0;
		else
			max_pages = priv->file_ra_pages;
	}

	/* Readahead is disabled */
	if (!max_pages)
		return;

	max_len = max_pages << PAGE_SHIFT;

	/*
	 * Try to expand the length forward by rounding it up to the next
	 * block, but do not exceed the file size, unless the original
	 * request already exceeds it.
	 */
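	/*
	 * Worked example (editor's note, assuming a 4 MiB stripe_unit and
	 * ample max_len): a request for [6 MiB, 6.5 MiB) is first extended
	 * forward to the 8 MiB boundary (capped at i_size), then extended
	 * backward to the 4 MiB boundary, yielding [4 MiB, 8 MiB).
	 */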
	new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size);
	if (new_end > end && new_end <= rreq->start + max_len)
		rreq->len = new_end - rreq->start;

	/* Try to expand the start downward */
	div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
	if (rreq->len + blockoff <= max_len) {
		rreq->start -= blockoff;
		rreq->len += blockoff;
	}
}

static void finish_netfs_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct netfs_io_subrequest *subreq = req->r_priv;
	struct ceph_osd_req_op *op = &req->r_ops[0];
	int err = req->r_result;
	bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);

	ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, osd_data->length, err);

	doutc(cl, "result %d subreq->len=%zu i_size=%lld\n", req->r_result,
	      subreq->len, i_size_read(req->r_inode));

	/* no object means success but no data */
	if (err == -ENOENT) {
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
		err = 0;
	} else if (err == -EBLOCKLISTED) {
		fsc->blocklisted = true;
	}

	if (err >= 0) {
		if (sparse && err > 0)
			err = ceph_sparse_ext_map_end(op);
		if (err < subreq->len &&
		    subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
		    subreq->rreq->origin != NETFS_DIO_READ)
			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		if (IS_ENCRYPTED(inode) && err > 0) {
			err = ceph_fscrypt_decrypt_extents(inode,
					osd_data->pages, subreq->start,
					op->extent.sparse_ext,
					op->extent.sparse_ext_cnt);
			if (err > subreq->len)
				err = subreq->len;
		}
		if (err > 0)
			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	}

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		ceph_put_page_vector(osd_data->pages,
				     calc_pages_for(osd_data->alignment,
						    osd_data->length), false);
	}
	if (err > 0) {
		subreq->transferred = err;
		err = 0;
	}
	subreq->error = err;
	trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
	netfs_read_subreq_terminated(subreq);
	iput(req->r_inode);
	ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_mds_reply_info_parsed *rinfo;
	struct ceph_mds_reply_info_in *iinfo;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	ssize_t err = 0;
	size_t len;
	int mode;

	if (rreq->origin != NETFS_UNBUFFERED_READ &&
	    rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);

	if (subreq->start >= inode->i_size)
		goto out;

	/* We need to fetch the inline data. */
	mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_ino1 = ci->i_vino;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
	req->r_num_caps = 2;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err < 0)
		goto out;

	rinfo = &req->r_reply_info;
	iinfo = &rinfo->targeti;
	if (iinfo->inline_version == CEPH_INLINE_NONE) {
		/* The data got uninlined */
		ceph_mdsc_put_request(req);
		return false;
	}

	len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
	err = copy_to_iter(iinfo->inline_data + subreq->start, len, &subreq->io_iter);
	if (err == 0) {
		err = -EFAULT;
	} else {
		subreq->transferred += err;
		err = 0;
	}

	ceph_mdsc_put_request(req);
out:
	subreq->error = err;
	trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
	netfs_read_subreq_terminated(subreq);
	return true;
}

static int ceph_netfs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	u64 objno, objoff;
	u32 xlen;

	/* Truncate the extent at the end of the current block */
	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
				      &objno, &objoff, &xlen);
	rreq->io_streams[0].sreq_max_len = umin(xlen, fsc->mount_options->rsize);
	return 0;
}
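
/*
 * ceph_netfs_prepare_read() example (editor's note, assuming the default
 * 4 MiB object size): a subrequest starting at file offset 5 MiB maps to
 * objoff 1 MiB within object 1, so xlen caps the subrequest at 3 MiB and
 * it never crosses the object boundary; rsize may cap it further.
 */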

static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *req = NULL;
	struct ceph_vino vino = ceph_vino(inode);
	int err;
	u64 len;
	bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
	u64 off = subreq->start;
	int extent_cnt;

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
		return;

	// TODO: This rounding here is slightly dodgy. It *should* work, for
	// now, as the cache only deals in blocks that are a multiple of
	// PAGE_SIZE and fscrypt blocks are at most PAGE_SIZE. What needs to
	// happen is for the fscrypt handling to be moved into netfslib and
	// for the data in the cache also to be stored encrypted.
	len = subreq->len;
	ceph_fscrypt_adjust_off_and_len(inode, &off, &len);

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
			off, &len, 0, 1, sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
			CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq,
			ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}

	if (sparse) {
		extent_cnt = __ceph_sparse_read_ext_count(inode, len);
		err = ceph_alloc_sparse_ext_map(&req->r_ops[0], extent_cnt);
		if (err)
			goto out;
	}

	doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
	      ceph_vinop(inode), subreq->start, subreq->len, len);

	/*
	 * FIXME: For now, use CEPH_OSD_DATA_TYPE_PAGES instead of _ITER for
	 * encrypted inodes. We'd need infrastructure that handles an iov_iter
	 * instead of page arrays, and we don't have that as of yet. Once the
	 * dust settles on the write helpers and encrypt/decrypt routines for
	 * netfs, we should be able to rework this.
	 */
	if (IS_ENCRYPTED(inode)) {
		struct page **pages;
		size_t page_off;

		/*
		 * FIXME: io_iter.count needs to be corrected to aligned
		 * length. Otherwise, iov_iter_get_pages_alloc2() operates
		 * with the initial unaligned length value. As a result,
		 * ceph_msg_data_cursor_init() triggers BUG_ON() in the case
		 * if msg->sparse_read_total > msg->data_length.
		 */
		subreq->io_iter.count = len;

		err = iov_iter_get_pages_alloc2(&subreq->io_iter, &pages, len, &page_off);
		if (err < 0) {
			doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
			      ceph_vinop(inode), err);
			goto out;
		}

		/* should always give us a page-aligned read */
		WARN_ON_ONCE(page_off);
		len = err;
		err = 0;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
						 false);
	} else {
		osd_req_op_extent_osd_iter(req, 0, &subreq->io_iter);
	}
	if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
		err = -EIO;
		goto out;
	}
	req->r_callback = finish_netfs_read;
	req->r_priv = subreq;
	req->r_inode = inode;
	ihold(inode);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	ceph_osdc_start_request(req->r_osdc, req);
out:
	ceph_osdc_put_request(req);
	if (err) {
		subreq->error = err;
		netfs_read_subreq_terminated(subreq);
	}
	doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}

static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct inode *inode = rreq->inode;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	int got = 0, want = CEPH_CAP_FILE_CACHE;
	struct ceph_netfs_request_data *priv;
	int ret = 0;

	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
	__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);

	if (rreq->origin != NETFS_READAHEAD)
		return 0;

	priv = kzalloc(sizeof(*priv), GFP_NOFS);
	if (!priv)
		return -ENOMEM;

	if (file) {
		struct ceph_rw_context *rw_ctx;
		struct ceph_file_info *fi = file->private_data;

		priv->file_ra_pages = file->f_ra.ra_pages;
		priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;

		rw_ctx = ceph_find_rw_context(fi);
		if (rw_ctx) {
			rreq->netfs_priv = priv;
			return 0;
		}
	}

	/*
	 * readahead callers do not necessarily hold Fcb caps
	 * (e.g. fadvise, madvise).
	 */
	ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
	if (ret < 0) {
		doutc(cl, "%llx.%llx, error getting cap\n", ceph_vinop(inode));
		goto out;
	}

	if (!(got & want)) {
		doutc(cl, "%llx.%llx, no cache cap\n", ceph_vinop(inode));
		ret = -EACCES;
		goto out;
	}
	if (ret == 0) {
		ret = -EACCES;
		goto out;
	}

	priv->caps = got;
	rreq->netfs_priv = priv;
	rreq->io_streams[0].sreq_max_len = fsc->mount_options->rsize;

out:
	if (ret < 0) {
		if (got)
			ceph_put_cap_refs(ceph_inode(inode), got);
		kfree(priv);
	}

	return ret;
}

static void ceph_netfs_free_request(struct netfs_io_request *rreq)
{
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;

	if (!priv)
		return;

	if (priv->caps)
		ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
	kfree(priv);
	rreq->netfs_priv = NULL;
}

const struct netfs_request_ops ceph_netfs_ops = {
	.init_request		= ceph_init_request,
	.free_request		= ceph_netfs_free_request,
	.prepare_read		= ceph_netfs_prepare_read,
	.issue_read		= ceph_netfs_issue_read,
	.expand_readahead	= ceph_netfs_expand_readahead,
	.check_write_begin	= ceph_netfs_check_write_begin,
};

#ifdef CONFIG_CEPH_FSCACHE
static void ceph_set_page_fscache(struct page *page)
{
	folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
}

static void ceph_fscache_write_terminated(void *priv, ssize_t error)
{
	struct inode *inode = priv;

	if (IS_ERR_VALUE(error) && error != -ENOBUFS)
		ceph_fscache_invalidate(inode, false);
}

static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct fscache_cookie *cookie = ceph_fscache_cookie(ci);

	fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
			       ceph_fscache_write_terminated, inode, true, caching);
}
#else
static inline void ceph_set_page_fscache(struct page *page)
{
}

static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
#endif /* CONFIG_CEPH_FSCACHE */

struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;

	bool head_snapc;
	struct ceph_snap_context *snapc;
	struct ceph_snap_context *last_snapc;

	bool done;
	bool should_loop;
	bool range_whole;
	pgoff_t start_index;
	pgoff_t index;
	pgoff_t end;
	xa_mark_t tag;

	pgoff_t strip_unit_end;
	unsigned int wsize;
	unsigned int nr_folios;
	unsigned int max_pages;
	unsigned int locked_pages;

	int op_idx;
	int num_ops;
	u64 offset;
	u64 len;

	struct folio_batch fbatch;
	unsigned int processed_in_fbatch;

	bool from_pool;
	struct page **pages;
	struct page **data_pages;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		doutc(cl, " capsnap %p snapc %p has %d dirty pages\n",
		      capsnap, capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		doutc(cl, " head snapc %p has %d dirty pages\n", snapc,
		      ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);
	u64 ret;

	snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
		end = ceph_fscrypt_page_offset(page) + thp_size(page);
	ret = end > start ? end - start : 0;
	if (ret && fscrypt_is_bounce_page(page))
		ret = round_up(ret, CEPH_FSCRYPT_BLOCK_SIZE);
	return ret;
}

/*
 * Write a folio, but leave it locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., folio is no longer dirty).
 */
static int write_folio_nounlock(struct folio *folio,
				struct writeback_control *wbc)
{
	struct page *page = &folio->page;
	struct inode *inode = folio->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = folio_pos(folio);
	int err;
	loff_t len = folio_size(folio);
	loff_t wlen;
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	bool caching = ceph_is_cache_enabled(inode);
	struct page *bounce_page = NULL;

	doutc(cl, "%llx.%llx folio %p idx %lu\n", ceph_vinop(inode), folio,
	      folio->index);

	if (ceph_inode_is_shutdown(inode))
		return -EIO;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(&folio->page);
	if (!snapc) {
		doutc(cl, "%llx.%llx folio %p not dirty?\n", ceph_vinop(inode),
		      folio);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		doutc(cl, "%llx.%llx folio %p snapc %p not writeable - noop\n",
		      ceph_vinop(inode), folio, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		folio_redirty_for_writepage(wbc, folio);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		doutc(cl, "%llx.%llx folio at %lu beyond eof %llu\n",
		      ceph_vinop(inode), folio->index, ceph_wbc.i_size);
		folio_invalidate(folio, 0, folio_size(folio));
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
	doutc(cl, "%llx.%llx folio %p index %lu on %llu~%llu snapc %p seq %lld\n",
	      ceph_vinop(inode), folio, folio->index, page_off, wlen, snapc,
	      snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = true;

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
				    page_off, &wlen, 0, 1, CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq,
				    ceph_wbc.truncate_size, true);
	if (IS_ERR(req)) {
		folio_redirty_for_writepage(wbc, folio);
		return PTR_ERR(req);
	}

	if (wlen < len)
		len = wlen;

	folio_start_writeback(folio);
	if (caching)
		ceph_set_page_fscache(&folio->page);
	ceph_fscache_write_to_cache(inode, page_off, len, caching);

	if (IS_ENCRYPTED(inode)) {
		bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
						CEPH_FSCRYPT_BLOCK_SIZE, 0,
						GFP_NOFS);
		if (IS_ERR(bounce_page)) {
			folio_redirty_for_writepage(wbc, folio);
			folio_end_writeback(folio);
			ceph_osdc_put_request(req);
			return PTR_ERR(bounce_page);
		}
	}

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > folio_size(folio));
	osd_req_op_extent_osd_data_pages(req, 0,
			bounce_page ? &bounce_page : &page, wlen, 0,
			false, false);
	doutc(cl, "%llx.%llx %llu~%llu (%llu bytes, %sencrypted)\n",
	      ceph_vinop(inode), page_off, len, wlen,
	      IS_ENCRYPTED(inode) ? "" : "not ");

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(osdc, req);
	err = ceph_osdc_wait_request(osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);
	fscrypt_free_bounce_page(bounce_page);
	ceph_osdc_put_request(req);
	if (err == 0)
		err = len;

	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			doutc(cl, "%llx.%llx interrupted page %p\n",
			      ceph_vinop(inode), folio);
			folio_redirty_for_writepage(wbc, folio);
			folio_end_writeback(folio);
			return err;
		}
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		doutc(cl, "%llx.%llx setting mapping error %d %p\n",
		      ceph_vinop(inode), err, folio);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		doutc(cl, "%llx.%llx cleaned page %p\n",
		      ceph_vinop(inode), folio);
		err = 0; /* vfs expects us to return 0 */
	}
	oldest = folio_detach_private(folio);
	WARN_ON_ONCE(oldest != snapc);
	folio_end_writeback(folio);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc); /* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = false;

	return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	unsigned int len = 0;
	bool remove_page;

	doutc(cl, "%llx.%llx rc %d\n", ceph_vinop(inode), rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
			pr_warn_client(cl,
				"%llx.%llx incorrect op %d req %p index %d tid %llu\n",
				ceph_vinop(inode), req->r_ops[i].op, req, i,
				req->r_tid);
			break;
		}

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		len += osd_data->length;
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			if (fscrypt_is_bounce_page(page)) {
				page = fscrypt_pagecache_page(page);
				fscrypt_free_bounce_page(osd_data->pages[j]);
				osd_data->pages[j] = page;
			}
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			    CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				fsc->write_congested = false;

			ceph_put_snap_context(detach_page_private(page));
			end_page_writeback(page);

			if (atomic64_dec_return(&mdsc->dirty_folios) <= 0) {
				wake_up_all(&mdsc->flush_end_wq);
				WARN_ON(atomic64_read(&mdsc->dirty_folios) < 0);
			}

			doutc(cl, "unlocking %p\n", page);

			if (remove_page)
				generic_error_remove_folio(inode->i_mapping,
							   page_folio(page));

			unlock_page(page);
		}
		doutc(cl, "%llx.%llx wrote %llu bytes cleaned %d pages\n",
		      ceph_vinop(inode), osd_data->length,
		      rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, rc);

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
	ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

static inline
bool is_forced_umount(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;

	if (ceph_inode_is_shutdown(inode)) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited_client(cl,
				"%llx.%llx %lld forced umount\n",
				ceph_vinop(inode), ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return true;
	}

	return false;
}

static inline
unsigned int ceph_define_write_size(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	unsigned int wsize = i_blocksize(inode);

	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	return wsize;
}

static inline
void ceph_folio_batch_init(struct ceph_writeback_ctl *ceph_wbc)
{
	folio_batch_init(&ceph_wbc->fbatch);
	ceph_wbc->processed_in_fbatch = 0;
}

static inline
void ceph_folio_batch_reinit(struct ceph_writeback_ctl *ceph_wbc)
{
	folio_batch_release(&ceph_wbc->fbatch);
	ceph_folio_batch_init(ceph_wbc);
}

static inline
void ceph_init_writeback_ctl(struct address_space *mapping,
			     struct writeback_control *wbc,
			     struct ceph_writeback_ctl *ceph_wbc)
{
	ceph_wbc->snapc = NULL;
	ceph_wbc->last_snapc = NULL;

	ceph_wbc->strip_unit_end = 0;
	ceph_wbc->wsize = ceph_define_write_size(mapping);

	ceph_wbc->nr_folios = 0;
	ceph_wbc->max_pages = 0;
	ceph_wbc->locked_pages = 0;

	ceph_wbc->done = false;
	ceph_wbc->should_loop = false;
	ceph_wbc->range_whole = false;

	ceph_wbc->start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	ceph_wbc->index = ceph_wbc->start_index;
	ceph_wbc->end = -1;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
		ceph_wbc->tag = PAGECACHE_TAG_TOWRITE;
	} else {
		ceph_wbc->tag = PAGECACHE_TAG_DIRTY;
	}

	ceph_wbc->op_idx = -1;
	ceph_wbc->num_ops = 0;
	ceph_wbc->offset = 0;
	ceph_wbc->len = 0;
	ceph_wbc->from_pool = false;

	ceph_folio_batch_init(ceph_wbc);

	ceph_wbc->pages = NULL;
	ceph_wbc->data_pages = NULL;
}

static inline
int ceph_define_writeback_range(struct address_space *mapping,
				struct writeback_control *wbc,
				struct ceph_writeback_ctl *ceph_wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;

	/* find oldest snap context with dirty data */
	ceph_wbc->snapc = get_oldest_context(inode, ceph_wbc, NULL);
	if (!ceph_wbc->snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		doutc(cl, " no snap context with dirty data?\n");
		return -ENODATA;
	}

	doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n",
	      ceph_wbc->snapc, ceph_wbc->snapc->seq,
	      ceph_wbc->snapc->num_snaps);

	ceph_wbc->should_loop = false;

	if (ceph_wbc->head_snapc && ceph_wbc->snapc != ceph_wbc->last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			ceph_wbc->index = ceph_wbc->start_index;
			ceph_wbc->end = -1;
			if (ceph_wbc->index > 0)
				ceph_wbc->should_loop = true;
			doutc(cl, " cyclic, start at %lu\n", ceph_wbc->index);
		} else {
			ceph_wbc->index = wbc->range_start >> PAGE_SHIFT;
			ceph_wbc->end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				ceph_wbc->range_whole = true;
			doutc(cl, " not cyclic, %lu to %lu\n",
			      ceph_wbc->index, ceph_wbc->end);
		}
	} else if (!ceph_wbc->head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with a newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' have been written. */
		if (ceph_wbc->index > 0)
			ceph_wbc->should_loop = true;
		doutc(cl, " non-head snapc, range whole\n");
	}

	ceph_put_snap_context(ceph_wbc->last_snapc);
	ceph_wbc->last_snapc = ceph_wbc->snapc;

	return 0;
}

static inline
bool has_writeback_done(struct ceph_writeback_ctl *ceph_wbc)
{
	return ceph_wbc->done && ceph_wbc->index > ceph_wbc->end;
}

static inline
bool can_next_page_be_processed(struct ceph_writeback_ctl *ceph_wbc,
				unsigned index)
{
	return index < ceph_wbc->nr_folios &&
	       ceph_wbc->locked_pages < ceph_wbc->max_pages;
}

static
int ceph_check_page_before_write(struct address_space *mapping,
				 struct writeback_control *wbc,
				 struct ceph_writeback_ctl *ceph_wbc,
				 struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_snap_context *pgsnapc;

	/* only dirty folios, or our accounting breaks */
	if (unlikely(!folio_test_dirty(folio) || folio->mapping != mapping)) {
		doutc(cl, "!dirty or !mapping %p\n", folio);
		return -ENODATA;
	}

	/* only if matching snap context */
	pgsnapc = page_snap_context(&folio->page);
	if (pgsnapc != ceph_wbc->snapc) {
		doutc(cl, "folio snapc %p %lld != oldest %p %lld\n",
		      pgsnapc, pgsnapc->seq,
		      ceph_wbc->snapc, ceph_wbc->snapc->seq);

		if (!ceph_wbc->should_loop && !ceph_wbc->head_snapc &&
		    wbc->sync_mode != WB_SYNC_NONE)
			ceph_wbc->should_loop = true;

		return -ENODATA;
	}

	if (folio_pos(folio) >= ceph_wbc->i_size) {
		doutc(cl, "folio at %lu beyond eof %llu\n",
		      folio->index, ceph_wbc->i_size);

		if ((ceph_wbc->size_stable ||
		     folio_pos(folio) >= i_size_read(inode)) &&
		    folio_clear_dirty_for_io(folio))
			folio_invalidate(folio, 0, folio_size(folio));

		return -ENODATA;
	}

	if (ceph_wbc->strip_unit_end &&
	    (folio->index > ceph_wbc->strip_unit_end)) {
		doutc(cl, "end of strip unit %p\n", folio);
		return -E2BIG;
	}

	return 0;
}

static inline
void __ceph_allocate_page_array(struct ceph_writeback_ctl *ceph_wbc,
				unsigned int max_pages)
{
	ceph_wbc->pages = kmalloc_array(max_pages,
					sizeof(*ceph_wbc->pages),
					GFP_NOFS);
	if (!ceph_wbc->pages) {
		ceph_wbc->from_pool = true;
		ceph_wbc->pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
		BUG_ON(!ceph_wbc->pages);
	}
}

static inline
void ceph_allocate_page_array(struct address_space *mapping,
			      struct ceph_writeback_ctl *ceph_wbc,
			      struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 objnum;
	u64 objoff;
	u32 xlen;

	/* prepare async write request */
	ceph_wbc->offset = (u64)folio_pos(folio);
	ceph_calc_file_object_mapping(&ci->i_layout,
				      ceph_wbc->offset, ceph_wbc->wsize,
				      &objnum, &objoff, &xlen);

	ceph_wbc->num_ops = 1;
	ceph_wbc->strip_unit_end = folio->index + ((xlen - 1) >> PAGE_SHIFT);

	BUG_ON(ceph_wbc->pages);
	ceph_wbc->max_pages = calc_pages_for(0, (u64)xlen);
	__ceph_allocate_page_array(ceph_wbc, ceph_wbc->max_pages);

	ceph_wbc->len = 0;
}

static inline
bool is_folio_index_contiguous(const struct ceph_writeback_ctl *ceph_wbc,
			       const struct folio *folio)
{
	return folio->index == (ceph_wbc->offset + ceph_wbc->len) >> PAGE_SHIFT;
}
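
/*
 * is_folio_index_contiguous() example (editor's note, assuming 4 KiB
 * pages): with offset == 4096 and len == 8192, the next contiguous
 * folio must be at index (4096 + 8192) >> 12 == 3.
 */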

static inline
bool is_num_ops_too_big(struct ceph_writeback_ctl *ceph_wbc)
{
	return ceph_wbc->num_ops >=
	       (ceph_wbc->from_pool ? CEPH_OSD_SLAB_OPS : CEPH_OSD_MAX_OPS);
}

static inline
bool is_write_congestion_happened(struct ceph_fs_client *fsc)
{
	return atomic_long_inc_return(&fsc->writeback_count) >
	       CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb);
}

static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
		struct writeback_control *wbc,
		struct ceph_writeback_ctl *ceph_wbc, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct page **pages = ceph_wbc->pages;
	unsigned int index = ceph_wbc->locked_pages;
	gfp_t gfp_flags = ceph_wbc->locked_pages ? GFP_NOWAIT : GFP_NOFS;

	if (IS_ENCRYPTED(inode)) {
		pages[index] = fscrypt_encrypt_pagecache_blocks(folio,
								PAGE_SIZE,
								0,
								gfp_flags);
		if (IS_ERR(pages[index])) {
			/* save the error before clearing the slot; calling
			 * PTR_ERR() after the NULL assignment would return 0
			 * and hide the failure from the caller */
			int err = PTR_ERR(pages[index]);

			if (err == -EINVAL) {
				pr_err_client(cl, "inode->i_blkbits=%hhu\n",
					      inode->i_blkbits);
			}

			/* better not fail on first page! */
			BUG_ON(ceph_wbc->locked_pages == 0);

			pages[index] = NULL;
			return err;
		}
	} else {
		pages[index] = &folio->page;
	}

	ceph_wbc->locked_pages++;

	return 0;
}

static
int ceph_process_folio_batch(struct address_space *mapping,
			     struct writeback_control *wbc,
			     struct ceph_writeback_ctl *ceph_wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct folio *folio = NULL;
	unsigned i;
	int rc = 0;

	for (i = 0; can_next_page_be_processed(ceph_wbc, i); i++) {
		folio = ceph_wbc->fbatch.folios[i];

		if (!folio)
			continue;

		doutc(cl, "? %p idx %lu, folio_test_writeback %#x, "
		      "folio_test_dirty %#x, folio_test_locked %#x\n",
		      folio, folio->index, folio_test_writeback(folio),
		      folio_test_dirty(folio),
		      folio_test_locked(folio));

		if (folio_test_writeback(folio) ||
		    folio_test_private_2(folio) /* [DEPRECATED] */) {
			doutc(cl, "waiting on writeback %p\n", folio);
			folio_wait_writeback(folio);
			folio_wait_private_2(folio); /* [DEPRECATED] */
			continue;
		}

		if (ceph_wbc->locked_pages == 0)
			folio_lock(folio);
		else if (!folio_trylock(folio))
			break;

		rc = ceph_check_page_before_write(mapping, wbc,
						  ceph_wbc, folio);
		if (rc == -ENODATA) {
			rc = 0;
			folio_unlock(folio);
			ceph_wbc->fbatch.folios[i] = NULL;
			continue;
		} else if (rc == -E2BIG) {
			rc = 0;
			folio_unlock(folio);
			ceph_wbc->fbatch.folios[i] = NULL;
			break;
		}

		if (!folio_clear_dirty_for_io(folio)) {
			doutc(cl, "%p !folio_clear_dirty_for_io\n", folio);
			folio_unlock(folio);
			ceph_wbc->fbatch.folios[i] = NULL;
			continue;
		}

		/*
		 * We have something to write. If this is
		 * the first locked page this time through,
		 * calculate max possible write size and
		 * allocate a page array
		 */
		if (ceph_wbc->locked_pages == 0) {
			ceph_allocate_page_array(mapping, ceph_wbc, folio);
		} else if (!is_folio_index_contiguous(ceph_wbc, folio)) {
			if (is_num_ops_too_big(ceph_wbc)) {
				folio_redirty_for_writepage(wbc, folio);
				folio_unlock(folio);
				break;
			}

			ceph_wbc->num_ops++;
			ceph_wbc->offset = (u64)folio_pos(folio);
			ceph_wbc->len = 0;
		}

		/* note position of first page in fbatch */
		doutc(cl, "%llx.%llx will write folio %p idx %lu\n",
		      ceph_vinop(inode), folio, folio->index);

		fsc->write_congested = is_write_congestion_happened(fsc);

		rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc,
						    folio);
		if (rc) {
			folio_redirty_for_writepage(wbc, folio);
			folio_unlock(folio);
			break;
		}

		ceph_wbc->fbatch.folios[i] = NULL;
		ceph_wbc->len += folio_size(folio);
	}

	ceph_wbc->processed_in_fbatch = i;

	return rc;
}

static inline
void ceph_shift_unused_folios_left(struct folio_batch *fbatch)
{
	unsigned j, n = 0;

	/* shift unused page to beginning of fbatch */
for (j = 0; j < folio_batch_count(fbatch); j++) {
1395
if (!fbatch->folios[j])
1396
continue;
1397
1398
if (n < j) {
1399
fbatch->folios[n] = fbatch->folios[j];
1400
}
1401
1402
n++;
1403
}
1404
1405
fbatch->nr = n;
1406
}
1407
1408
static
1409
int ceph_submit_write(struct address_space *mapping,
1410
struct writeback_control *wbc,
1411
struct ceph_writeback_ctl *ceph_wbc)
1412
{
1413
struct inode *inode = mapping->host;
1414
struct ceph_inode_info *ci = ceph_inode(inode);
1415
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
1416
struct ceph_client *cl = fsc->client;
1417
struct ceph_vino vino = ceph_vino(inode);
1418
struct ceph_osd_request *req = NULL;
1419
struct page *page = NULL;
1420
bool caching = ceph_is_cache_enabled(inode);
1421
u64 offset;
1422
u64 len;
1423
unsigned i;
1424
1425
new_request:
1426
offset = ceph_fscrypt_page_offset(ceph_wbc->pages[0]);
1427
len = ceph_wbc->wsize;
1428
1429
req = ceph_osdc_new_request(&fsc->client->osdc,
1430
&ci->i_layout, vino,
1431
offset, &len, 0, ceph_wbc->num_ops,
1432
CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
1433
ceph_wbc->snapc, ceph_wbc->truncate_seq,
1434
ceph_wbc->truncate_size, false);
1435
if (IS_ERR(req)) {
1436
req = ceph_osdc_new_request(&fsc->client->osdc,
1437
&ci->i_layout, vino,
1438
offset, &len, 0,
1439
min(ceph_wbc->num_ops,
1440
CEPH_OSD_SLAB_OPS),
1441
CEPH_OSD_OP_WRITE,
1442
CEPH_OSD_FLAG_WRITE,
1443
ceph_wbc->snapc,
1444
ceph_wbc->truncate_seq,
1445
ceph_wbc->truncate_size,
1446
true);
1447
BUG_ON(IS_ERR(req));
1448
}
1449
1450
page = ceph_wbc->pages[ceph_wbc->locked_pages - 1];
1451
BUG_ON(len < ceph_fscrypt_page_offset(page) + thp_size(page) - offset);
1452
1453
if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
1454
for (i = 0; i < folio_batch_count(&ceph_wbc->fbatch); i++) {
1455
struct folio *folio = ceph_wbc->fbatch.folios[i];
1456
1457
if (!folio)
1458
continue;
1459
1460
page = &folio->page;
1461
redirty_page_for_writepage(wbc, page);
1462
unlock_page(page);
1463
}
1464
1465
for (i = 0; i < ceph_wbc->locked_pages; i++) {
1466
page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);
1467
1468
if (!page)
1469
continue;
1470
1471
redirty_page_for_writepage(wbc, page);
1472
unlock_page(page);
1473
}
1474
1475
ceph_osdc_put_request(req);
1476
return -EIO;
1477
}
1478
1479
req->r_callback = writepages_finish;
1480
req->r_inode = inode;
1481
1482
/* Format the osd request message and submit the write */
1483
len = 0;
1484
ceph_wbc->data_pages = ceph_wbc->pages;
1485
ceph_wbc->op_idx = 0;
1486
for (i = 0; i < ceph_wbc->locked_pages; i++) {
1487
u64 cur_offset;
1488
1489
page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);
1490
cur_offset = page_offset(page);
1491
1492
/*
1493
* Discontinuity in page range? Ceph can handle that by just passing
1494
* multiple extents in the write op.
1495
*/
1496
if (offset + len != cur_offset) {
1497
/* If it's full, stop here */
1498
if (ceph_wbc->op_idx + 1 == req->r_num_ops)
1499
break;
1500
1501
/* Kick off an fscache write with what we have so far. */
1502
ceph_fscache_write_to_cache(inode, offset, len, caching);
1503
1504
/* Start a new extent */
1505
osd_req_op_extent_dup_last(req, ceph_wbc->op_idx,
1506
cur_offset - offset);
1507
1508
doutc(cl, "got pages at %llu~%llu\n", offset, len);
1509
1510
osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
1511
ceph_wbc->data_pages,
1512
len, 0,
1513
ceph_wbc->from_pool,
1514
false);
1515
osd_req_op_extent_update(req, ceph_wbc->op_idx, len);
1516
1517
len = 0;
1518
offset = cur_offset;
1519
ceph_wbc->data_pages = ceph_wbc->pages + i;
1520
ceph_wbc->op_idx++;
1521
}
1522
1523
set_page_writeback(page);
1524
1525
if (caching)
1526
ceph_set_page_fscache(page);
1527
1528
len += thp_size(page);
1529
}
1530
1531
ceph_fscache_write_to_cache(inode, offset, len, caching);
1532
1533
if (ceph_wbc->size_stable) {
1534
len = min(len, ceph_wbc->i_size - offset);
1535
} else if (i == ceph_wbc->locked_pages) {
1536
/* writepages_finish() clears writeback pages
1537
* according to the data length, so make sure
1538
* data length covers all locked pages */
1539
u64 min_len = len + 1 - thp_size(page);
1540
len = get_writepages_data_length(inode,
1541
ceph_wbc->pages[i - 1],
1542
offset);
1543
len = max(len, min_len);
1544
}
1545
1546
if (IS_ENCRYPTED(inode))
1547
len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
1548
1549
doutc(cl, "got pages at %llu~%llu\n", offset, len);
1550
1551
if (IS_ENCRYPTED(inode) &&
1552
((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK)) {
1553
pr_warn_client(cl,
1554
"bad encrypted write offset=%lld len=%llu\n",
1555
offset, len);
1556
}
1557
1558
osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
1559
ceph_wbc->data_pages, len,
1560
0, ceph_wbc->from_pool, false);
1561
osd_req_op_extent_update(req, ceph_wbc->op_idx, len);
1562
1563
BUG_ON(ceph_wbc->op_idx + 1 != req->r_num_ops);
1564
1565
ceph_wbc->from_pool = false;
1566
if (i < ceph_wbc->locked_pages) {
1567
BUG_ON(ceph_wbc->num_ops <= req->r_num_ops);
1568
ceph_wbc->num_ops -= req->r_num_ops;
1569
ceph_wbc->locked_pages -= i;
1570
1571
/* allocate new pages array for next request */
1572
ceph_wbc->data_pages = ceph_wbc->pages;
1573
__ceph_allocate_page_array(ceph_wbc, ceph_wbc->locked_pages);
1574
memcpy(ceph_wbc->pages, ceph_wbc->data_pages + i,
1575
ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
1576
memset(ceph_wbc->data_pages + i, 0,
1577
ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
1578
} else {
1579
BUG_ON(ceph_wbc->num_ops != req->r_num_ops);
1580
/* request message now owns the pages array */
1581
ceph_wbc->pages = NULL;
1582
}
1583
1584
req->r_mtime = inode_get_mtime(inode);
1585
ceph_osdc_start_request(&fsc->client->osdc, req);
1586
req = NULL;
1587
1588
wbc->nr_to_write -= i;
1589
if (ceph_wbc->pages)
1590
goto new_request;
1591
1592
return 0;
1593
}
1594
1595
static
1596
void ceph_wait_until_current_writes_complete(struct address_space *mapping,
1597
struct writeback_control *wbc,
1598
struct ceph_writeback_ctl *ceph_wbc)
1599
{
1600
struct page *page;
1601
unsigned i, nr;
1602
1603
if (wbc->sync_mode != WB_SYNC_NONE &&
1604
ceph_wbc->start_index == 0 && /* all dirty pages were checked */
1605
!ceph_wbc->head_snapc) {
1606
ceph_wbc->index = 0;
1607
1608
while ((ceph_wbc->index <= ceph_wbc->end) &&
1609
(nr = filemap_get_folios_tag(mapping,
1610
&ceph_wbc->index,
1611
(pgoff_t)-1,
1612
PAGECACHE_TAG_WRITEBACK,
1613
&ceph_wbc->fbatch))) {
1614
for (i = 0; i < nr; i++) {
1615
page = &ceph_wbc->fbatch.folios[i]->page;
1616
if (page_snap_context(page) != ceph_wbc->snapc)
1617
continue;
1618
wait_on_page_writeback(page);
1619
}
1620
1621
folio_batch_release(&ceph_wbc->fbatch);
1622
cond_resched();
1623
}
1624
}
1625
}
1626
1627
/*
1628
* initiate async writeback
1629
*/
1630
static int ceph_writepages_start(struct address_space *mapping,
1631
struct writeback_control *wbc)
1632
{
1633
struct inode *inode = mapping->host;
1634
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
1635
struct ceph_client *cl = fsc->client;
1636
struct ceph_writeback_ctl ceph_wbc;
1637
int rc = 0;
1638
1639
if (wbc->sync_mode == WB_SYNC_NONE && fsc->write_congested)
1640
return 0;
1641
1642
doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
1643
wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
1644
(wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
1645
1646
if (is_forced_umount(mapping)) {
1647
/* we're in a forced umount, don't write! */
1648
return -EIO;
1649
}
1650
1651
ceph_init_writeback_ctl(mapping, wbc, &ceph_wbc);
1652
1653
if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
1654
rc = -EIO;
1655
goto out;
1656
}
1657
1658
retry:
1659
rc = ceph_define_writeback_range(mapping, wbc, &ceph_wbc);
1660
if (rc == -ENODATA) {
1661
/* hmm, why does writepages get called when there
1662
is no dirty data? */
1663
rc = 0;
1664
goto dec_osd_stopping_blocker;
1665
}
1666
1667
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1668
tag_pages_for_writeback(mapping, ceph_wbc.index, ceph_wbc.end);
1669
1670
while (!has_writeback_done(&ceph_wbc)) {
1671
ceph_wbc.locked_pages = 0;
1672
ceph_wbc.max_pages = ceph_wbc.wsize >> PAGE_SHIFT;
1673
1674
get_more_pages:
1675
ceph_folio_batch_reinit(&ceph_wbc);
1676
1677
ceph_wbc.nr_folios = filemap_get_folios_tag(mapping,
1678
&ceph_wbc.index,
1679
ceph_wbc.end,
1680
ceph_wbc.tag,
1681
&ceph_wbc.fbatch);
1682
doutc(cl, "pagevec_lookup_range_tag for tag %#x got %d\n",
1683
ceph_wbc.tag, ceph_wbc.nr_folios);
1684
1685
if (!ceph_wbc.nr_folios && !ceph_wbc.locked_pages)
1686
break;
1687
1688
process_folio_batch:
1689
rc = ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
1690
if (rc)
1691
goto release_folios;
1692
1693
/* did we get anything? */
1694
if (!ceph_wbc.locked_pages)
1695
goto release_folios;
1696
1697
if (ceph_wbc.processed_in_fbatch) {
1698
ceph_shift_unused_folios_left(&ceph_wbc.fbatch);
1699
1700
if (folio_batch_count(&ceph_wbc.fbatch) == 0 &&
1701
ceph_wbc.locked_pages < ceph_wbc.max_pages) {
1702
doutc(cl, "reached end fbatch, trying for more\n");
1703
goto get_more_pages;
1704
}
1705
}
1706
1707
rc = ceph_submit_write(mapping, wbc, &ceph_wbc);
1708
if (rc)
1709
goto release_folios;
1710
1711
ceph_wbc.locked_pages = 0;
1712
ceph_wbc.strip_unit_end = 0;
1713
1714
if (folio_batch_count(&ceph_wbc.fbatch) > 0) {
1715
ceph_wbc.nr_folios =
1716
folio_batch_count(&ceph_wbc.fbatch);
1717
goto process_folio_batch;
1718
}
1719
1720
/*
1721
* We stop writing back only if we are not doing
1722
* integrity sync. In case of integrity sync we have to
1723
* keep going until we have written all the pages
1724
* we tagged for writeback prior to entering this loop.
1725
*/
1726
if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
1727
ceph_wbc.done = true;
1728
1729
release_folios:
1730
doutc(cl, "folio_batch release on %d folios (%p)\n",
1731
(int)ceph_wbc.fbatch.nr,
1732
ceph_wbc.fbatch.nr ? ceph_wbc.fbatch.folios[0] : NULL);
1733
folio_batch_release(&ceph_wbc.fbatch);
1734
}
1735
1736
if (ceph_wbc.should_loop && !ceph_wbc.done) {
1737
/* more to do; loop back to beginning of file */
1738
doutc(cl, "looping back to beginning of file\n");
1739
/* OK even when start_index == 0 */
1740
ceph_wbc.end = ceph_wbc.start_index - 1;
1741
1742
/* to write dirty pages associated with next snapc,
1743
* we need to wait until current writes complete */
1744
ceph_wait_until_current_writes_complete(mapping, wbc, &ceph_wbc);
1745
1746
ceph_wbc.start_index = 0;
1747
ceph_wbc.index = 0;
1748
goto retry;
1749
}
1750
1751
if (wbc->range_cyclic || (ceph_wbc.range_whole && wbc->nr_to_write > 0))
1752
mapping->writeback_index = ceph_wbc.index;
1753
1754
dec_osd_stopping_blocker:
1755
ceph_dec_osd_stopping_blocker(fsc->mdsc);
1756
1757
out:
1758
ceph_put_snap_context(ceph_wbc.last_snapc);
1759
doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
1760
rc);
1761
1762
return rc;
1763
}
1764
1765
/*
1766
* See if a given @snapc is either writeable, or already written.
1767
*/
1768
static int context_is_writeable_or_written(struct inode *inode,
1769
struct ceph_snap_context *snapc)
1770
{
1771
struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
1772
int ret = !oldest || snapc->seq <= oldest->seq;
1773
1774
ceph_put_snap_context(oldest);
1775
return ret;
1776
}
1777
1778
/**
1779
* ceph_find_incompatible - find an incompatible context and return it
1780
* @folio: folio being dirtied
1781
*
1782
* We are only allowed to write into/dirty a folio if the folio is
1783
* clean, or already dirty within the same snap context. Returns a
1784
* conflicting context if there is one, NULL if there isn't, or a
1785
* negative error code on other errors.
1786
*
1787
* Must be called with folio lock held.
1788
*/
1789
static struct ceph_snap_context *
1790
ceph_find_incompatible(struct folio *folio)
1791
{
1792
struct inode *inode = folio->mapping->host;
1793
struct ceph_client *cl = ceph_inode_to_client(inode);
1794
struct ceph_inode_info *ci = ceph_inode(inode);
1795
1796
if (ceph_inode_is_shutdown(inode)) {
1797
doutc(cl, " %llx.%llx folio %p is shutdown\n",
1798
ceph_vinop(inode), folio);
1799
return ERR_PTR(-ESTALE);
1800
}
1801
1802
for (;;) {
1803
struct ceph_snap_context *snapc, *oldest;
1804
1805
folio_wait_writeback(folio);
1806
1807
snapc = page_snap_context(&folio->page);
1808
if (!snapc || snapc == ci->i_head_snapc)
1809
break;
1810
1811
/*
1812
* this folio is already dirty in another (older) snap
1813
* context! is it writeable now?
1814
*/
1815
oldest = get_oldest_context(inode, NULL, NULL);
1816
if (snapc->seq > oldest->seq) {
1817
/* not writeable -- return it for the caller to deal with */
1818
ceph_put_snap_context(oldest);
1819
doutc(cl, " %llx.%llx folio %p snapc %p not current or oldest\n",
1820
ceph_vinop(inode), folio, snapc);
1821
return ceph_get_snap_context(snapc);
1822
}
1823
ceph_put_snap_context(oldest);
1824
1825
/* yay, writeable, do it now (without dropping folio lock) */
1826
doutc(cl, " %llx.%llx folio %p snapc %p not current, but oldest\n",
1827
ceph_vinop(inode), folio, snapc);
1828
if (folio_clear_dirty_for_io(folio)) {
1829
int r = write_folio_nounlock(folio, NULL);
1830
if (r < 0)
1831
return ERR_PTR(r);
1832
}
1833
}
1834
return NULL;
1835
}
1836
1837
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	snapc = ceph_find_incompatible(*foliop);
	if (snapc) {
		int r;

		folio_unlock(*foliop);
		folio_put(*foliop);
		*foliop = NULL;
		if (IS_ERR(snapc))
			return PTR_ERR(snapc);

		ceph_queue_writeback(inode);
		r = wait_event_killable(ci->i_cap_wq,
					context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
		return r == 0 ? -EAGAIN : r;
	}
	return 0;
}

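/*
 * Illustrative note: wait_event_killable() returns 0 once the
 * conflicting context has become writeable, so the 0 case is turned
 * into -EAGAIN to request a retry of write_begin with a fresh folio;
 * a fatal signal yields -ERESTARTSYS, which is returned unchanged.
 */
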
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(const struct kiocb *iocb,
			    struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct folio **foliop, void **fsdata)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int r;

	r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, foliop, NULL);
	if (r < 0)
		return r;

	folio_wait_private_2(*foliop); /* [DEPRECATED] */
	WARN_ON_ONCE(!folio_test_locked(*foliop));
	return 0;
}

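/*
 * Illustrative note: netfs_write_begin() hands back the folio locked
 * and, for a partial overwrite, already brought uptodate by reading
 * from the OSDs first; all that remains here is the deprecated
 * fscache write-wait and a locking sanity check.
 */
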
/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(const struct kiocb *iocb,
			  struct address_space *mapping, loff_t pos,
			  unsigned len, unsigned copied,
			  struct folio *folio, void *fsdata)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	bool check_cap = false;

	doutc(cl, "%llx.%llx file %p folio %p %d~%d (%d)\n", ceph_vinop(inode),
	      file, folio, (int)pos, (int)copied, (int)len);

	if (!folio_test_uptodate(folio)) {
		/* just return that nothing was copied on a short copy */
		if (copied < len) {
			copied = 0;
			goto out;
		}
		folio_mark_uptodate(folio);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	folio_mark_dirty(folio);

out:
	folio_unlock(folio);
	folio_put(folio);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY);

	return copied;
}

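/*
 * Illustrative sequence (a sketch of the generic buffered-write path,
 * not ceph-specific code): generic_perform_write() drives the two
 * hooks above roughly as
 *
 *	status = a_ops->write_begin(iocb, mapping, pos, len, &folio, &fsdata);
 *	copied = copy_folio_from_iter_atomic(folio, offset, len, i);
 *	status = a_ops->write_end(iocb, mapping, pos, len, copied, folio, fsdata);
 *
 * which is why a short copy into a non-uptodate folio reports 0 bytes
 * copied: the generic code then falls back and retries that chunk.
 */
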
const struct address_space_operations ceph_aops = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.dirty_folio = ceph_dirty_folio,
	.invalidate_folio = ceph_invalidate_folio,
	.release_folio = netfs_release_folio,
	.direct_IO = noop_direct_IO,
	.migrate_folio = filemap_migrate_folio,
};

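/*
 * Illustrative note: the read side is delegated to the netfs library
 * (netfs_read_folio/netfs_readahead) while writeback and dirtying stay
 * in ceph. ->direct_IO is noop_direct_IO because O_DIRECT is serviced
 * in the read_iter/write_iter paths; the entry mainly keeps the VFS
 * open-time O_DIRECT checks satisfied.
 */
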
static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

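/*
 * Illustrative note: siginitsetinv(&mask, sigmask(SIGKILL)) builds the
 * set of every signal except SIGKILL, so a faulting task that holds
 * cap references cannot be interrupted by ordinary signals but can
 * still be killed outright.
 */
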
/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	if (ceph_inode_is_shutdown(inode))
		return ret;

	ceph_block_sigs(&oldset);

	doutc(cl, "%llx.%llx %llu trying to get caps\n",
	      ceph_vinop(inode), off);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
	if (err < 0)
		goto out_restore;

	doutc(cl, "%llx.%llx %llu got cap refs on %s\n", ceph_vinop(inode),
	      off, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    !ceph_has_inline_data(ci)) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		doutc(cl, "%llx.%llx %llu drop cap refs %s ret %x\n",
		      ceph_vinop(inode), off, ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;

		filemap_invalidate_lock_shared(mapping);
		page = find_or_create_page(mapping, 0,
				mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		filemap_invalidate_unlock_shared(mapping);
		doutc(cl, "%llx.%llx %llu read inline data ret %x\n",
		      ceph_vinop(inode), off, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}

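/*
 * Illustrative note: the common case services the fault through
 * filemap_fault() while holding FILE_CACHE cap references; only when
 * the inode still carries inline data (and no cache caps were granted)
 * does the fallback above fetch the inline blob into page 0 and hand
 * it back with VM_FAULT_MAJOR | VM_FAULT_LOCKED.
 */
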
static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct folio *folio = page_folio(vmf->page);
	loff_t off = folio_pos(folio);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	if (ceph_inode_is_shutdown(inode))
		return ret;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (off + folio_size(folio) <= size)
		len = folio_size(folio);
	else
		len = offset_in_folio(folio, size);

	doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
	      ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
	if (err < 0)
		goto out_free;

	doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
	      off, len, ceph_cap_string(got));

	/* Update time before taking folio lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		struct ceph_snap_context *snapc;

		folio_lock(folio);

		if (folio_mkwrite_check_truncate(folio, inode) < 0) {
			folio_unlock(folio);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		snapc = ceph_find_incompatible(folio);
		if (!snapc) {
			/* success. we'll keep the folio locked. */
			folio_mark_dirty(folio);
			ret = VM_FAULT_LOCKED;
			break;
		}

		folio_unlock(folio);

		if (IS_ERR(snapc)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}

		ceph_queue_writeback(inode);
		err = wait_event_killable(ci->i_cap_wq,
				context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
	} while (err == 0);

	if (ret == VM_FAULT_LOCKED) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n",
	      ceph_vinop(inode), off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs_async(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}

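/*
 * Illustrative note: the do/while above is the mkwrite variant of the
 * snap-context dance: lock the folio, bail with VM_FAULT_NOPAGE if it
 * raced with truncate, dirty it and return VM_FAULT_LOCKED if its
 * context is compatible, otherwise unlock, kick writeback, wait on
 * i_cap_wq and retry until killed (err != 0).
 */
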
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	doutc(cl, "%p %llx.%llx len %zu locked_page %p\n", inode,
	      ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}

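/*
 * Illustrative note: inline data never exceeds one page (it lives in
 * the head of the first object), so copying into page 0 and zeroing
 * the tail beyond @len is enough to make the page uptodate.
 */
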
int ceph_uninline_data(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *req = NULL;
	struct ceph_cap_flush *prealloc_cf = NULL;
	struct folio *folio = NULL;
	u64 inline_version = CEPH_INLINE_NONE;
	struct page *pages[1];
	int err = 0;
	u64 len;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode),
	      inline_version);

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (inline_version == CEPH_INLINE_NONE)
		return 0;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if (inline_version == 1) /* initial version, no data */
		goto out_uninline;

	folio = read_mapping_folio(inode->i_mapping, 0, file);
	if (IS_ERR(folio)) {
		err = PTR_ERR(folio);
		goto out;
	}

	folio_lock(folio);

	len = i_size_read(inode);
	if (len > folio_size(folio))
		len = folio_size(folio);

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_unlock;
	}

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(&fsc->client->osdc, req);
	err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out_unlock;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_unlock;
	}

	pages[0] = folio_page(folio, 0);
	osd_req_op_extent_osd_data_pages(req, 1, pages, len, 0, false, false);

	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put_req;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put_req;
	}

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(&fsc->client->osdc, req);
	err = ceph_osdc_wait_request(&fsc->client->osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);

out_uninline:
	if (!err) {
		int dirty;

		/* Set to CAP_INLINE_NONE and dirty the caps */
		down_read(&fsc->mdsc->snap_rwsem);
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		up_read(&fsc->mdsc->snap_rwsem);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}
out_put_req:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out_unlock:
	if (folio) {
		folio_unlock(folio);
		folio_put(folio);
	}
out:
	ceph_free_cap_flush(prealloc_cf);
	doutc(cl, "%llx.%llx inline_version %llu = %d\n",
	      ceph_vinop(inode), inline_version, err);
	return err;
}

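/*
 * Illustrative note: uninlining is two OSD round trips. The first
 * CREATEs the object; the second is a compound request whose op 0 is
 * a CMPXATTR guard on "inline_version" (so the write only applies if
 * our version is newer), op 1 writes the data and op 2 SETXATTRs the
 * new version. A failed guard surfaces as -ECANCELED and is treated
 * as success above, since it means another client already uninlined
 * this inode.
 */
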
static const struct vm_operations_struct ceph_vmops = {
	.fault = ceph_filemap_fault,
	.page_mkwrite = ceph_page_mkwrite,
};

int ceph_mmap_prepare(struct vm_area_desc *desc)
{
	struct address_space *mapping = desc->file->f_mapping;

	if (!mapping->a_ops->read_folio)
		return -ENOEXEC;
	desc->vm_ops = &ceph_vmops;
	return 0;
}

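/*
 * Illustrative note: mmap is refused with -ENOEXEC unless the mapping
 * can fault pages in via ->read_folio, mirroring the check that
 * generic_file_mmap() performs for other filesystems.
 */
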
enum {
	POOL_READ = 1,
	POOL_WRITE = 2,
};

static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						      perm->pool_ns,
						      perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		doutc(cl, "pool %lld ns %.*s no perm cached\n", pool,
		      (int)pool_ns->len, pool_ns->str);
	else
		doutc(cl, "pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						      perm->pool_ns,
						      perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	ceph_osdc_start_request(&fsc->client->osdc, rd_req);

	wr_req->r_mtime = inode_get_mtime(&ci->netfs.inode);
	ceph_osdc_start_request(&fsc->client->osdc, wr_req);

	err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM) {
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		goto out_unlock;
	}

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		if (err2 == -EBLOCKLISTED)
			fsc->blocklisted = true;
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(struct_size(perm, pool_ns, pool_ns_len + 1), GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		doutc(cl, "pool %lld ns %.*s result = %d\n", pool,
		      (int)pool_ns->len, pool_ns->str, err);
	else
		doutc(cl, "pool %lld result = %d\n", pool, err);
	return err;
}

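/*
 * Illustrative note: the permission probe issues a STAT read and an
 * exclusive CREATE against the inode's first object. -ENOENT still
 * proves read access and -EEXIST still proves write access, so only
 * -EPERM (or a real failure) withholds the corresponding bit; the
 * result is then cached in mdsc->pool_perm_tree, keyed by pool id and
 * namespace.
 */
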
int ceph_pool_perm_check(struct inode *inode, int need)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_string *pool_ns;
	s64 pool;
	int ret, flags;

	/* Only need to do this for regular files */
	if (!S_ISREG(inode->i_mode))
		return 0;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * Pool permission check needs to write to the first object.
		 * But for snapshot, head of the first object may have already
		 * been deleted. Skip check to avoid creating orphan object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			doutc(cl, "pool %lld no read perm\n", pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			doutc(cl, "pool %lld no write perm\n", pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}

void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}