GitHub Repository: torvalds/linux
Path: blob/master/fs/erofs/fileio.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2024, Alibaba Cloud
 */
#include "internal.h"
#include <trace/events/erofs.h>

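/*
 * A single file-backed read request: the bio vectors, the bio itself, the
 * kiocb issued against the backing file, and a reference count shared by
 * the submission and completion paths.
 */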
struct erofs_fileio_rq {
	struct bio_vec bvecs[16];
	struct bio bio;
	struct kiocb iocb;
	struct super_block *sb;
	refcount_t ref;
};

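/* Per-call I/O state: current logical/device mappings and the open request */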
struct erofs_fileio {
	struct erofs_map_blocks map;
	struct erofs_map_dev dev;
	struct erofs_fileio_rq *rq;
};

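/*
 * kiocb completion: zero-fill any short-read tail, then either finish the
 * attached folios directly or record the error in the bio status before
 * ending the bio and dropping the request reference.
 */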
static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
{
	struct erofs_fileio_rq *rq =
			container_of(iocb, struct erofs_fileio_rq, iocb);
	struct folio_iter fi;

	if (ret >= 0 && ret != rq->bio.bi_iter.bi_size) {
		bio_advance(&rq->bio, ret);
		zero_fill_bio(&rq->bio);
	}
	if (!rq->bio.bi_end_io) {
		bio_for_each_folio_all(fi, &rq->bio) {
			DBG_BUGON(folio_test_uptodate(fi.folio));
			erofs_onlinefolio_end(fi.folio, ret < 0, false);
		}
	} else if (ret < 0 && !rq->bio.bi_status) {
		rq->bio.bi_status = errno_to_blk_status(ret);
	}
	bio_endio(&rq->bio);
	bio_uninit(&rq->bio);
	if (refcount_dec_and_test(&rq->ref))
		kfree(rq);
}

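/*
 * Issue the request as an asynchronous read against the backing file,
 * preferring direct I/O when the DIRECT_IO option is set and the file
 * supports it; complete it in place if the read was not queued
 * asynchronously (ret != -EIOCBQUEUED).
 */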
static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
{
	struct iov_iter iter;
	ssize_t ret;

	if (!rq)
		return;
	rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	rq->iocb.ki_ioprio = get_current_ioprio();
	rq->iocb.ki_complete = erofs_fileio_ki_complete;
	if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
	    rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
		rq->iocb.ki_flags = IOCB_DIRECT;
	iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
		      rq->bio.bi_iter.bi_size);
	scoped_with_creds(rq->iocb.ki_filp->f_cred)
		ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
	if (ret != -EIOCBQUEUED)
		erofs_fileio_ki_complete(&rq->iocb, ret);
	if (refcount_dec_and_test(&rq->ref))
		kfree(rq);
}

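/* Allocate a read request for the backing file of @mdev (__GFP_NOFAIL) */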
static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
{
	struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq),
					     GFP_KERNEL | __GFP_NOFAIL);

	bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ);
	rq->iocb.ki_filp = mdev->m_dif->file;
	rq->sb = mdev->m_sb;
	refcount_set(&rq->ref, 2);
	return rq;
}

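/* Bio-style wrappers so callers can drive file-backed I/O through bios */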
struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
{
	return &erofs_fileio_rq_alloc(mdev)->bio;
}

void erofs_fileio_submit_bio(struct bio *bio)
{
	return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
						   bio));
}

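/*
 * Walk one folio extent by extent: copy data packed in the metadata area,
 * zero unmapped ranges, and attach mapped ranges to a read request, starting
 * a new request whenever the physical address or device changes.
 */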
static int erofs_fileio_scan_folio(struct erofs_fileio *io,
				   struct inode *inode, struct folio *folio)
{
	struct erofs_map_blocks *map = &io->map;
	unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
	loff_t pos = folio_pos(folio), ofs;
	int err = 0;

	erofs_onlinefolio_init(folio);
	while (cur < end) {
		if (!in_range(pos + cur, map->m_la, map->m_llen)) {
			map->m_la = pos + cur;
			map->m_llen = end - cur;
			err = erofs_map_blocks(inode, map);
			if (err)
				break;
		}

		ofs = folio_pos(folio) + cur - map->m_la;
		len = min_t(loff_t, map->m_llen - ofs, end - cur);
		if (map->m_flags & EROFS_MAP_META) {
			struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
			void *src;

			src = erofs_read_metabuf(&buf, inode->i_sb,
					map->m_pa + ofs, erofs_inode_in_metabox(inode));
			if (IS_ERR(src)) {
				err = PTR_ERR(src);
				break;
			}
			memcpy_to_folio(folio, cur, src, len);
			erofs_put_metabuf(&buf);
		} else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
			folio_zero_segment(folio, cur, cur + len);
			attached = 0;
		} else {
			if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
				       map->m_deviceid != io->dev.m_deviceid)) {
io_retry:
				erofs_fileio_rq_submit(io->rq);
				io->rq = NULL;
			}

			if (!io->rq) {
				io->dev = (struct erofs_map_dev) {
					.m_pa = io->map.m_pa + ofs,
					.m_deviceid = io->map.m_deviceid,
				};
				err = erofs_map_dev(inode->i_sb, &io->dev);
				if (err)
					break;
				io->rq = erofs_fileio_rq_alloc(&io->dev);
				io->rq->bio.bi_iter.bi_sector =
					(io->dev.m_dif->fsoff + io->dev.m_pa) >> 9;
				attached = 0;
			}
			if (!bio_add_folio(&io->rq->bio, folio, len, cur))
				goto io_retry;
			if (!attached++)
				erofs_onlinefolio_split(folio);
			io->dev.m_pa += len;
		}
		cur += len;
	}
	erofs_onlinefolio_end(folio, err, false);
	return err;
}

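/* ->read_folio() for file-backed mounts: scan a single folio and submit */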
static int erofs_fileio_read_folio(struct file *file, struct folio *folio)
{
	bool need_iput;
	struct inode *realinode = erofs_real_inode(folio_inode(folio), &need_iput);
	struct erofs_fileio io = {};
	int err;

	trace_erofs_read_folio(realinode, folio, true);
	err = erofs_fileio_scan_folio(&io, realinode, folio);
	erofs_fileio_rq_submit(io.rq);
	if (need_iput)
		iput(realinode);
	return err;
}

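/* ->readahead() for file-backed mounts: batch consecutive folios into requests */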
static void erofs_fileio_readahead(struct readahead_control *rac)
{
	bool need_iput;
	struct inode *realinode = erofs_real_inode(rac->mapping->host, &need_iput);
	struct erofs_fileio io = {};
	struct folio *folio;
	int err;

	trace_erofs_readahead(realinode, readahead_index(rac),
			      readahead_count(rac), true);
	while ((folio = readahead_folio(rac))) {
		err = erofs_fileio_scan_folio(&io, realinode, folio);
		if (err && err != -EINTR)
			erofs_err(realinode->i_sb, "readahead error at folio %lu @ nid %llu",
				  folio->index, EROFS_I(realinode)->nid);
	}
	erofs_fileio_rq_submit(io.rq);
	if (need_iput)
		iput(realinode);
}

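/* Address space operations used for file-backed EROFS mounts */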
const struct address_space_operations erofs_fileio_aops = {
	.read_folio = erofs_fileio_read_folio,
	.readahead = erofs_fileio_readahead,
};