GitHub Repository: torvalds/linux
Path: blob/master/fs/erofs/fileio.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2024, Alibaba Cloud
 */
#include "internal.h"
#include <trace/events/erofs.h>

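/*
 * A single read request against the backing file: a bio carrying up to 16
 * bio_vecs plus the kiocb used to issue the positioned read.
 */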
struct erofs_fileio_rq {
	struct bio_vec bvecs[16];
	struct bio bio;
	struct kiocb iocb;
	struct super_block *sb;
};

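/* Per-call walk state: the current block and device mappings plus the request being built */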
struct erofs_fileio {
	struct erofs_map_blocks map;
	struct erofs_map_dev dev;
	struct erofs_fileio_rq *rq;
};

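/*
 * kiocb completion: zero-fill the tail of a short read, then either forward
 * the status to the chained bi_end_io handler or end each folio directly,
 * and finally release the request.
 */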
static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
{
	struct erofs_fileio_rq *rq =
			container_of(iocb, struct erofs_fileio_rq, iocb);
	struct folio_iter fi;

	if (ret > 0) {
		if (ret != rq->bio.bi_iter.bi_size) {
			bio_advance(&rq->bio, ret);
			zero_fill_bio(&rq->bio);
		}
		ret = 0;
	}
	if (rq->bio.bi_end_io) {
		if (ret < 0 && !rq->bio.bi_status)
			rq->bio.bi_status = errno_to_blk_status(ret);
		rq->bio.bi_end_io(&rq->bio);
	} else {
		bio_for_each_folio_all(fi, &rq->bio) {
			DBG_BUGON(folio_test_uptodate(fi.folio));
			erofs_onlinefolio_end(fi.folio, ret, false);
		}
	}
	bio_uninit(&rq->bio);
	kfree(rq);
}

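/*
 * Issue the request as a positioned (optionally O_DIRECT) read on the backing
 * file under that file's credentials; if the read completes synchronously
 * rather than returning -EIOCBQUEUED, finish it here.
 */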
static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
{
	const struct cred *old_cred;
	struct iov_iter iter;
	int ret;

	if (!rq)
		return;
	rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	rq->iocb.ki_ioprio = get_current_ioprio();
	rq->iocb.ki_complete = erofs_fileio_ki_complete;
	if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
	    rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
		rq->iocb.ki_flags = IOCB_DIRECT;
	iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
		      rq->bio.bi_iter.bi_size);
	old_cred = override_creds(rq->iocb.ki_filp->f_cred);
	ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
	revert_creds(old_cred);
	if (ret != -EIOCBQUEUED)
		erofs_fileio_ki_complete(&rq->iocb, ret);
}

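/* Allocate a request and point its bio and kiocb at the mapped backing file */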
static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
{
	struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq),
					     GFP_KERNEL | __GFP_NOFAIL);

	bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ);
	rq->iocb.ki_filp = mdev->m_dif->file;
	rq->sb = mdev->m_sb;
	return rq;
}

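/* Hand out the embedded bio of a freshly allocated request */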
struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
{
	return &erofs_fileio_rq_alloc(mdev)->bio;
}

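/* Submit a bio obtained from erofs_fileio_bio_alloc() */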
void erofs_fileio_submit_bio(struct bio *bio)
{
	return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
						   bio));
}

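/*
 * Walk one folio extent by extent: copy metadata-inlined data, zero unmapped
 * holes, and add mapped ranges to a read request, flushing and restarting the
 * request whenever the physical range or device changes or the bio is full.
 */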
static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct erofs_map_blocks *map = &io->map;
	unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
	loff_t pos = folio_pos(folio), ofs;
	int err = 0;

	erofs_onlinefolio_init(folio);
	while (cur < end) {
		if (!in_range(pos + cur, map->m_la, map->m_llen)) {
			map->m_la = pos + cur;
			map->m_llen = end - cur;
			err = erofs_map_blocks(inode, map);
			if (err)
				break;
		}

		ofs = folio_pos(folio) + cur - map->m_la;
		len = min_t(loff_t, map->m_llen - ofs, end - cur);
		if (map->m_flags & EROFS_MAP_META) {
			struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
			void *src;

			src = erofs_read_metabuf(&buf, inode->i_sb,
					map->m_pa + ofs, erofs_inode_in_metabox(inode));
			if (IS_ERR(src)) {
				err = PTR_ERR(src);
				break;
			}
			memcpy_to_folio(folio, cur, src, len);
			erofs_put_metabuf(&buf);
		} else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
			folio_zero_segment(folio, cur, cur + len);
			attached = 0;
		} else {
			if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
				       map->m_deviceid != io->dev.m_deviceid)) {
io_retry:
				erofs_fileio_rq_submit(io->rq);
				io->rq = NULL;
			}

			if (!io->rq) {
				io->dev = (struct erofs_map_dev) {
					.m_pa = io->map.m_pa + ofs,
					.m_deviceid = io->map.m_deviceid,
				};
				err = erofs_map_dev(inode->i_sb, &io->dev);
				if (err)
					break;
				io->rq = erofs_fileio_rq_alloc(&io->dev);
				io->rq->bio.bi_iter.bi_sector =
					(io->dev.m_dif->fsoff + io->dev.m_pa) >> 9;
				attached = 0;
			}
			if (!bio_add_folio(&io->rq->bio, folio, len, cur))
				goto io_retry;
			if (!attached++)
				erofs_onlinefolio_split(folio);
			io->dev.m_pa += len;
		}
		cur += len;
	}
	erofs_onlinefolio_end(folio, err, false);
	return err;
}

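/* ->read_folio() for file-backed mounts: scan the folio and submit the request */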
static int erofs_fileio_read_folio(struct file *file, struct folio *folio)
{
	struct erofs_fileio io = {};
	int err;

	trace_erofs_read_folio(folio, true);
	err = erofs_fileio_scan_folio(&io, folio);
	erofs_fileio_rq_submit(io.rq);
	return err;
}

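/* ->readahead(): scan each readahead folio, then submit whatever remains queued */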
static void erofs_fileio_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct erofs_fileio io = {};
	struct folio *folio;
	int err;

	trace_erofs_readahead(inode, readahead_index(rac),
			      readahead_count(rac), true);
	while ((folio = readahead_folio(rac))) {
		err = erofs_fileio_scan_folio(&io, folio);
		if (err && err != -EINTR)
			erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
				  folio->index, EROFS_I(inode)->nid);
	}
	erofs_fileio_rq_submit(io.rq);
}

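/* Address space operations used by file-backed EROFS mounts */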
const struct address_space_operations erofs_fileio_aops = {
	.read_folio = erofs_fileio_read_folio,
	.readahead = erofs_fileio_readahead,
};