GitHub Repository: torvalds/linux
Path: blob/master/fs/erofs/zmap.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <linux/unaligned.h>
#include <trace/events/erofs.h>

struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	unsigned long lcn;
	/* compression extent information gathered */
	u8 type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
	bool partialref, in_mbox;
};

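/*
 * Load the lcluster index at @lcn from the "full" (non-compact) on-disk
 * index array into @m. For NONHEAD lclusters, delta[0] may carry a
 * CBLKCNT mark that encodes the pcluster size in blocks instead of a
 * plain lookback distance.
 */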
static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
				      unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t pos = Z_EROFS_FULL_INDEX_START(erofs_iloc(inode) +
			vi->inode_isize + vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_lcluster_index);
	struct z_erofs_lcluster_index *di;
	unsigned int advise;

	di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
	if (IS_ERR(di))
		return PTR_ERR(di);
	m->lcn = lcn;
	m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);

	advise = le16_to_cpu(di->di_advise);
	m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
	if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << vi->z_lclusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
	} else {
		m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
	}
	return 0;
}

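/*
 * Extract one compacted index starting at bit position @pos within @in:
 * a little-endian bitfield of @lobits low bits (clusterofs or delta),
 * immediately followed by a 2-bit lcluster type returned via @type.
 */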
static unsigned int decode_compactedbits(unsigned int lobits,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & ((1 << lobits) - 1);

	*type = (v >> lobits) & 3;
	return lo;
}

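/*
 * Compute the lookahead distance (delta[1]) for the lcluster at slot @i
 * by scanning the NONHEAD lclusters that follow it within the same pack.
 */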
static int get_compacted_la_distance(unsigned int lobits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lobits, in, encodebits * i, &type);

		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
	if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

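/*
 * Load an lcluster from the "compact" index format. Indexes are grouped
 * into packs of vcnt lclusters amortized to 4 bytes each (compact_4b,
 * vcnt = 2) or 2 bytes each (compact_2b, vcnt = 16): an initial run of
 * compact_4b indexes pads to 32-byte alignment, then the compact_2b
 * region (if enabled), then trailing compact_4b indexes. The last
 * __le32 of each pack holds a base blkaddr shared by the whole pack.
 */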
static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
					 unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ebase = Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
			vi->inode_isize + vi->xattr_isize);
	const unsigned int lclusterbits = vi->z_lclusterbits;
	const unsigned int totalidx = erofs_iblks(inode);
	unsigned int compacted_4b_initial, compacted_2b, amortizedshift;
	unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
	bool big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	erofs_off_t pos;
	u8 *in, type;
	int i;

	if (lcn >= totalidx || lclusterbits > 14)
		return -EINVAL;

	m->lcn = lcn;
	/* pad with compact_4b indexes to reach the 32-byte alignment of compacted_2b packs */
	compacted_4b_initial = ((32 - ebase % 32) / 4) & 7;
	compacted_2b = 0;
	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);

	pos = ebase;
	amortizedshift = 2;	/* compact_4b */
	if (lcn >= compacted_4b_initial) {
		pos += compacted_4b_initial * 4;
		lcn -= compacted_4b_initial;
		if (lcn < compacted_2b) {
			amortizedshift = 1;
		} else {
			pos += compacted_2b * 2;
			lcn -= compacted_2b;
		}
	}
	pos += lcn * (1 << amortizedshift);

	/* figure out the lcluster count in this pack */
	if (1 << amortizedshift == 4 && lclusterbits <= 14)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits <= 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	in = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
	if (IS_ERR(in))
		return PTR_ERR(in);

	/* note: this is not equivalent to round_up(pos, ..) */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	bytes = pos & ((vcnt << amortizedshift) - 1);
	in -= bytes;
	i = bytes >> amortizedshift;

	lo = decode_compactedbits(lobits, in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lobits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_LI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo field
		 * stores delta[1] rather than delta[0], so derive delta[0]
		 * from the previous lcluster instead.
		 */
		lo = decode_compactedbits(lobits, in,
					  encodebits * (i - 1), &type);
		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_LI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

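/*
 * Dispatch to the full or compact lcluster loader depending on the inode
 * datalayout, then sanity-check the decoded type and clusterofs.
 */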
static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
					   unsigned int lcn, bool lookahead)
{
	struct erofs_inode *vi = EROFS_I(m->inode);
	int err;

	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT) {
		err = z_erofs_load_compact_lcluster(m, lcn, lookahead);
	} else {
		DBG_BUGON(vi->datalayout != EROFS_INODE_COMPRESSED_FULL);
		err = z_erofs_load_full_lcluster(m, lcn);
	}
	if (err)
		return err;

	if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) {
		erofs_err(m->inode->i_sb, "unknown type %u @ lcn %u of nid %llu",
			  m->type, lcn, EROFS_I(m->inode)->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	} else if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD &&
		   m->clusterofs >= (1 << vi->z_lclusterbits)) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	return 0;
}

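/*
 * Walk backwards from m->lcn by the recorded delta[0] distances until a
 * HEAD/PLAIN lcluster is found, which determines the logical start
 * (m_la) of the extent being mapped.
 */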
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct super_block *sb = m->inode->i_sb;
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_lclusterbits;

	while (m->lcn >= lookback_distance) {
		unsigned long lcn = m->lcn - lookback_distance;
		int err;

		if (!lookback_distance)
			break;

		err = z_erofs_load_lcluster_from_disk(m, lcn, false);
		if (err)
			return err;
		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			lookback_distance = m->delta[0];
			continue;
		}
		m->headtype = m->type;
		m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
		return 0;
	}
	erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
		  lookback_distance, m->lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

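/*
 * Determine the on-disk (compressed) length of the extent. Without the
 * matching big pcluster feature bit, a pcluster is a single block;
 * otherwise the block count comes from the CBLKCNT mark of the 1st
 * NONHEAD lcluster following the HEAD lcluster.
 */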
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct inode *inode = m->inode;
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	bool bigpcl1 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	bool bigpcl2 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2;
	unsigned long lcn = m->lcn + 1;
	int err;

	DBG_BUGON(m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
	DBG_BUGON(m->type != m->headtype);

	if ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1 && !bigpcl1) ||
	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
	      m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && !bigpcl2) ||
	    (lcn << vi->z_lclusterbits) >= inode->i_size)
		m->compressedblks = 1;

	if (m->compressedblks)
		goto out;

	err = z_erofs_load_lcluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially
	 * without valid compressedblks, it at least must not be CBLKCNT;
	 * otherwise an internal implementation error has been detected.
	 *
	 * The following code can handle it properly anyway, but BUG_ON
	 * in debugging mode so that developers notice it.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

	if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD && m->delta[0] != 1) {
		erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * If the 1st NONHEAD lcluster is actually PLAIN or HEAD type rather
	 * than CBLKCNT, it's a 1 block-sized pcluster.
	 */
	if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD || !m->compressedblks)
		m->compressedblks = 1;
out:
	m->map->m_plen = erofs_pos(sb, m->compressedblks);
	return 0;
}

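/*
 * Extend m_llen to the full decompressed length of the extent by
 * skipping forward over NONHEAD lclusters (via delta[1]) until the next
 * HEAD lcluster or EOF is reached.
 */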
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_lclusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	while (1) {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_lcluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			/* work around invalid d1 generated by pre-1.0 mkfs */
			if (unlikely(!m->delta[1])) {
				m->delta[1] = 1;
				DBG_BUGON(1);
			}
		} else if (m->type < Z_EROFS_LCLUSTER_TYPE_MAX) {
			if (lcn != headlcn)
				break;	/* ends at the next HEAD lcluster */
			m->delta[1] = 1;
		}
		lcn += m->delta[1];
	}
	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

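/*
 * Map a logical offset to its physical extent for inodes using the full
 * or compact lcluster index formats, including ztailpacking inline data
 * and packed-inode fragments.
 */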
static int z_erofs_map_blocks_fo(struct inode *inode,
				 struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	bool ztailpacking = vi->z_idata_size;
	unsigned int lclusterbits = vi->z_lclusterbits;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
		.in_mbox = erofs_inode_in_metabox(inode),
	};
	unsigned int endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;
	int err;

	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	if (fragment && !(flags & EROFS_GET_BLOCKS_FINDTAIL) &&
	    !vi->z_tailextent_headlcn) {
		map->m_la = 0;
		map->m_llen = inode->i_size;
		map->m_flags = EROFS_MAP_FRAGMENT;
		return 0;
	}
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if ((flags & EROFS_GET_BLOCKS_FINDTAIL) && ztailpacking)
		vi->z_fragmentoff = m.nextpackoff;
	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD && endoff >= m.clusterofs) {
		m.headtype = m.type;
		map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
		/*
		 * For ztailpacking files, special EOF lclusters with up to
		 * three parts are supported to inline data more effectively.
		 */
		if (ztailpacking && end > inode->i_size)
			end = inode->i_size;
	} else {
		if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			end = (m.lcn << lclusterbits) | m.clusterofs;
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
			m.delta[0] = 1;
		}
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
	}
	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
			vi->z_fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_fragmentoff;
		map->m_plen = vi->z_idata_size;
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "ztailpacking inline data across blocks @ nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags = EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = erofs_pos(sb, m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto unmap_out;
	}

	if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
			map->m_algorithmformat = Z_EROFS_COMPRESSION_INTERLACED;
		else
			map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
	} else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
		map->m_algorithmformat = vi->z_algorithmtype[1];
	} else {
		map->m_algorithmformat = vi->z_algorithmtype[0];
	}

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
	     map->m_llen >= i_blocksize(inode))) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

unmap_out:
	erofs_unmap_metabuf(&m.map->buf);
	return err;
}

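/*
 * Map a logical offset for the extent-encoded layout: records too small
 * to carry pstart_hi are walked linearly (one per lcluster), while
 * larger records are binary-searched by their lstart fields.
 */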
static int z_erofs_map_blocks_ext(struct inode *inode,
				  struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool interlaced = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER;
	unsigned int recsz = z_erofs_extent_recsize(vi->z_advise);
	erofs_off_t pos = round_up(Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
				   vi->inode_isize + vi->xattr_isize), recsz);
	bool in_mbox = erofs_inode_in_metabox(inode);
	erofs_off_t lend = inode->i_size;
	erofs_off_t l, r, mid, pa, la, lstart;
	struct z_erofs_extent *ext;
	unsigned int fmt;
	bool last;

	map->m_flags = 0;
	if (recsz <= offsetof(struct z_erofs_extent, pstart_hi)) {
		if (recsz <= offsetof(struct z_erofs_extent, pstart_lo)) {
			ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
			if (IS_ERR(ext))
				return PTR_ERR(ext);
			pa = le64_to_cpu(*(__le64 *)ext);
			pos += sizeof(__le64);
			lstart = 0;
		} else {
			lstart = round_down(map->m_la, 1 << vi->z_lclusterbits);
			pos += (lstart >> vi->z_lclusterbits) * recsz;
			pa = EROFS_NULL_ADDR;
		}

		for (; lstart <= map->m_la; lstart += 1 << vi->z_lclusterbits) {
			ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
			if (IS_ERR(ext))
				return PTR_ERR(ext);
			map->m_plen = le32_to_cpu(ext->plen);
			if (pa != EROFS_NULL_ADDR) {
				map->m_pa = pa;
				pa += map->m_plen & Z_EROFS_EXTENT_PLEN_MASK;
			} else {
				map->m_pa = le32_to_cpu(ext->pstart_lo);
			}
			pos += recsz;
		}
		last = (lstart >= round_up(lend, 1 << vi->z_lclusterbits));
		lend = min(lstart, lend);
		lstart -= 1 << vi->z_lclusterbits;
	} else {
		lstart = lend;
		for (l = 0, r = vi->z_extents; l < r; ) {
			mid = l + (r - l) / 2;
			ext = erofs_read_metabuf(&map->buf, sb,
						 pos + mid * recsz, in_mbox);
			if (IS_ERR(ext))
				return PTR_ERR(ext);

			la = le32_to_cpu(ext->lstart_lo);
			pa = le32_to_cpu(ext->pstart_lo) |
				(u64)le32_to_cpu(ext->pstart_hi) << 32;
			if (recsz > offsetof(struct z_erofs_extent, lstart_hi))
				la |= (u64)le32_to_cpu(ext->lstart_hi) << 32;

			if (la > map->m_la) {
				r = mid;
				if (la > lend) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				lend = la;
			} else {
				l = mid + 1;
				if (map->m_la == la)
					r = min(l + 1, r);
				lstart = la;
				map->m_plen = le32_to_cpu(ext->plen);
				map->m_pa = pa;
			}
		}
		last = (l >= vi->z_extents);
	}

	if (lstart < lend) {
		map->m_la = lstart;
		if (last && (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
			map->m_flags = EROFS_MAP_FRAGMENT;
			vi->z_fragmentoff = map->m_plen;
			if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
				vi->z_fragmentoff |= map->m_pa << 32;
		} else if (map->m_plen & Z_EROFS_EXTENT_PLEN_MASK) {
			map->m_flags |= EROFS_MAP_MAPPED |
				EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED;
			fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
			if (fmt)
				map->m_algorithmformat = fmt - 1;
			else if (interlaced && !erofs_blkoff(sb, map->m_pa))
				map->m_algorithmformat =
					Z_EROFS_COMPRESSION_INTERLACED;
			else
				map->m_algorithmformat =
					Z_EROFS_COMPRESSION_SHIFTED;
			if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL)
				map->m_flags |= EROFS_MAP_PARTIAL_REF;
			map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK;
		}
	}
	map->m_llen = lend - map->m_la;
	return 0;
}

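/*
 * Parse the 8-byte z_erofs map header once per inode (guarded by
 * EROFS_I_Z_INITED_BIT) to populate z_advise, z_lclusterbits, the
 * algorithm types and the tail extent/fragment details used by the
 * mappers above.
 */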
static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct z_erofs_map_header *h;
	erofs_off_t pos;
	int err = 0;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	h = erofs_read_metabuf(&map->buf, sb, pos, erofs_inode_in_metabox(inode));
	if (IS_ERR(h)) {
		err = PTR_ERR(h);
		goto out_unlock;
	}

	/*
	 * If the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode. The remaining bits keep
	 * z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto done;
	}
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_lclusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 15);
	if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
	    (vi->z_advise & Z_EROFS_ADVISE_EXTENTS)) {
		vi->z_extents = le32_to_cpu(h->h_extents_lo) |
			((u64)le16_to_cpu(h->h_extents_hi) << 32);
		goto done;
	}

	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)
		vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
	else if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER)
		vi->z_idata_size = le16_to_cpu(h->h_idata_size);

	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_unlock;
	}
	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (vi->z_idata_size ||
	    (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
		struct erofs_map_blocks tm = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		err = z_erofs_map_blocks_fo(inode, &tm,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&tm.buf);
		if (err < 0)
			goto out_unlock;
	}
done:
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

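/*
 * Reject mappings that reference unsupported algorithms, oversized
 * p/lclusters, or physical addresses beyond the 48-bit block limit.
 */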
static int z_erofs_map_sanity_check(struct inode *inode,
				    struct erofs_map_blocks *map)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	u64 pend;

	if (!(map->m_flags & EROFS_MAP_ENCODED))
		return 0;
	if (unlikely(map->m_algorithmformat >= Z_EROFS_COMPRESSION_RUNTIME_MAX)) {
		erofs_err(inode->i_sb, "unknown algorithm %d @ pos %llu for nid %llu, please upgrade kernel",
			  map->m_algorithmformat, map->m_la, EROFS_I(inode)->nid);
		return -EOPNOTSUPP;
	}
	if (unlikely(map->m_algorithmformat < Z_EROFS_COMPRESSION_MAX &&
		     !(sbi->available_compr_algs & (1 << map->m_algorithmformat)))) {
		erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
			  map->m_algorithmformat, EROFS_I(inode)->nid);
		return -EFSCORRUPTED;
	}
	if (unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
		     map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
		return -EOPNOTSUPP;
	/* Filesystems beyond 48-bit physical block addresses are invalid */
	if (unlikely(check_add_overflow(map->m_pa, map->m_plen, &pend) ||
		     (pend >> sbi->blkszbits) >= BIT_ULL(48)))
		return -EFSCORRUPTED;
	return 0;
}

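/*
 * Entry point for mapping logical extents of compressed inodes:
 * post-EOF requests are reported as unmapped; otherwise the per-inode
 * metadata is lazily initialized and the request is dispatched to the
 * extent-encoded or lcluster-based mapper.
 */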
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	if (map->m_la >= inode->i_size) {	/* post-EOF unmapped extent */
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
	} else {
		err = z_erofs_fill_inode(inode, map);
		if (!err) {
			if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
			    (vi->z_advise & Z_EROFS_ADVISE_EXTENTS))
				err = z_erofs_map_blocks_ext(inode, map, flags);
			else
				err = z_erofs_map_blocks_fo(inode, map, flags);
		}
		if (!err)
			err = z_erofs_map_sanity_check(inode, map);
		if (err)
			map->m_llen = 0;
	}
	trace_erofs_map_blocks_exit(inode, map, flags, err);
	return err;
}

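/*
 * iomap_begin handler for reporting (e.g. FIEMAP): fragments and inline
 * data are exposed with a null address since they have no direct block
 * mapping.
 */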
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ?
			      IOMAP_NULL_ADDR : map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * There is no strict rule on how to describe post-EOF
		 * extents, but without the handling below iomap itself can
		 * get into an endless loop past EOF.
		 *
		 * Calculate the effective offset by subtracting the extent
		 * start (map.m_la) from the requested offset, and add it to
		 * the length. (NB: offset >= map.m_la always holds.)
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + offset - map.m_la;
	}
	iomap->flags = 0;
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};