// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>

static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map,
				 int flags);

int z_erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

	if (!erofs_sb_has_big_pcluster(sbi) &&
	    !erofs_sb_has_ztailpacking(sbi) && !erofs_sb_has_fragments(sbi) &&
	    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
	}
	inode->i_mapping->a_ops = &z_erofs_aops;
	return 0;
}

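/*
 * Everything beyond the trivial legacy layout above (algorithm types,
 * logical cluster bits, tail-packing and fragment extents) is parsed
 * lazily by z_erofs_fill_inode_lazy() below on the first mapping request.
 */
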
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err, headnr;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out_unlock;
	}

	h = kaddr + erofs_blkoff(pos);
	/*
	 * if the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode. The remaining bits keep z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto done;
	}
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	headnr = 0;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
	    vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
			  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
		err = -EOPNOTSUPP;
		goto out_put_metabuf;
	}

	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_put_metabuf;
	}
	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_put_metabuf;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		vi->z_idata_size = le16_to_cpu(h->h_idata_size);
		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);

		if (!map.m_plen ||
		    erofs_blkoff(map.m_pa) + map.m_plen > EROFS_BLKSIZ) {
			erofs_err(sb, "invalid tail-packing pclustersize %llu",
				  map.m_plen);
			err = -EFSCORRUPTED;
		}
		if (err < 0)
			goto out_put_metabuf;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
	    !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);
		if (err < 0)
			goto out_put_metabuf;
	}
done:
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_put_metabuf:
	erofs_put_metabuf(&buf);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

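/*
 * Quick sketch of the map header decoding above (assuming the common 4KiB
 * block size, i.e. LOG_BLOCK_SIZE == 12):
 *  - if bit 7 of h_clusterbits (bit 63 of the 8-byte header) is set, the
 *    whole file lives in the packed inode and the remaining bits are taken
 *    as z_fragmentoff;
 *  - otherwise (h_clusterbits & 7) widens the logical cluster, e.g. a value
 *    of 2 gives lclusterbits = 12 + 2, i.e. 16KiB logical clusters.
 */
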
struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;
	unsigned long lcn;
	/* compression extent information gathered */
	u8  type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
	bool partialref;
};

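/*
 * Typical usage (mirroring z_erofs_do_map_blocks() below): the recorder
 * lives on the stack and only @inode/@map need to be preset, e.g.
 *
 *	struct z_erofs_maprecorder m = { .inode = inode, .map = map };
 *
 * the per-lcluster fields are then filled in by the
 * *_load_cluster_from_disk() helpers for the lcluster being examined.
 */
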
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t pos =
		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(erofs_iloc(inode) +
				vi->inode_isize + vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_vle_decompressed_index);
	struct z_erofs_vle_decompressed_index *di;
	unsigned int advise, type;

	m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
				      erofs_blknr(pos), EROFS_KMAP_ATOMIC);
	if (IS_ERR(m->kaddr))
		return PTR_ERR(m->kaddr);

	m->nextpackoff = pos + sizeof(struct z_erofs_vle_decompressed_index);
	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] &
				~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		if (advise & Z_EROFS_VLE_DI_PARTIAL_REF)
			m->partialref = true;
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	m->type = type;
	return 0;
}

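/*
 * Worked example for the legacy (non-compact) format: every lcluster owns
 * a fixed 8-byte struct z_erofs_vle_decompressed_index, so lcluster @lcn
 * is read at "aligned end of the inode/xattr area + lcn * 8"; e.g. lcn 3
 * sits 24 bytes into the index array.
 */
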
static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}

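/*
 * Each compacted entry is a little-endian bitfield: the low @lobits bits
 * carry the cluster offset or delta ("lo") and the next 2 bits the cluster
 * type; e.g. with lobits == 12, lo = v & 0xfff and type = (v >> 12) & 3.
 */
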
static int get_compacted_la_distance(unsigned int lclusterbits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* vcnt - 1 (Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) item */
	if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

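/*
 * The value returned above is the lookahead distance, i.e. how many
 * consecutive NONHEAD lclusters follow position @i inside the same pack;
 * unpack_compacted_index() stores it in m->delta[1] when @lookahead is set.
 */
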
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  erofs_off_t pos, bool lookahead)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk, eofs;
	int i;
	u8 *in, type;
	bool big_pcluster;

	if (1 << amortizedshift == 4 && lclusterbits <= 14)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	/* this doesn't equal round_up(..) */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	eofs = erofs_blkoff(pos);
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;
	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lclusterbits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * since the last lcluster in the pack is special, its lo
		 * saves delta[1] rather than delta[0]; get delta[0] from
		 * the previous lcluster indirectly instead.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
				i -= lo;
			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

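/*
 * Per-lcluster field width (encodebits) sketch: each pack reserves a
 * trailing __le32 for the base block address, hence
 *   compacted_4b (vcnt = 2,  shift = 2): ((2 << 2) - 4) * 8 / 2  = 16 bits,
 *   compacted_2b (vcnt = 16, shift = 1): ((16 << 1) - 4) * 8 / 16 = 14 bits
 * per lcluster, matching the encodebits formula above.
 */
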
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
		ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* used to reach 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
				      erofs_blknr(pos), EROFS_KMAP_ATOMIC);
	if (IS_ERR(m->kaddr))
		return PTR_ERR(m->kaddr);
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

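/*
 * Index area layout recap: an optional run of 4-byte entries purely to
 * reach 32-byte alignment (compacted_4b_initial), then a 16-lcluster
 * aligned run of 2-byte entries if COMPACTED_2B is advised, then 4-byte
 * entries for the tail; @amortizedshift above records which region @lcn
 * landed in before the containing block is read.
 */
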
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn, bool lookahead)
{
	const unsigned int datamode = EROFS_I(m->inode)->datalayout;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn, lookahead);

	return -EINVAL;
}

static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;

	while (m->lcn >= lookback_distance) {
		unsigned long lcn = m->lcn - lookback_distance;
		int err;

		/* load extent head logical cluster if needed */
		err = z_erofs_load_cluster_from_disk(m, lcn, false);
		if (err)
			return err;

		switch (m->type) {
		case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
			if (!m->delta[0]) {
				erofs_err(m->inode->i_sb,
					  "invalid lookback distance 0 @ nid %llu",
					  vi->nid);
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			lookback_distance = m->delta[0];
			continue;
		case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
		case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
			m->headtype = m->type;
			m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
			return 0;
		default:
			erofs_err(m->inode->i_sb,
				  "unknown type %u @ lcn %lu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
	}
	erofs_err(m->inode->i_sb, "bogus lookback distance @ nid %llu",
		  vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

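/*
 * Example walk (hypothetical numbers): if lcluster 7 is NONHEAD with
 * delta[0] == 2, lcluster 5 is reloaded; if that one is again NONHEAD with
 * delta[0] == 4, lcluster 1 is tried next, and so on until a PLAIN/HEAD
 * lcluster supplies m_la for the extent start.
 */
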
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD2);
	DBG_BUGON(m->type != m->headtype);

	if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
		map->m_plen = 1ULL << lclusterbits;
		return 0;
	}
	lcn = m->lcn + 1;
	if (m->compressedblks)
		goto out;

	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster was already handled initially without
	 * valid compressedblks, it must not be a CBLKCNT lcluster; otherwise
	 * an internal implementation error has been detected.
	 *
	 * The following code can also handle it properly anyway, but let's
	 * BUG_ON in the debugging mode only for developers to notice that.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedblks = 1 << (lclusterbits - LOG_BLOCK_SIZE);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedblks)
			break;
		fallthrough;
	default:
		erofs_err(m->inode->i_sb,
			  "cannot find CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = (u64)m->compressedblks << LOG_BLOCK_SIZE;
	return 0;
err_bonus_cblkcnt:
	erofs_err(m->inode->i_sb,
		  "bogus CBLKCNT @ lcn %lu of nid %llu",
		  lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

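/*
 * m_plen arithmetic: without big pclusters the compressed extent is always
 * exactly one lcluster (1ULL << lclusterbits bytes); with big pclusters the
 * CBLKCNT stored in the first NONHEAD lcluster gives the size in blocks,
 * e.g. compressedblks == 3 with 4KiB blocks yields m_plen = 3 << 12 = 12KiB.
 */
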
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
			/* go on until the next HEAD lcluster */
			if (lcn != headlcn)
				break;
			m->delta[1] = 1;
		} else {
			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

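/*
 * m_llen is extended up to the next HEAD lcluster (or EOF): each NONHEAD
 * lcluster records in delta[1] how far ahead that HEAD is, so the loop
 * above advances in delta[1]-sized strides instead of scanning every
 * lcluster one by one.
 */
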
static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map,
				 int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			/*
			 * For ztailpacking files, in order to inline data more
			 * effectively, special EOF lclusters are now supported
			 * which can have three parts at most.
			 */
			if (ztailpacking && end > inode->i_size)
				end = inode->i_size;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		fallthrough;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}
	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment &&
		    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
			vi->z_fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = blknr_to_addr(m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto unmap_out;
	}

	if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN) {
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
			map->m_algorithmformat =
				Z_EROFS_COMPRESSION_INTERLACED;
		else
			map->m_algorithmformat =
				Z_EROFS_COMPRESSION_SHIFTED;
	} else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
		map->m_algorithmformat = vi->z_algorithmtype[1];
	} else {
		map->m_algorithmformat = vi->z_algorithmtype[0];
	}

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
	     map->m_llen >= EROFS_BLKSIZ)) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

unmap_out:
	erofs_unmap_metabuf(&m.map->buf);
	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		  __func__, map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);
	return err;
}

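/*
 * Note on EROFS_GET_BLOCKS_FINDTAIL: the lookup above is anchored at
 * inode->i_size - 1 (the last lcluster), which is how
 * z_erofs_fill_inode_lazy() locates tail-packed and fragment extents and
 * records z_idataoff / z_tailextent_headlcn / z_fragmentoff.
 */
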
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	int err = 0;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= inode->i_size) {
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}
	err = z_erofs_fill_inode_lazy(inode);
	if (err)
		goto out;
	if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
	    !vi->z_tailextent_headlcn) {
		map->m_la = 0;
		map->m_llen = inode->i_size;
		map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
				EROFS_MAP_FRAGMENT;
		goto out;
	}
	err = z_erofs_do_map_blocks(inode, map, flags);
out:
	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}

static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				loff_t length, unsigned int flags,
				struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
			      IOMAP_NULL_ADDR : map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * No strict rule on how to describe extents for post EOF, yet
		 * we need to handle it as below. Otherwise, iomap itself will
		 * get into an endless loop on post EOF.
		 *
		 * Calculate the effective offset by subtracting extent start
		 * (map.m_la) from the requested offset, and add it to length.
		 * (NB: offset >= map.m_la always)
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + offset - map.m_la;
	}
	iomap->flags = 0;
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};