1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2018-2019 HUAWEI, Inc.
4 * https://www.huawei.com/
7 #include <asm/unaligned.h>
8 #include <trace/events/erofs.h>
10 static int z_erofs_do_map_blocks(struct inode *inode,
11 struct erofs_map_blocks *map,
14 int z_erofs_fill_inode(struct inode *inode)
16 struct erofs_inode *const vi = EROFS_I(inode);
17 struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
19 if (!erofs_sb_has_big_pcluster(sbi) &&
20 !erofs_sb_has_ztailpacking(sbi) && !erofs_sb_has_fragments(sbi) &&
21 vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
23 vi->z_algorithmtype[0] = 0;
24 vi->z_algorithmtype[1] = 0;
25 vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
26 set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
28 inode->i_mapping->a_ops = &z_erofs_aops;
32 static int z_erofs_fill_inode_lazy(struct inode *inode)
34 struct erofs_inode *const vi = EROFS_I(inode);
35 struct super_block *const sb = inode->i_sb;
38 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
40 struct z_erofs_map_header *h;
42 if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
44 * paired with smp_mb() at the end of the function to ensure
45 * fields will only be observed after the bit is set.
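* (i.e. the smp_mb() in this branch orders the test_bit() above before any
* later loads of the z_* fields, matching the smp_mb() + set_bit() pairing
* used by the initialization path further below)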
51 if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
55 if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
58 pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
60 kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
66 h = kaddr + erofs_blkoff(pos);
68 * if the highest bit of the 8-byte map header is set, the whole file
69 * is stored in the packed inode. The remaining bits keep z_fragmentoff.
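* (e.g. a map header whose little-endian 64-bit value is 0x8000000000001000
* denotes such a fragment inode with a z_fragmentoff of 0x1000; the top bit
* is stripped below)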
71 if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
72 vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
73 vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
74 vi->z_tailextent_headlcn = 0;
77 vi->z_advise = le16_to_cpu(h->h_advise);
78 vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
79 vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
82 if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
83 vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
84 erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
85 headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
90 vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
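/* i.e. lcluster size = block size << (h_clusterbits & 7): e.g. with 4KiB blocks, (h_clusterbits & 7) == 2 gives 16KiB logical clusters */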
91 if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
92 vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
93 Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
94 erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
99 if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
100 !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
101 !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
102 erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
105 goto out_put_metabuf;
108 if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
109 struct erofs_map_blocks map = {
110 .buf = __EROFS_BUF_INITIALIZER
113 vi->z_idata_size = le16_to_cpu(h->h_idata_size);
114 err = z_erofs_do_map_blocks(inode, &map,
115 EROFS_GET_BLOCKS_FINDTAIL);
116 erofs_put_metabuf(&map.buf);
119 erofs_blkoff(map.m_pa) + map.m_plen > EROFS_BLKSIZ) {
120 erofs_err(sb, "invalid tail-packing pclustersize %llu",
125 goto out_put_metabuf;
128 if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
129 !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
130 struct erofs_map_blocks map = {
131 .buf = __EROFS_BUF_INITIALIZER
134 vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
135 err = z_erofs_do_map_blocks(inode, &map,
136 EROFS_GET_BLOCKS_FINDTAIL);
137 erofs_put_metabuf(&map.buf);
139 goto out_put_metabuf;
142 /* paired with smp_mb() at the beginning of the function */
144 set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
146 erofs_put_metabuf(&buf);
148 clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
152 struct z_erofs_maprecorder {
154 struct erofs_map_blocks *map;
158 /* compression extent information gathered */
162 erofs_blk_t pblk, compressedblks;
163 erofs_off_t nextpackoff;
167 static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
170 struct inode *const inode = m->inode;
171 struct erofs_inode *const vi = EROFS_I(inode);
172 const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
173 const erofs_off_t pos =
174 Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
176 lcn * sizeof(struct z_erofs_vle_decompressed_index);
177 struct z_erofs_vle_decompressed_index *di;
178 unsigned int advise, type;
180 m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
181 erofs_blknr(pos), EROFS_KMAP_ATOMIC);
182 if (IS_ERR(m->kaddr))
183 return PTR_ERR(m->kaddr);
185 m->nextpackoff = pos + sizeof(struct z_erofs_vle_decompressed_index);
187 di = m->kaddr + erofs_blkoff(pos);
189 advise = le16_to_cpu(di->di_advise);
190 type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
191 ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
193 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
194 m->clusterofs = 1 << vi->z_logical_clusterbits;
195 m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
196 if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
197 if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
198 Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
200 return -EFSCORRUPTED;
202 m->compressedblks = m->delta[0] &
203 ~Z_EROFS_VLE_DI_D0_CBLKCNT;
206 m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
208 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
209 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
210 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
211 if (advise & Z_EROFS_VLE_DI_PARTIAL_REF)
212 m->partialref = true;
213 m->clusterofs = le16_to_cpu(di->di_clusterofs);
214 m->pblk = le32_to_cpu(di->di_u.blkaddr);
224 static unsigned int decode_compactedbits(unsigned int lobits,
226 u8 *in, unsigned int pos, u8 *type)
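/*
* each compacted index is decoded from a 32-bit little-endian window at bit
* position 'pos': the low 'lobits' bits hold clusterofs or a delta/CBLKCNT
* value (lo), and the next two bits hold the cluster type.
*/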
228 const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
229 const unsigned int lo = v & lomask;
231 *type = (v >> lobits) & 3;
235 static int get_compacted_la_distance(unsigned int lclusterbits,
236 unsigned int encodebits,
237 unsigned int vcnt, u8 *in, int i)
239 const unsigned int lomask = (1 << lclusterbits) - 1;
240 unsigned int lo, d1 = 0;
243 DBG_BUGON(i >= vcnt);
246 lo = decode_compactedbits(lclusterbits, lomask,
247 in, encodebits * i, &type);
249 if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
252 } while (++i < vcnt);
254 /* the pack's last item (index vcnt - 1) is a NONHEAD lcluster whose lo holds delta[1] */
255 if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
260 static int unpack_compacted_index(struct z_erofs_maprecorder *m,
261 unsigned int amortizedshift,
262 erofs_off_t pos, bool lookahead)
264 struct erofs_inode *const vi = EROFS_I(m->inode);
265 const unsigned int lclusterbits = vi->z_logical_clusterbits;
266 const unsigned int lomask = (1 << lclusterbits) - 1;
267 unsigned int vcnt, base, lo, encodebits, nblk, eofs;
272 if (1 << amortizedshift == 4)
274 else if (1 << amortizedshift == 2 && lclusterbits == 12)
279 /* note that this doesn't equal round_up(..) */
280 m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
281 (vcnt << amortizedshift);
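/*
* e.g. for an 8-byte pack with pos == 16, nextpackoff must be 24;
* round_up(pos, packsize) would return 16 unchanged since pos is already
* aligned, hence round_down() + packsize above.
*/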
282 big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
283 encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
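/*
* each pack holds vcnt indexes in (vcnt << amortizedshift) bytes, with the
* trailing 4 bytes reserved for the pack's base blkaddr; e.g. an 8-byte pack
* of 2 indexes leaves (8 - 4) * 8 / 2 == 16 bits per index, and a 32-byte
* pack of 16 indexes leaves (32 - 4) * 8 / 16 == 14 bits per index.
*/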
284 eofs = erofs_blkoff(pos);
285 base = round_down(eofs, vcnt << amortizedshift);
286 in = m->kaddr + base;
288 i = (eofs - base) >> amortizedshift;
290 lo = decode_compactedbits(lclusterbits, lomask,
291 in, encodebits * i, &type);
293 if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
294 m->clusterofs = 1 << lclusterbits;
296 /* figure out lookahead_distance: delta[1] if needed */
298 m->delta[1] = get_compacted_la_distance(lclusterbits,
299 encodebits, vcnt, in, i);
300 if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
303 return -EFSCORRUPTED;
305 m->compressedblks = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
308 } else if (i + 1 != (int)vcnt) {
313 * the last lcluster in the pack is special: its lo field saves
314 * delta[1] rather than delta[0], so derive delta[0] indirectly
315 * from the previous lcluster instead.
317 lo = decode_compactedbits(lclusterbits, lomask,
318 in, encodebits * (i - 1), &type);
319 if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
321 else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
323 m->delta[0] = lo + 1;
328 /* figure out the blkaddr (pblk) for HEAD lclusters */
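/*
* walk backwards over the preceding indexes of this pack, counting how many
* compressed blocks (nblk) their pclusters occupy; the pack's trailing
* __le32 is the base blkaddr, so pblk = base blkaddr + nblk (see the end of
* this helper).
*/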
333 lo = decode_compactedbits(lclusterbits, lomask,
334 in, encodebits * i, &type);
335 if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
345 lo = decode_compactedbits(lclusterbits, lomask,
346 in, encodebits * i, &type);
347 if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
348 if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
350 nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
353 /* a big pcluster shouldn't have a plain d0 == 1 */
356 return -EFSCORRUPTED;
364 in += (vcnt << amortizedshift) - sizeof(__le32);
365 m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
369 static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
370 unsigned long lcn, bool lookahead)
372 struct inode *const inode = m->inode;
373 struct erofs_inode *const vi = EROFS_I(inode);
374 const unsigned int lclusterbits = vi->z_logical_clusterbits;
375 const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
376 vi->inode_isize + vi->xattr_isize, 8) +
377 sizeof(struct z_erofs_map_header);
378 const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
379 unsigned int compacted_4b_initial, compacted_2b;
380 unsigned int amortizedshift;
383 if (lclusterbits != 12)
390 /* number of leading 4B indexes needed to reach the 32-byte (compacted_2b) alignment */
391 compacted_4b_initial = (32 - ebase % 32) / 4;
392 if (compacted_4b_initial == 32 / 4)
393 compacted_4b_initial = 0;
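/* e.g. ebase % 32 == 8 requires (32 - 8) / 4 == 6 leading 4B indexes, while an already 32-byte aligned ebase requires none */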
395 if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
396 compacted_4b_initial < totalidx)
397 compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
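/*
* the compacted index area is thus laid out as: compacted_4b_initial 4B
* indexes up to the first 32-byte boundary, then compacted_2b 2B indexes
* (packed 16 lclusters per 32-byte unit) when advised, then 4B indexes for
* the remaining lclusters.
*/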
402 if (lcn < compacted_4b_initial) {
406 pos += compacted_4b_initial * 4;
407 lcn -= compacted_4b_initial;
409 if (lcn < compacted_2b) {
413 pos += compacted_2b * 2;
417 pos += lcn * (1 << amortizedshift);
418 m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
419 erofs_blknr(pos), EROFS_KMAP_ATOMIC);
420 if (IS_ERR(m->kaddr))
421 return PTR_ERR(m->kaddr);
422 return unpack_compacted_index(m, amortizedshift, pos, lookahead);
425 static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
426 unsigned int lcn, bool lookahead)
428 const unsigned int datamode = EROFS_I(m->inode)->datalayout;
430 if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
431 return legacy_load_cluster_from_disk(m, lcn);
433 if (datamode == EROFS_INODE_FLAT_COMPRESSION)
434 return compacted_load_cluster_from_disk(m, lcn, lookahead);
439 static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
440 unsigned int lookback_distance)
442 struct erofs_inode *const vi = EROFS_I(m->inode);
443 const unsigned int lclusterbits = vi->z_logical_clusterbits;
445 while (m->lcn >= lookback_distance) {
446 unsigned long lcn = m->lcn - lookback_distance;
449 /* load extent head logical cluster if needed */
450 err = z_erofs_load_cluster_from_disk(m, lcn, false);
455 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
457 erofs_err(m->inode->i_sb,
458 "invalid lookback distance 0 @ nid %llu",
461 return -EFSCORRUPTED;
463 lookback_distance = m->delta[0];
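/* delta[0] of a NONHEAD lcluster is its lookback distance towards the HEAD lcluster, so keep walking backwards */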
465 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
466 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
467 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
468 m->headtype = m->type;
469 m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
472 erofs_err(m->inode->i_sb,
473 "unknown type %u @ lcn %lu of nid %llu",
474 m->type, lcn, vi->nid);
480 erofs_err(m->inode->i_sb, "bogus lookback distance @ nid %llu",
483 return -EFSCORRUPTED;
486 static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
487 unsigned int initial_lcn)
489 struct erofs_inode *const vi = EROFS_I(m->inode);
490 struct erofs_map_blocks *const map = m->map;
491 const unsigned int lclusterbits = vi->z_logical_clusterbits;
495 DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
496 m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 &&
497 m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD2);
498 DBG_BUGON(m->type != m->headtype);
500 if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
501 ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1) &&
502 !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
503 ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) &&
504 !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
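/* no big pcluster is involved here, so the pcluster spans exactly one lcluster */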
505 map->m_plen = 1ULL << lclusterbits;
509 if (m->compressedblks)
512 err = z_erofs_load_cluster_from_disk(m, lcn, false);
517 * If the 1st NONHEAD lcluster was already handled initially without
518 * valid compressedblks, it at least mustn't be a CBLKCNT lcluster;
519 * otherwise an internal implementation error has been detected.
521 * The following code can handle that case properly anyway, but let's
522 * BUG_ON in debugging mode only, so that developers notice it.
524 DBG_BUGON(lcn == initial_lcn &&
525 m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);
528 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
529 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
530 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
532 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
533 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
535 m->compressedblks = 1 << (lclusterbits - LOG_BLOCK_SIZE);
537 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
538 if (m->delta[0] != 1)
539 goto err_bonus_cblkcnt;
540 if (m->compressedblks)
544 erofs_err(m->inode->i_sb,
545 "cannot found CBLKCNT @ lcn %lu of nid %llu",
548 return -EFSCORRUPTED;
551 map->m_plen = (u64)m->compressedblks << LOG_BLOCK_SIZE;
554 erofs_err(m->inode->i_sb,
555 "bogus CBLKCNT @ lcn %lu of nid %llu",
558 return -EFSCORRUPTED;
561 static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
563 struct inode *inode = m->inode;
564 struct erofs_inode *vi = EROFS_I(inode);
565 struct erofs_map_blocks *map = m->map;
566 unsigned int lclusterbits = vi->z_logical_clusterbits;
567 u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
571 /* handle the last EOF pcluster (no next HEAD lcluster) */
572 if ((lcn << lclusterbits) >= inode->i_size) {
573 map->m_llen = inode->i_size - map->m_la;
577 err = z_erofs_load_cluster_from_disk(m, lcn, true);
581 if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
582 DBG_BUGON(!m->delta[1] &&
583 m->clusterofs != 1 << lclusterbits);
584 } else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
585 m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ||
586 m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
587 /* go on until the next HEAD lcluster */
592 erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
593 m->type, lcn, vi->nid);
598 } while (m->delta[1]);
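/* delta[1] of a NONHEAD lcluster is the distance to the next HEAD lcluster; the loop above advances lcn by it until that HEAD (or EOF) is reached */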
600 map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
604 static int z_erofs_do_map_blocks(struct inode *inode,
605 struct erofs_map_blocks *map,
608 struct erofs_inode *const vi = EROFS_I(inode);
609 bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
610 bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
611 struct z_erofs_maprecorder m = {
616 unsigned int lclusterbits, endoff;
617 unsigned long initial_lcn;
618 unsigned long long ofs, end;
620 lclusterbits = vi->z_logical_clusterbits;
621 ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
622 initial_lcn = ofs >> lclusterbits;
623 endoff = ofs & ((1 << lclusterbits) - 1);
625 err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
629 if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
630 vi->z_idataoff = m.nextpackoff;
632 map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
633 end = (m.lcn + 1ULL) << lclusterbits;
636 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
637 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
638 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
639 if (endoff >= m.clusterofs) {
641 map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
643 * For ztailpacking files, special EOF lclusters (which can
644 * consist of three parts at most) are supported so that data
645 * can be inlined more effectively.
647 if (ztailpacking && end > inode->i_size)
651 /* m.lcn should be >= 1 if endoff < m.clusterofs */
653 erofs_err(inode->i_sb,
654 "invalid logical cluster 0 at nid %llu",
659 end = (m.lcn << lclusterbits) | m.clusterofs;
660 map->m_flags |= EROFS_MAP_FULL_MAPPED;
663 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
664 /* get the corresponding first chunk */
665 err = z_erofs_extent_lookback(&m, m.delta[0]);
670 erofs_err(inode->i_sb,
671 "unknown type %u @ offset %llu of nid %llu",
672 m.type, ofs, vi->nid);
677 map->m_flags |= EROFS_MAP_PARTIAL_REF;
678 map->m_llen = end - map->m_la;
680 if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
681 vi->z_tailextent_headlcn = m.lcn;
682 /* for non-compact indexes, fragmentoff is 64 bits */
684 vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
685 vi->z_fragmentoff |= (u64)m.pblk << 32;
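/* i.e. the tail lcluster's 32-bit blkaddr field supplies the upper half of the 64-bit fragment offset, complementing the 32-bit h_fragmentoff read earlier */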
687 if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
688 map->m_flags |= EROFS_MAP_META;
689 map->m_pa = vi->z_idataoff;
690 map->m_plen = vi->z_idata_size;
691 } else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
692 map->m_flags |= EROFS_MAP_FRAGMENT;
694 map->m_pa = blknr_to_addr(m.pblk);
695 err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
700 if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN) {
701 if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
702 map->m_algorithmformat =
703 Z_EROFS_COMPRESSION_INTERLACED;
705 map->m_algorithmformat =
706 Z_EROFS_COMPRESSION_SHIFTED;
707 } else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
708 map->m_algorithmformat = vi->z_algorithmtype[1];
710 map->m_algorithmformat = vi->z_algorithmtype[0];
713 if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
714 ((flags & EROFS_GET_BLOCKS_READMORE) &&
715 map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
716 map->m_llen >= EROFS_BLKSIZ)) {
717 err = z_erofs_get_extent_decompressedlen(&m);
719 map->m_flags |= EROFS_MAP_FULL_MAPPED;
722 erofs_unmap_metabuf(&m.map->buf);
725 erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
726 __func__, map->m_la, map->m_pa,
727 map->m_llen, map->m_plen, map->m_flags);
732 int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
735 struct erofs_inode *const vi = EROFS_I(inode);
738 trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
740 /* when trying to read beyond EOF, leave it unmapped */
741 if (map->m_la >= inode->i_size) {
742 map->m_llen = map->m_la + 1 - inode->i_size;
743 map->m_la = inode->i_size;
748 err = z_erofs_fill_inode_lazy(inode);
752 if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
753 !vi->z_tailextent_headlcn) {
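/* the tail extent starts at lcluster 0, i.e. the whole file lives in the packed inode, so report a single fragment extent covering i_size */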
755 map->m_llen = inode->i_size;
756 map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
761 err = z_erofs_do_map_blocks(inode, map, flags);
763 trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
765 /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
766 DBG_BUGON(err < 0 && err != -ENOMEM);
770 static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
771 loff_t length, unsigned int flags,
772 struct iomap *iomap, struct iomap *srcmap)
775 struct erofs_map_blocks map = { .m_la = offset };
777 ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
778 erofs_put_metabuf(&map.buf);
782 iomap->bdev = inode->i_sb->s_bdev;
783 iomap->offset = map.m_la;
784 iomap->length = map.m_llen;
785 if (map.m_flags & EROFS_MAP_MAPPED) {
786 iomap->type = IOMAP_MAPPED;
787 iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
788 IOMAP_NULL_ADDR : map.m_pa;
790 iomap->type = IOMAP_HOLE;
791 iomap->addr = IOMAP_NULL_ADDR;
793 * There is no strict rule on how to describe extents past EOF,
794 * yet they need to be handled as below; otherwise, iomap itself
795 * will get into an endless loop past EOF.
797 if (iomap->offset >= inode->i_size)
798 iomap->length = length + map.m_la - offset;
804 const struct iomap_ops z_erofs_iomap_report_ops = {
805 .iomap_begin = z_erofs_iomap_begin_report,