// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>

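/*
 * Mapping context for looking up lclusters (logical clusters, i.e. the
 * fixed-size units of the uncompressed data) and for resolving the
 * pclusters (physical clusters of compressed data) that back them.
 */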
struct z_erofs_maprecorder {
        struct inode *inode;
        struct erofs_map_blocks *map;
        void *kaddr;

        unsigned long lcn;
        /* compression extent information gathered */
        u8  type, headtype;
        u16 clusterofs;
        u16 delta[2];
        erofs_blk_t pblk, compressedblks;
        erofs_off_t nextpackoff;
        bool partialref;
};

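/*
 * Full (non-compact) indexes: one fixed-size struct z_erofs_lcluster_index
 * per lcluster, laid out right after the map header, so the index of
 * lcluster @lcn can be located by plain arithmetic on @pos below.
 */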
static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
                                      unsigned long lcn)
{
        struct inode *const inode = m->inode;
        struct erofs_inode *const vi = EROFS_I(inode);
        const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(erofs_iloc(inode) +
                        vi->inode_isize + vi->xattr_isize) +
                        lcn * sizeof(struct z_erofs_lcluster_index);
        struct z_erofs_lcluster_index *di;
        unsigned int advise, type;

        m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
                                      erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
        if (IS_ERR(m->kaddr))
                return PTR_ERR(m->kaddr);

        m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
        m->lcn = lcn;
        di = m->kaddr + erofs_blkoff(inode->i_sb, pos);

        advise = le16_to_cpu(di->di_advise);
        type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) &
                ((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1);
        switch (type) {
        case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                m->clusterofs = 1 << vi->z_logical_clusterbits;
                m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
                if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
                        if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
                                        Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
                                DBG_BUGON(1);
                                return -EFSCORRUPTED;
                        }
                        m->compressedblks = m->delta[0] &
                                ~Z_EROFS_LI_D0_CBLKCNT;
                        m->delta[0] = 1;
                }
                m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
                break;
        case Z_EROFS_LCLUSTER_TYPE_PLAIN:
        case Z_EROFS_LCLUSTER_TYPE_HEAD1:
        case Z_EROFS_LCLUSTER_TYPE_HEAD2:
                if (advise & Z_EROFS_LI_PARTIAL_REF)
                        m->partialref = true;
                m->clusterofs = le16_to_cpu(di->di_clusterofs);
                if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
                        DBG_BUGON(1);
                        return -EFSCORRUPTED;
                }
                m->pblk = le32_to_cpu(di->di_u.blkaddr);
                break;
        default:
                DBG_BUGON(1);
                return -EOPNOTSUPP;
        }
        m->type = type;
        return 0;
}

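/*
 * Compact indexes pack @vcnt lclusters into (vcnt << amortizedshift) bytes;
 * the last 4 bytes of each pack hold the base blkaddr, and each entry gets
 * encodebits = ((vcnt << amortizedshift) - 4) * 8 / vcnt bits: a 2-bit
 * lcluster type plus the low bits ("lo").  E.g. for compacted-4B packs with
 * lclusterbits == 12: vcnt = 2, so encodebits = (8 - 4) * 8 / 2 = 16 bits
 * per entry (2-bit type + 14-bit lo); for compacted-2B packs: vcnt = 16,
 * so encodebits = (32 - 4) * 8 / 16 = 14 bits (2-bit type + 12-bit lo).
 */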
static unsigned int decode_compactedbits(unsigned int lobits,
                                         unsigned int lomask,
                                         u8 *in, unsigned int pos, u8 *type)
{
        const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
        const unsigned int lo = v & lomask;

        *type = (v >> lobits) & 3;
        return lo;
}

static int get_compacted_la_distance(unsigned int lclusterbits,
                                     unsigned int encodebits,
                                     unsigned int vcnt, u8 *in, int i)
{
        const unsigned int lomask = (1 << lclusterbits) - 1;
        unsigned int lo, d1 = 0;
        u8 type;

        DBG_BUGON(i >= vcnt);

        do {
                lo = decode_compactedbits(lclusterbits, lomask,
                                          in, encodebits * i, &type);

                if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
                        return d1;
                ++d1;
        } while (++i < vcnt);

        /* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
        if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
                d1 += lo - 1;
        return d1;
}

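/*
 * For a NONHEAD lcluster, lo carries delta[0] (the distance back to the
 * HEAD lcluster of its pcluster), except for the last entry of a pack,
 * whose lo stores delta[1] (the distance forward to the next HEAD) instead;
 * unpack_compacted_index() below recovers delta[0] from the previous entry
 * in that case.
 */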
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
                                  unsigned int amortizedshift,
                                  erofs_off_t pos, bool lookahead)
{
        struct erofs_inode *const vi = EROFS_I(m->inode);
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        const unsigned int lomask = (1 << lclusterbits) - 1;
        unsigned int vcnt, base, lo, encodebits, nblk, eofs;
        int i;
        u8 *in, type;
        bool big_pcluster;

        if (1 << amortizedshift == 4 && lclusterbits <= 14)
                vcnt = 2;
        else if (1 << amortizedshift == 2 && lclusterbits == 12)
                vcnt = 16;
        else
                return -EOPNOTSUPP;

        /* unlike round_up(pos, ..), this advances a full pack even if pos is aligned */
        m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
                         (vcnt << amortizedshift);
        big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
        encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
        eofs = erofs_blkoff(m->inode->i_sb, pos);
        base = round_down(eofs, vcnt << amortizedshift);
        in = m->kaddr + base;

        i = (eofs - base) >> amortizedshift;

        lo = decode_compactedbits(lclusterbits, lomask,
                                  in, encodebits * i, &type);
        m->type = type;
        if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
                m->clusterofs = 1 << lclusterbits;

                /* figure out lookahead_distance: delta[1] if needed */
                if (lookahead)
                        m->delta[1] = get_compacted_la_distance(lclusterbits,
                                                encodebits, vcnt, in, i);
                if (lo & Z_EROFS_LI_D0_CBLKCNT) {
                        if (!big_pcluster) {
                                DBG_BUGON(1);
                                return -EFSCORRUPTED;
                        }
                        m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
                        m->delta[0] = 1;
                        return 0;
                } else if (i + 1 != (int)vcnt) {
                        m->delta[0] = lo;
                        return 0;
                }
                /*
                 * since the last lcluster in the pack is special,
                 * of which lo saves delta[1] rather than delta[0].
                 * Hence, get delta[0] from the previous lcluster indirectly.
                 */
                lo = decode_compactedbits(lclusterbits, lomask,
                                          in, encodebits * (i - 1), &type);
                if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
                        lo = 0;
                else if (lo & Z_EROFS_LI_D0_CBLKCNT)
                        lo = 1;
                m->delta[0] = lo + 1;
                return 0;
        }
        m->clusterofs = lo;
        m->delta[0] = 0;
        /* figure out blkaddr (pblk) for HEAD lclusters */
        if (!big_pcluster) {
                nblk = 1;
                while (i > 0) {
                        --i;
                        lo = decode_compactedbits(lclusterbits, lomask,
                                                  in, encodebits * i, &type);
                        if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
                                i -= lo;

                        if (i >= 0)
                                ++nblk;
                }
        } else {
                nblk = 0;
                while (i > 0) {
                        --i;
                        lo = decode_compactedbits(lclusterbits, lomask,
                                                  in, encodebits * i, &type);
                        if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
                                if (lo & Z_EROFS_LI_D0_CBLKCNT) {
                                        --i;
                                        nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
                                        continue;
                                }
                                /* bigpcluster shouldn't have plain d0 == 1 */
                                if (lo <= 1) {
                                        DBG_BUGON(1);
                                        return -EFSCORRUPTED;
                                }
                                i -= lo - 2;
                                continue;
                        }
                        ++nblk;
                }
        }
        in += (vcnt << amortizedshift) - sizeof(__le32);
        m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
        return 0;
}

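/*
 * On-disk order of compact index packs: up to compacted_4b_initial 4B
 * entries to reach 32-byte alignment, then all compacted-2B packs (when
 * Z_EROFS_ADVISE_COMPACTED_2B is advised), then trailing 4B entries for
 * whatever remains.
 */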
static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
                                         unsigned long lcn, bool lookahead)
{
        struct inode *const inode = m->inode;
        struct erofs_inode *const vi = EROFS_I(inode);
        const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
                ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
        unsigned int totalidx = erofs_iblks(inode);
        unsigned int compacted_4b_initial, compacted_2b;
        unsigned int amortizedshift;
        erofs_off_t pos;

        if (lcn >= totalidx)
                return -EINVAL;

        m->lcn = lcn;
        /* used to align to 32-byte (compacted_2b) alignment */
        compacted_4b_initial = (32 - ebase % 32) / 4;
        if (compacted_4b_initial == 32 / 4)
                compacted_4b_initial = 0;

        if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
            compacted_4b_initial < totalidx)
                compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
        else
                compacted_2b = 0;

        pos = ebase;
        if (lcn < compacted_4b_initial) {
                amortizedshift = 2;
                goto out;
        }
        pos += compacted_4b_initial * 4;
        lcn -= compacted_4b_initial;

        if (lcn < compacted_2b) {
                amortizedshift = 1;
                goto out;
        }
        pos += compacted_2b * 2;
        lcn -= compacted_2b;
        amortizedshift = 2;
out:
        pos += lcn * (1 << amortizedshift);
        m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
                                      erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
        if (IS_ERR(m->kaddr))
                return PTR_ERR(m->kaddr);
        return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
                                           unsigned int lcn, bool lookahead)
{
        switch (EROFS_I(m->inode)->datalayout) {
        case EROFS_INODE_COMPRESSED_FULL:
                return z_erofs_load_full_lcluster(m, lcn);
        case EROFS_INODE_COMPRESSED_COMPACT:
                return z_erofs_load_compact_lcluster(m, lcn, lookahead);
        default:
                return -EINVAL;
        }
}

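/*
 * Walk backwards by delta[0] hops from the current NONHEAD lcluster until
 * the HEAD lcluster that starts the pcluster is reached, recording the
 * logical start (m_la) of the extent on the way out.
 */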
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
                                   unsigned int lookback_distance)
{
        struct super_block *sb = m->inode->i_sb;
        struct erofs_inode *const vi = EROFS_I(m->inode);
        const unsigned int lclusterbits = vi->z_logical_clusterbits;

        while (m->lcn >= lookback_distance) {
                unsigned long lcn = m->lcn - lookback_distance;
                int err;

                err = z_erofs_load_lcluster_from_disk(m, lcn, false);
                if (err)
                        return err;

                switch (m->type) {
                case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                        lookback_distance = m->delta[0];
                        if (!lookback_distance)
                                goto err_bogus;
                        continue;
                case Z_EROFS_LCLUSTER_TYPE_PLAIN:
                case Z_EROFS_LCLUSTER_TYPE_HEAD1:
                case Z_EROFS_LCLUSTER_TYPE_HEAD2:
                        m->headtype = m->type;
                        m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
                        return 0;
                default:
                        erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
                                  m->type, lcn, vi->nid);
                        DBG_BUGON(1);
                        return -EOPNOTSUPP;
                }
        }
err_bogus:
        erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
                  lookback_distance, m->lcn, vi->nid);
        DBG_BUGON(1);
        return -EFSCORRUPTED;
}

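/*
 * For big pclusters, the first NONHEAD lcluster stores the pcluster size
 * in blocks in its delta[0] with the Z_EROFS_LI_D0_CBLKCNT bit set
 * (CBLKCNT); otherwise the pcluster is exactly one lcluster long.
 */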
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
                                            unsigned int initial_lcn)
{
        struct super_block *sb = m->inode->i_sb;
        struct erofs_inode *const vi = EROFS_I(m->inode);
        struct erofs_map_blocks *const map = m->map;
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        unsigned long lcn;
        int err;

        DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
                  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
                  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
        DBG_BUGON(m->type != m->headtype);

        if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
            ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
             !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
            ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
             !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
                map->m_plen = 1ULL << lclusterbits;
                return 0;
        }
        lcn = m->lcn + 1;
        if (m->compressedblks)
                goto out;

        err = z_erofs_load_lcluster_from_disk(m, lcn, false);
        if (err)
                return err;

        /*
         * If the 1st NONHEAD lcluster has already been handled initially
         * without valid compressedblks, it mustn't be CBLKCNT, or an
         * internal implementation error is detected.
         *
         * The following code can also handle it properly anyway, but let's
         * BUG_ON in the debugging mode only for developers to notice that.
         */
        DBG_BUGON(lcn == initial_lcn &&
                  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

        switch (m->type) {
        case Z_EROFS_LCLUSTER_TYPE_PLAIN:
        case Z_EROFS_LCLUSTER_TYPE_HEAD1:
        case Z_EROFS_LCLUSTER_TYPE_HEAD2:
                /*
                 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
                 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
                 */
                m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
                break;
        case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                if (m->delta[0] != 1)
                        goto err_bonus_cblkcnt;
                if (m->compressedblks)
                        break;
                fallthrough;
        default:
                erofs_err(sb, "cannot find CBLKCNT @ lcn %lu of nid %llu", lcn,
                          vi->nid);
                DBG_BUGON(1);
                return -EFSCORRUPTED;
        }
out:
        map->m_plen = erofs_pos(sb, m->compressedblks);
        return 0;
err_bonus_cblkcnt:
        erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
        DBG_BUGON(1);
        return -EFSCORRUPTED;
}

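/*
 * Scan forward by delta[1] hops until the next HEAD lcluster (or EOF) to
 * figure out the full decompressed length of the extent, which is needed
 * for FIEMAP and the readmore decompression strategy.
 */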
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
        struct inode *inode = m->inode;
        struct erofs_inode *vi = EROFS_I(inode);
        struct erofs_map_blocks *map = m->map;
        unsigned int lclusterbits = vi->z_logical_clusterbits;
        u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
        int err;

        do {
                /* handle the last EOF pcluster (no next HEAD lcluster) */
                if ((lcn << lclusterbits) >= inode->i_size) {
                        map->m_llen = inode->i_size - map->m_la;
                        return 0;
                }

                err = z_erofs_load_lcluster_from_disk(m, lcn, true);
                if (err)
                        return err;

                if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
                        DBG_BUGON(!m->delta[1] &&
                                  m->clusterofs != 1 << lclusterbits);
                } else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
                           m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
                           m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
                        /* go on until the next HEAD lcluster */
                        if (lcn != headlcn)
                                break;
                        m->delta[1] = 1;
                } else {
                        erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
                                  m->type, lcn, vi->nid);
                        DBG_BUGON(1);
                        return -EOPNOTSUPP;
                }
                lcn += m->delta[1];
        } while (m->delta[1]);

        map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
        return 0;
}

static int z_erofs_do_map_blocks(struct inode *inode,
                                 struct erofs_map_blocks *map, int flags)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
        bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
        struct z_erofs_maprecorder m = {
                .inode = inode,
                .map = map,
        };
        int err = 0;
        unsigned int lclusterbits, endoff;
        unsigned long initial_lcn;
        unsigned long long ofs, end;

        lclusterbits = vi->z_logical_clusterbits;
        ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
        initial_lcn = ofs >> lclusterbits;
        endoff = ofs & ((1 << lclusterbits) - 1);

        err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
        if (err)
                goto unmap_out;

        if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
                vi->z_idataoff = m.nextpackoff;

        map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
        end = (m.lcn + 1ULL) << lclusterbits;

        switch (m.type) {
        case Z_EROFS_LCLUSTER_TYPE_PLAIN:
        case Z_EROFS_LCLUSTER_TYPE_HEAD1:
        case Z_EROFS_LCLUSTER_TYPE_HEAD2:
                if (endoff >= m.clusterofs) {
                        m.headtype = m.type;
                        map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
                        /*
                         * For ztailpacking files, in order to inline data more
                         * effectively, special EOF lclusters are now supported
                         * which can have three parts at most.
                         */
                        if (ztailpacking && end > inode->i_size)
                                end = inode->i_size;
                        break;
                }
                /* m.lcn should be >= 1 if endoff < m.clusterofs */
                if (!m.lcn) {
                        erofs_err(inode->i_sb,
                                  "invalid logical cluster 0 at nid %llu",
                                  vi->nid);
                        err = -EFSCORRUPTED;
                        goto unmap_out;
                }
                end = (m.lcn << lclusterbits) | m.clusterofs;
                map->m_flags |= EROFS_MAP_FULL_MAPPED;
                m.delta[0] = 1;
                fallthrough;
        case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                /* get the corresponding first chunk */
                err = z_erofs_extent_lookback(&m, m.delta[0]);
                if (err)
                        goto unmap_out;
                break;
        default:
                erofs_err(inode->i_sb,
                          "unknown type %u @ offset %llu of nid %llu",
                          m.type, ofs, vi->nid);
                err = -EOPNOTSUPP;
                goto unmap_out;
        }
        if (m.partialref)
                map->m_flags |= EROFS_MAP_PARTIAL_REF;
        map->m_llen = end - map->m_la;

        if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
                vi->z_tailextent_headlcn = m.lcn;
                /* for non-compact indexes, fragmentoff is 64 bits */
                if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
                        vi->z_fragmentoff |= (u64)m.pblk << 32;
        }
        if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
                map->m_flags |= EROFS_MAP_META;
                map->m_pa = vi->z_idataoff;
                map->m_plen = vi->z_idata_size;
        } else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
                map->m_flags |= EROFS_MAP_FRAGMENT;
        } else {
                map->m_pa = erofs_pos(inode->i_sb, m.pblk);
                err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
                if (err)
                        goto unmap_out;
        }

        if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
                if (map->m_llen > map->m_plen) {
                        DBG_BUGON(1);
                        err = -EFSCORRUPTED;
                        goto unmap_out;
                }
                if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
                        map->m_algorithmformat =
                                Z_EROFS_COMPRESSION_INTERLACED;
                else
                        map->m_algorithmformat =
                                Z_EROFS_COMPRESSION_SHIFTED;
        } else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
                map->m_algorithmformat = vi->z_algorithmtype[1];
        } else {
                map->m_algorithmformat = vi->z_algorithmtype[0];
        }

        if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
            ((flags & EROFS_GET_BLOCKS_READMORE) &&
             map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
             map->m_llen >= i_blocksize(inode))) {
                err = z_erofs_get_extent_decompressedlen(&m);
                if (!err)
                        map->m_flags |= EROFS_MAP_FULL_MAPPED;
        }

unmap_out:
        erofs_unmap_metabuf(&m.map->buf);
        return err;
}

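/*
 * Parse the per-inode z_erofs map header once on first access; concurrent
 * initializers are serialized by EROFS_I_BL_Z_BIT, and the paired memory
 * barriers make the parsed fields visible before EROFS_I_Z_INITED_BIT is
 * observed.
 */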
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct super_block *const sb = inode->i_sb;
        int err, headnr;
        erofs_off_t pos;
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        void *kaddr;
        struct z_erofs_map_header *h;

        if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
                /*
                 * paired with smp_mb() at the end of the function to ensure
                 * fields will only be observed after the bit is set.
                 */
                smp_mb();
                return 0;
        }

        if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
                return -ERESTARTSYS;

        err = 0;
        if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
                goto out_unlock;

        pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
        kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
        if (IS_ERR(kaddr)) {
                err = PTR_ERR(kaddr);
                goto out_unlock;
        }

        h = kaddr + erofs_blkoff(sb, pos);
        /*
         * if the highest bit of the 8-byte map header is set, the whole file
         * is stored in the packed inode. The remaining bits keep
         * z_fragmentoff.
         */
        if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
                vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
                vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
                vi->z_tailextent_headlcn = 0;
                goto done;
        }
        vi->z_advise = le16_to_cpu(h->h_advise);
        vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
        vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

        headnr = 0;
        if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
            vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
                erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
                          headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
                err = -EOPNOTSUPP;
                goto out_put_metabuf;
        }

        vi->z_logical_clusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 7);
        if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
            vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
                            Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
                erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
                          vi->nid);
                err = -EFSCORRUPTED;
                goto out_put_metabuf;
        }
        if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
                erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
                          vi->nid);
                err = -EFSCORRUPTED;
                goto out_put_metabuf;
        }

        if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
                struct erofs_map_blocks map = {
                        .buf = __EROFS_BUF_INITIALIZER
                };

                vi->z_idata_size = le16_to_cpu(h->h_idata_size);
                err = z_erofs_do_map_blocks(inode, &map,
                                            EROFS_GET_BLOCKS_FINDTAIL);
                erofs_put_metabuf(&map.buf);

                if (!map.m_plen ||
                    erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
                        erofs_err(sb, "invalid tail-packing pclustersize %llu",
                                  map.m_plen);
                        err = -EFSCORRUPTED;
                }
                if (err < 0)
                        goto out_put_metabuf;
        }

        if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
            !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
                struct erofs_map_blocks map = {
                        .buf = __EROFS_BUF_INITIALIZER
                };

                vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
                err = z_erofs_do_map_blocks(inode, &map,
                                            EROFS_GET_BLOCKS_FINDTAIL);
                erofs_put_metabuf(&map.buf);
                if (err < 0)
                        goto out_put_metabuf;
        }
done:
        /* paired with smp_mb() at the beginning of the function */
        smp_mb();
        set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_put_metabuf:
        erofs_put_metabuf(&buf);
out_unlock:
        clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
        return err;
}

int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
                            int flags)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        int err = 0;

        trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

        /* when trying to read beyond EOF, leave it unmapped */
        if (map->m_la >= inode->i_size) {
                map->m_llen = map->m_la + 1 - inode->i_size;
                map->m_la = inode->i_size;
                map->m_flags = 0;
                goto out;
        }

        err = z_erofs_fill_inode_lazy(inode);
        if (err)
                goto out;

        if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
            !vi->z_tailextent_headlcn) {
                map->m_la = 0;
                map->m_llen = inode->i_size;
                map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
                                EROFS_MAP_FRAGMENT;
                goto out;
        }

        err = z_erofs_do_map_blocks(inode, map, flags);
out:
        trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
        return err;
}

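/*
 * iomap "report" path: translate the mapping result for FIEMAP; encoded
 * (compressed) extents are reported as plain mapped ranges here, and
 * fragment extents carry no meaningful physical address.
 */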
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
                                      loff_t length, unsigned int flags,
                                      struct iomap *iomap, struct iomap *srcmap)
{
        int ret;
        struct erofs_map_blocks map = { .m_la = offset };

        ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
        erofs_put_metabuf(&map.buf);
        if (ret < 0)
                return ret;

        iomap->bdev = inode->i_sb->s_bdev;
        iomap->offset = map.m_la;
        iomap->length = map.m_llen;
        if (map.m_flags & EROFS_MAP_MAPPED) {
                iomap->type = IOMAP_MAPPED;
                iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
                              IOMAP_NULL_ADDR : map.m_pa;
        } else {
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
                /*
                 * There is no strict rule on how to describe extents past
                 * EOF, yet we need to handle it as below; otherwise, iomap
                 * itself will get into an endless loop past EOF.
                 *
                 * Calculate the effective offset by subtracting extent start
                 * (map.m_la) from the requested offset, and add it to length.
                 * (NB: offset >= map.m_la always)
                 */
                if (iomap->offset >= inode->i_size)
                        iomap->length = length + offset - map.m_la;
        }
        iomap->flags = 0;
        return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
        .iomap_begin = z_erofs_iomap_begin_report,
};