// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>
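
/*
 * Inodes using the legacy compression layout with no extended on-disk
 * features can be set up immediately without reading the map header;
 * everything else is deferred to z_erofs_fill_inode_lazy() on first access.
 */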
int z_erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

	if (!erofs_sb_has_big_pcluster(sbi) &&
	    !erofs_sb_has_ztailpacking(sbi) && !erofs_sb_has_fragments(sbi) &&
	    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
	}
	inode->i_mapping->a_ops = &z_erofs_aops;
	return 0;
}
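
/*
 * Per-call cursor used while walking the on-disk lcluster (logical cluster)
 * indexes; it caches the state of the most recently loaded lcluster.
 */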
struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* compression extent information gathered */
	u8  type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
	bool partialref;
};
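
/*
 * Load one lcluster index in the legacy (non-compact) format, where each
 * lcluster is described by a fixed 8-byte z_erofs_vle_decompressed_index
 * located after the inode base and xattrs.
 */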
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t pos =
		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(erofs_iloc(inode) +
				vi->inode_isize + vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_vle_decompressed_index);
	struct z_erofs_vle_decompressed_index *di;
	unsigned int advise, type;

	m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
				      erofs_blknr(pos), EROFS_KMAP);
	if (IS_ERR(m->kaddr))
		return PTR_ERR(m->kaddr);

	m->nextpackoff = pos + sizeof(struct z_erofs_vle_decompressed_index);
	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] &
				~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		if (advise & Z_EROFS_VLE_DI_PARTIAL_REF)
			m->partialref = true;
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}
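
/*
 * Each compacted index encodes 2 bits of lcluster type above lobits of lo
 * (clusterofs or delta). Entries are bit-packed at encodebits intervals,
 * hence the unaligned 32-bit load below.
 */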
static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}
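
/*
 * Scan forward from entry i to count how many NONHEAD lclusters belong to
 * the current extent, i.e. the lookahead distance stored in delta[1].
 */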
static int get_compacted_la_distance(unsigned int lclusterbits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);

		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/*
	 * The last lcluster in the pack is NONHEAD: its lo stores delta[1]
	 * unless it is a CBLKCNT entry.
	 */
	if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}
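
/*
 * Decode one pack of compacted indexes: vcnt entries of amortized size
 * (1 << amortizedshift) bytes each, bit-packed in front of a trailing
 * __le32 that stores the base blkaddr of the pack.
 */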
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  erofs_off_t pos, bool lookahead)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk, eofs;
	int i;
	u8 *in, type;
	bool big_pcluster;

	if (1 << amortizedshift == 4)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	/* note: this doesn't equal round_up(..) */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	eofs = erofs_blkoff(pos);
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lclusterbits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo stores
		 * delta[1] rather than delta[0], so derive delta[0] from the
		 * previous lcluster indirectly.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}
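
/*
 * Locate the compacted pack that holds @lcn. The index area may start with
 * a few 4-byte-amortized entries for 32-byte alignment, followed by the
 * 2-byte-amortized region (if COMPACTED_2B is advertised) and a 4-byte
 * region for the remainder.
 */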
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
		ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;

	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* used to align to 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
				      erofs_blknr(pos), EROFS_KMAP);
	if (IS_ERR(m->kaddr))
		return PTR_ERR(m->kaddr);
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn, bool lookahead)
{
	const unsigned int datamode = EROFS_I(m->inode)->datalayout;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn, lookahead);

	return -EINVAL;
}
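
/*
 * Walk backwards from a NONHEAD lcluster by delta[0] hops until the HEAD
 * lcluster that starts the extent is found, recording its logical offset.
 */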
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;

	while (m->lcn >= lookback_distance) {
		unsigned long lcn = m->lcn - lookback_distance;
		int err;

		/* load extent head logical cluster if needed */
		err = z_erofs_load_cluster_from_disk(m, lcn, false);
		if (err)
			return err;

		switch (m->type) {
		case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
			if (!m->delta[0]) {
				erofs_err(m->inode->i_sb,
					  "invalid lookback distance 0 @ nid %llu",
					  vi->nid);
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			lookback_distance = m->delta[0];
			continue;
		case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
		case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
			m->headtype = m->type;
			m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
			return 0;
		default:
			erofs_err(m->inode->i_sb,
				  "unknown type %u @ lcn %lu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
	}

	erofs_err(m->inode->i_sb, "bogus lookback distance @ nid %llu",
		  vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}
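
/*
 * Work out the physical (compressed) length of the current extent: a single
 * lcluster-sized pcluster unless big pcluster is enabled, in which case the
 * CBLKCNT stored in the 1st NONHEAD lcluster supplies the block count.
 */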
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD2);
	DBG_BUGON(m->type != m->headtype);

	if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
		map->m_plen = 1ULL << lclusterbits;
		return 0;
	}
	lcn = m->lcn + 1;
	if (m->compressedblks)
		goto out;

	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially
	 * without valid compressedblks, it mustn't be CBLKCNT, or an internal
	 * implementation error is detected.
	 *
	 * The following code can also handle it properly anyway, but let's
	 * BUG_ON in the debugging mode only for developers to notice that.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedblks = 1 << (lclusterbits - LOG_BLOCK_SIZE);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedblks)
			break;
		fallthrough;
	default:
		erofs_err(m->inode->i_sb,
			  "cannot find CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = (u64)m->compressedblks << LOG_BLOCK_SIZE;
	return 0;
err_bonus_cblkcnt:
	erofs_err(m->inode->i_sb,
		  "bogus CBLKCNT @ lcn %lu of nid %llu",
		  lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}
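
/*
 * Extend m_llen to the decompressed length of the whole extent by walking
 * forward through NONHEAD lclusters (using the delta[1] lookahead) until
 * the next HEAD lcluster or EOF.
 */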
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
			/* go on until the next HEAD lcluster */
			if (lcn != headlcn)
				break;
			m->delta[1] = 1;
		} else {
			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}
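
/*
 * Core mapping routine: resolve the extent covering map->m_la (or the tail
 * extent with EROFS_GET_BLOCKS_FINDTAIL) into logical/physical offsets,
 * lengths and the algorithm format used for decompression.
 */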
static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map,
				 int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			/*
			 * For ztailpacking files, in order to inline data more
			 * effectively, special EOF lclusters are now supported
			 * which can have three parts at most.
			 */
			if (ztailpacking && end > inode->i_size)
				end = inode->i_size;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		fallthrough;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}
	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment &&
		    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
			vi->z_fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = blknr_to_addr(m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto unmap_out;
	}

	if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN) {
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
			map->m_algorithmformat =
				Z_EROFS_COMPRESSION_INTERLACED;
		else
			map->m_algorithmformat =
				Z_EROFS_COMPRESSION_SHIFTED;
	} else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
		map->m_algorithmformat = vi->z_algorithmtype[1];
	} else {
		map->m_algorithmformat = vi->z_algorithmtype[0];
	}

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
	     map->m_llen >= EROFS_BLKSIZ)) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

unmap_out:
	erofs_unmap_metabuf(&m.map->buf);
	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		  __func__, map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);
	return err;
}
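
/*
 * Parse the z_erofs map header on first use. Concurrent initializers are
 * serialized with the EROFS_I_BL_Z_BIT bit lock; EROFS_I_Z_INITED_BIT plus
 * the paired smp_mb()s make the parsed fields visible to lockless readers.
 */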
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err, headnr;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out_unlock;
	}

	h = kaddr + erofs_blkoff(pos);
	/*
	 * if the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode. The remaining bits keep
	 * z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto done;
	}
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	headnr = 0;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
	    vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
			  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
		err = -EOPNOTSUPP;
		goto out_put_metabuf;
	}

	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_put_metabuf;
	}
	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_put_metabuf;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		vi->z_idata_size = le16_to_cpu(h->h_idata_size);
		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);

		if (!map.m_plen ||
		    erofs_blkoff(map.m_pa) + map.m_plen > EROFS_BLKSIZ) {
			erofs_err(sb, "invalid tail-packing pclustersize %llu",
				  map.m_plen);
			err = -EFSCORRUPTED;
		}
		if (err < 0)
			goto out_put_metabuf;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
	    !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);
		if (err < 0)
			goto out_put_metabuf;
	}
done:
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_put_metabuf:
	erofs_put_metabuf(&buf);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}
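
/*
 * Entry point for mapping a logical range of a compressed inode; handles
 * post-EOF requests and whole-file fragments before delegating to
 * z_erofs_do_map_blocks().
 */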
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	int err = 0;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= inode->i_size) {
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(inode);
	if (err)
		goto out;

	if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
	    !vi->z_tailextent_headlcn) {
		map->m_la = 0;
		map->m_llen = inode->i_size;
		map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
				EROFS_MAP_FRAGMENT;
		goto out;
	}

	err = z_erofs_do_map_blocks(inode, map, flags);
out:
	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}
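
/*
 * Report-only iomap hook (e.g. for FIEMAP): translate a mapping result
 * into an iomap extent without performing any I/O.
 */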
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
			      IOMAP_NULL_ADDR : map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * There is no strict rule on how to describe extents past
		 * EOF, yet they need to be handled as below. Otherwise,
		 * iomap itself will get into an endless loop on post EOF.
		 *
		 * Calculate the effective offset by subtracting extent start
		 * (map.m_la) from the requested offset, and add it to length.
		 * (NB: offset >= map.m_la always)
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + offset - map.m_la;
	}
	iomap->flags = 0;
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};