// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"

static struct kmem_cache	*xfs_rmapbt_cur_cache;
/*
 * Reverse map btree.
 *
 * This is a per-ag tree used to track the owner(s) of a given extent. With
 * reflink it is possible for there to be multiple owners, which is a departure
 * from classic XFS. Owner records for data extents are inserted when the
 * extent is mapped and removed when an extent is unmapped. Owner records for
 * all other block types (i.e. metadata) are inserted when an extent is
 * allocated and removed when an extent is freed. There can only be one owner
 * of a metadata extent, usually an inode or some other metadata structure like
 * an AG btree.
 *
 * The rmap btree is part of the free space management, so blocks for the tree
 * are sourced from the agfl. Hence we need transaction reservation support for
 * this tree so that the freelist is always large enough. This also impacts on
 * the minimum space we need to leave free in the AG.
 *
 * The tree is ordered by [ag block, owner, offset]. This is a large key size,
 * but it is the only way to enforce unique keys when a block can be owned by
 * multiple files at any offset. There's no need to order/search by extent
 * size for online updating/management of the tree. It is intended that most
 * reverse lookups will be to find the owner(s) of a particular block, or to
 * try to recover tree and file data from corrupt primary metadata.
 */
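
/*
 * A hypothetical example of that ordering: two inodes sharing a block via
 * reflink produce two records that differ only in the owner/offset part
 * of the key, and they sort adjacently:
 *
 *	[agbno 100, owner 0x8c (inode), offset 0]
 *	[agbno 100, owner 0x9d (inode), offset 512]
 *	[agbno 105, owner XFS_RMAP_OWN_AG, offset 0]
 */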
static struct xfs_btree_cur *
xfs_rmapbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
				cur->bc_ag.agbp, cur->bc_ag.pag);
}
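
/* Point the AGF at a new rmapbt root block and adjust the level count. */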
STATIC void
xfs_rmapbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_ag.pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
STATIC int
xfs_rmapbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = cur->bc_ag.pag;
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(pag, cur->bc_tp, cur->bc_ag.agbp,
				       &bno, 1);
	if (error)
		return error;

	trace_xfs_rmapbt_alloc_block(cur->bc_mp, pag->pag_agno, bno, 1);
	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, pag, bno, 1, false);

	new->s = cpu_to_be32(bno);
	be32_add_cpu(&agf->agf_rmap_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);

	xfs_ag_resv_rmapbt_alloc(cur->bc_mp, pag->pag_agno);

	*stat = 1;
	return 0;
}
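
/*
 * Return an rmapbt block to the AGFL, marking the extent busy so that it
 * cannot be reallocated before the free operation commits to disk.
 */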
STATIC int
xfs_rmapbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = cur->bc_ag.pag;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
	trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,
			bno, 1);
	be32_add_cpu(&agf->agf_rmap_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
	error = xfs_alloc_put_freelist(pag, cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);

	xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
	return 0;
}
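
/*
 * Leaf (level 0) and node blocks have different record sizes, so the
 * min/max records arrays are indexed by (level != 0).
 */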
STATIC int
xfs_rmapbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mnr[level != 0];
}

STATIC int
xfs_rmapbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mxr[level != 0];
}
/*
 * Convert the ondisk record's offset field into the ondisk key's offset field.
 * Fork and bmbt are significant parts of the rmap record key, but written
 * status is merely a record attribute.
 */
static inline __be64 ondisk_rec_offset_to_key(const union xfs_btree_rec *rec)
{
	return rec->rmap.rm_offset & ~cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN);
}
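
/*
 * The ondisk offset field packs the attr fork, bmbt block and unwritten
 * state flags into its uppermost bits; only the unwritten bit is stripped
 * above because written status does not affect key ordering.
 */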
STATIC void
xfs_rmapbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
}
/*
 * The high key for a reverse mapping record can be computed by shifting
 * the startblock and offset to the highest value that would still map
 * to that record.  In practice this means that we add blockcount-1 to
 * the startblock for all records, and if the record is for a data/attr
 * fork mapping, we add blockcount-1 to the offset too.
 */
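/*
 * For example (hypothetical values), a data fork record with
 * [startblock 10, blockcount 5, offset 100] yields the high key
 * [startblock 14, offset 104].
 */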
STATIC void
xfs_rmapbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	uint64_t			off;
	int				adj;

	adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
	if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
	    XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
		return;
	off = be64_to_cpu(key->rmap.rm_offset);
	off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
	key->rmap.rm_offset = cpu_to_be64(off);
}
STATIC void
xfs_rmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}
STATIC void
xfs_rmapbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
/*
 * Mask the appropriate parts of the ondisk key field for a key comparison.
 * Fork and bmbt are significant parts of the rmap record key, but written
 * status is merely a record attribute.
 */
static inline uint64_t offset_keymask(uint64_t offset)
{
	return offset & ~XFS_RMAP_OFF_UNWRITTEN;
}
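
/*
 * Compute the difference between an ondisk key and the record stashed in
 * the cursor, comparing startblock, then owner, then the masked offset.
 */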
STATIC int64_t
xfs_rmapbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_rmap_irec		*rec = &cur->bc_rec.r;
	const struct xfs_rmap_key	*kp = &key->rmap;
	__u64				x, y;
	int64_t				d;

	d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
	if (d)
		return d;

	x = be64_to_cpu(kp->rm_owner);
	y = rec->rm_owner;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = offset_keymask(be64_to_cpu(kp->rm_offset));
	y = offset_keymask(xfs_rmap_irec_offset_pack(rec));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}
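
/*
 * Compare two ondisk keys, componentwise.  A non-NULL @mask selects which
 * key fields participate in the comparison; masking off the startblock is
 * not supported, per the ASSERT below.
 */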
STATIC int64_t
xfs_rmapbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	const struct xfs_rmap_key	*kp1 = &k1->rmap;
	const struct xfs_rmap_key	*kp2 = &k2->rmap;
	int64_t				d;
	__u64				x, y;

	/* Doesn't make sense to mask off the physical space part */
	ASSERT(!mask || mask->rmap.rm_startblock);

	d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
		     be32_to_cpu(kp2->rm_startblock);
	if (d)
		return d;

	if (!mask || mask->rmap.rm_owner) {
		x = be64_to_cpu(kp1->rm_owner);
		y = be64_to_cpu(kp2->rm_owner);
		if (x > y)
			return 1;
		else if (y > x)
			return -1;
	}

	if (!mask || mask->rmap.rm_offset) {
		/* Doesn't make sense to allow offset but not owner */
		ASSERT(!mask || mask->rmap.rm_owner);

		x = offset_keymask(be64_to_cpu(kp1->rm_offset));
		y = offset_keymask(be64_to_cpu(kp2->rm_offset));
		if (x > y)
			return 1;
		else if (y > x)
			return -1;
	}

	return 0;
}
static xfs_failaddr_t
xfs_rmapbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner
	 * as the perag is not fully initialised and hence not attached to the
	 * buffer. In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum
	 * limits in this case.
	 */
	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_has_rmapbt(mp))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && xfs_perag_initialised_agf(pag)) {
		if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
			return __this_address;
	} else if (level >= mp->m_rmap_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}
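
/*
 * The read verifier checks the CRC before the block structure; either
 * failure marks the buffer in error and emits a corruption trace point.
 */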
static void
xfs_rmapbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_rmapbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}
static void
xfs_rmapbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_rmapbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}
const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
	.name			= "xfs_rmapbt",
	.magic			= { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) },
	.verify_read		= xfs_rmapbt_read_verify,
	.verify_write		= xfs_rmapbt_write_verify,
	.verify_struct		= xfs_rmapbt_verify,
};
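
/*
 * Check that two btree keys are in the correct order.  As with the
 * comparison helpers above, the unwritten bit is masked out of the
 * offsets first.
 */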
STATIC int
xfs_rmapbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(k1->rmap.rm_startblock);
	y = be32_to_cpu(k2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(k1->rmap.rm_owner);
	b = be64_to_cpu(k2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = offset_keymask(be64_to_cpu(k1->rmap.rm_offset));
	b = offset_keymask(be64_to_cpu(k2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}
STATIC int
xfs_rmapbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(r1->rmap.rm_startblock);
	y = be32_to_cpu(r2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(r1->rmap.rm_owner);
	b = be64_to_cpu(r2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = offset_keymask(be64_to_cpu(r1->rmap.rm_offset));
	b = offset_keymask(be64_to_cpu(r2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}
STATIC enum xbtree_key_contig
xfs_rmapbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->rmap.rm_startblock);

	/*
	 * We only support checking contiguity of the physical space component.
	 * If any callers ever need more specificity than that, they'll have
	 * to modify this function.
	 */
	ASSERT(!mask || (!mask->rmap.rm_owner && !mask->rmap.rm_offset));

	return xbtree_key_contig(be32_to_cpu(key1->rmap.rm_startblock),
				 be32_to_cpu(key2->rmap.rm_startblock));
}
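
/*
 * Because this is an overlapping (interval) btree, each node entry
 * carries both a low and a high key; hence key_len below is twice
 * sizeof(struct xfs_rmap_key).
 */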
static const struct xfs_btree_ops xfs_rmapbt_ops = {
	.rec_len		= sizeof(struct xfs_rmap_rec),
	.key_len		= 2 * sizeof(struct xfs_rmap_key),

	.dup_cursor		= xfs_rmapbt_dup_cursor,
	.set_root		= xfs_rmapbt_set_root,
	.alloc_block		= xfs_rmapbt_alloc_block,
	.free_block		= xfs_rmapbt_free_block,
	.get_minrecs		= xfs_rmapbt_get_minrecs,
	.get_maxrecs		= xfs_rmapbt_get_maxrecs,
	.init_key_from_rec	= xfs_rmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rmapbt_init_ptr_from_cur,
	.key_diff		= xfs_rmapbt_key_diff,
	.buf_ops		= &xfs_rmapbt_buf_ops,
	.diff_two_keys		= xfs_rmapbt_diff_two_keys,
	.keys_inorder		= xfs_rmapbt_keys_inorder,
	.recs_inorder		= xfs_rmapbt_recs_inorder,
	.keys_contiguous	= xfs_rmapbt_keys_contiguous,
};
static struct xfs_btree_cur *
xfs_rmapbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	/* Overlapping btree; 2 keys per pointer. */
	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
			mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
	cur->bc_ops = &xfs_rmapbt_ops;

	cur->bc_ag.pag = xfs_perag_hold(pag);
	return cur;
}
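
/*
 * A minimal usage sketch (error handling and locking elided; the caller
 * is assumed to hold the AGF buffer @agbp across the query):
 *
 *	cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
 *	error = xfs_rmap_lookup_le(...);	(or another rmap query)
 *	xfs_btree_del_cursor(cur, error);
 */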
/* Create a new reverse mapping btree cursor. */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_init_common(mp, tp, pag);
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
	cur->bc_ag.agbp = agbp;
	return cur;
}
/* Create a new reverse mapping btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_init_common(mp, NULL, pag);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}
/*
 * Install a new reverse mapping btree root.  Caller is responsible for
 * invalidating and freeing the old btree blocks.
 */
void
xfs_rmapbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
				    XFS_AGF_RMAP_BLOCKS);
	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
}
/* Calculate number of records in a reverse mapping btree block. */
static inline unsigned int
xfs_rmapbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen /
		(2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
}
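
/*
 * For example, assuming 4096-byte blocks and the usual 56-byte short-form
 * CRC block header: a leaf holds (4096 - 56) / 24 = 168 records, while a
 * node holds (4096 - 56) / (40 + 4) = 91 key/pointer sets.
 */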
/*
 * Calculate number of records in an rmap btree block.
 */
int
xfs_rmapbt_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_RMAP_BLOCK_LEN;
	return xfs_rmapbt_block_maxrecs(blocklen, leaf);
}
/* Compute the max possible height for reverse mapping btrees. */
unsigned int
xfs_rmapbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;

	minrecs[0] = xfs_rmapbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_rmapbt_block_maxrecs(blocklen, false) / 2;

	/*
	 * Compute the asymptotic maxlevels for an rmapbt on any reflink fs.
	 *
	 * On a reflink filesystem, each AG block can have up to 2^32 (per the
	 * refcount record format) owners, which means that theoretically we
	 * could face up to 2^64 rmap records.  However, we're likely to run
	 * out of blocks in the AG long before that happens, which means that
	 * we must compute the max height based on what the btree will look
	 * like if it consumes almost all the blocks in the AG due to maximal
	 * sharing factor.
	 */
	return xfs_btree_space_to_height(minrecs, XFS_MAX_CRC_AG_BLOCKS);
}
/* Compute the maximum height of an rmap btree. */
void
xfs_rmapbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	if (!xfs_has_rmapbt(mp)) {
		mp->m_rmap_maxlevels = 0;
		return;
	}

	if (xfs_has_reflink(mp)) {
		/*
		 * Compute the asymptotic maxlevels for an rmap btree on a
		 * filesystem that supports reflink.
		 *
		 * On a reflink filesystem, each AG block can have up to 2^32
		 * (per the refcount record format) owners, which means that
		 * theoretically we could face up to 2^64 rmap records.
		 * However, we're likely to run out of blocks in the AG long
		 * before that happens, which means that we must compute the
		 * max height based on what the btree will look like if it
		 * consumes almost all the blocks in the AG due to maximal
		 * sharing factor.
		 */
		mp->m_rmap_maxlevels = xfs_btree_space_to_height(mp->m_rmap_mnr,
				mp->m_sb.sb_agblocks);
	} else {
		/*
		 * If there's no block sharing, compute the maximum rmapbt
		 * height assuming one rmap record per AG block.
		 */
		mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
				mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
	}
	ASSERT(mp->m_rmap_maxlevels <= xfs_rmapbt_maxlevels_ondisk());
}
/* Calculate the rmap btree size for some records. */
xfs_extlen_t
xfs_rmapbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_rmap_mnr, len);
}
/*
 * Calculate the maximum rmap btree size.
 */
xfs_extlen_t
xfs_rmapbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_rmap_mxr[0] == 0)
		return 0;

	return xfs_rmapbt_calc_size(mp, agblocks);
}
/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_rmapbt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_has_rmapbt(mp))
		return 0;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		return error;

	agf = agbp->b_addr;
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_rmap_blocks);
	xfs_trans_brelse(tp, agbp);

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (xfs_ag_contains_log(mp, pag->pag_agno))
		agblocks -= mp->m_sb.sb_logblocks;

	/* Reserve 1% of the AG or enough for 1 block per record. */
	*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
	*used += tree_len;

	return error;
}
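
/* Size the cursor cache for the tallest tree the ondisk format allows. */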
int __init
xfs_rmapbt_init_cur_cache(void)
{
	xfs_rmapbt_cur_cache = kmem_cache_create("xfs_rmapbt_cur",
			xfs_btree_cur_sizeof(xfs_rmapbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_rmapbt_cur_cache)
		return -ENOMEM;
	return 0;
}
void
xfs_rmapbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_rmapbt_cur_cache);
	xfs_rmapbt_cur_cache = NULL;
}