/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
/*
 * Passive reference counting access wrappers to the perag structures. If the
 * per-ag structure is to be freed, the freeing code is responsible for
 * cleaning up objects with passive references before freeing the structure,
 * such as cached buffers.
 */
struct xfs_perag *
xfs_perag_get(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			ref = 0;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		ref = atomic_inc_return(&pag->pag_ref);
	}
	rcu_read_unlock();
	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
	return pag;
}
/*
 * Search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	unsigned int		tag)
{
	struct xfs_perag	*pag;
	int			found;
	int			ref;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	ref = atomic_inc_return(&pag->pag_ref);
	rcu_read_unlock();
	trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
	return pag;
}
void
xfs_perag_put(
	struct xfs_perag	*pag)
{
	int			ref;

	ASSERT(atomic_read(&pag->pag_ref) > 0);
	ref = atomic_dec_return(&pag->pag_ref);
	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}
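
/*
 * Example usage (illustrative sketch, not called from this file): a caller
 * taking a short-lived passive reference to inspect cached per-AG state.
 * The agno variable and the field accessed are hypothetical.
 *
 *	struct xfs_perag	*pag = xfs_perag_get(mp, agno);
 *
 *	if (pag) {
 *		xfs_extlen_t	free = pag->pagf_freeblks;
 *
 *		...use free...
 *		xfs_perag_put(pag);
 *	}
 */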
/*
 * Active references for perag structures. This is for short-term access to
 * the per-ag structures for walking trees or accessing state. If an AG is
 * being shrunk or is offline, then this will fail to find that AG and
 * return NULL instead.
 */
struct xfs_perag *
xfs_perag_grab(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		trace_xfs_perag_grab(mp, pag->pag_agno,
				atomic_read(&pag->pag_active_ref), _RET_IP_);
		if (!atomic_inc_not_zero(&pag->pag_active_ref))
			pag = NULL;
	}
	rcu_read_unlock();
	return pag;
}
/*
 * Search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_grab_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
	struct xfs_perag	*pag;
	int			found;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	trace_xfs_perag_grab_tag(mp, pag->pag_agno,
			atomic_read(&pag->pag_active_ref), _RET_IP_);
	if (!atomic_inc_not_zero(&pag->pag_active_ref))
		pag = NULL;
	rcu_read_unlock();
	return pag;
}
void
xfs_perag_rele(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_rele(pag->pag_mount, pag->pag_agno,
			atomic_read(&pag->pag_active_ref), _RET_IP_);
	if (atomic_dec_and_test(&pag->pag_active_ref))
		wake_up(&pag->pag_active_wq);
}
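
/*
 * Example usage (sketch): walking all online AGs under active references,
 * in the spirit of the for_each_perag() iterators declared in xfs_ag.h.
 * The loop body and variable names are illustrative only.
 *
 *	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 *		pag = xfs_perag_grab(mp, agno);
 *		if (!pag)
 *			continue;	(AG offline or being shrunk)
 *		...walk trees or per-AG state...
 *		xfs_perag_rele(pag);
 *	}
 */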
/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
	struct xfs_perag	*pag;
	struct xfs_sb		*sbp = &mp->m_sb;
	uint64_t		ifree = 0;
	uint64_t		ialloc = 0;
	uint64_t		bfree = 0;
	uint64_t		bfreelst = 0;
	uint64_t		btree = 0;
	uint64_t		fdblocks;
	int			error = 0;

	for (index = 0; index < agcount; index++) {
		/*
		 * Read the AGF and AGI buffers to populate the per-ag
		 * structures for us.
		 */
		pag = xfs_perag_get(mp, index);
		error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
		if (!error)
			error = xfs_ialloc_read_agi(pag, NULL, NULL);
		if (error) {
			xfs_perag_put(pag);
			return error;
		}

		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	fdblocks = bfree + bfreelst + btree;

	/*
	 * If the new summary counts are obviously incorrect, fail the
	 * mount operation because that implies the AGFs are also corrupt.
	 * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
	 * will prevent xfs_repair from fixing anything.
	 */
	if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
		xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out;
	}

	/* Overwrite incore superblock counters with just-read data */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = fdblocks;
	spin_unlock(&mp->m_sb_lock);

	xfs_reinit_percpu_counters(mp);
out:
	xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	return error;
}
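
/*
 * Worked example of the summary counter arithmetic above (illustrative
 * numbers): for two AGs with pagf_freeblks = {100, 50}, pagf_flcount =
 * {4, 4} and pagf_btreeblks = {2, 1}, the incore count becomes
 * fdblocks = (100 + 50) + (4 + 4) + (2 + 1) = 161. AGFL blocks and free
 * space btree blocks are included because the free space accounting
 * treats them as available.
 */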
STATIC void
__xfs_free_perag(
	struct rcu_head		*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
	kmem_free(pag);
}
/*
 * Free up the per-ag resources associated with the mount structure.
 */
void
xfs_free_perag(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);

		cancel_delayed_work_sync(&pag->pag_blockgc_work);
		xfs_buf_hash_destroy(pag);

		/* drop the mount's active reference */
		xfs_perag_rele(pag);
		XFS_IS_CORRUPT(pag->pag_mount,
				atomic_read(&pag->pag_active_ref) != 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}
/* Find the size of the AG, in blocks. */
static xfs_agblock_t
__xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks)
{
	ASSERT(agno < agcount);

	if (agno < agcount - 1)
		return mp->m_sb.sb_agblocks;
	return dblocks - (agno * mp->m_sb.sb_agblocks);
}
xfs_agblock_t
xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	return __xfs_ag_block_count(mp, agno, mp->m_sb.sb_agcount,
			mp->m_sb.sb_dblocks);
}
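
/*
 * Worked example (illustrative geometry): with sb_agblocks = 16384,
 * sb_agcount = 4 and sb_dblocks = 57344, AGs 0-2 are each 16384 blocks
 * long, while the last AG gets the remainder:
 * 57344 - 3 * 16384 = 8192 blocks.
 */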
/* Calculate the first and last possible inode number in an AG. */
static void
__xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agblock_t		eoag,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	xfs_agblock_t		bno;

	/*
	 * Calculate the first inode, which will be in the first
	 * cluster-aligned block after the AGFL.
	 */
	bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
	*first = XFS_AGB_TO_AGINO(mp, bno);

	/*
	 * Calculate the last inode, which will be at the end of the
	 * last (aligned) cluster that can be allocated in the AG.
	 */
	bno = round_down(eoag, M_IGEO(mp)->cluster_align);
	*last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}
void
xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
}
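
/*
 * Worked example (illustrative values): if XFS_AGFL_BLOCK() is 3 and
 * cluster_align is 4 blocks, the first inode cluster starts at
 * round_up(3 + 1, 4) = block 4, so *first = XFS_AGB_TO_AGINO(mp, 4).
 * With eoag = 8190, the last aligned cluster ends at
 * round_down(8190, 4) = 8188, so *last = XFS_AGB_TO_AGINO(mp, 8188) - 1.
 */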
int
xfs_initialize_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks,
	xfs_agnumber_t		*maxagi)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;
	xfs_agnumber_t		first_initialised = NULLAGNUMBER;
	int			error;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag) {
			error = -ENOMEM;
			goto out_unwind_new_pags;
		}
		pag->pag_agno = index;
		pag->pag_mount = mp;

		error = radix_tree_preload(GFP_NOFS);
		if (error)
			goto out_free_pag;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			WARN_ON_ONCE(1);
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_free_pag;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();

#ifdef __KERNEL__
		/* Place kernel structure only init below this point. */
		spin_lock_init(&pag->pag_ici_lock);
		spin_lock_init(&pag->pagb_lock);
		spin_lock_init(&pag->pag_state_lock);
		INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		init_waitqueue_head(&pag->pagb_wait);
		init_waitqueue_head(&pag->pag_active_wq);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
#endif /* __KERNEL__ */

		error = xfs_buf_hash_init(pag);
		if (error)
			goto out_remove_pag;

		/* Active ref owned by mount indicates AG is online. */
		atomic_set(&pag->pag_active_ref, 1);

		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;

		/*
		 * Pre-calculated geometry
		 */
		pag->block_count = __xfs_ag_block_count(mp, index, agcount,
				dblocks);
		pag->min_block = XFS_AGFL_BLOCK(mp);
		__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_remove_pag:
	radix_tree_delete(&mp->m_perag_tree, index);
out_free_pag:
	kmem_free(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	for (index = first_initialised; index < agcount; index++) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		if (!pag)
			break;
		xfs_buf_hash_destroy(pag);
		kmem_free(pag);
	}
	return error;
}
static int
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
	if (error)
		return error;

	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	*bpp = bp;
	return 0;
}
/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
}
/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

	if (xfs_ag_contains_log(mp, id->agno)) {
		struct xfs_alloc_rec	*nrec;
		xfs_agblock_t		start = XFS_FSB_TO_AGBNO(mp,
							mp->m_sb.sb_logstart);

		ASSERT(start >= mp->m_ag_prealloc_blocks);
		if (start != mp->m_ag_prealloc_blocks) {
			/*
			 * Modify first record to pad stripe align of log
			 */
			arec->ar_blockcount = cpu_to_be32(start -
						mp->m_ag_prealloc_blocks);
			nrec = arec + 1;

			/*
			 * Insert second record at start of internal log
			 * which then gets trimmed.
			 */
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
			be16_add_cpu(&block->bb_numrecs, 1);
		}
		/*
		 * Change record start to after the internal log
		 */
		be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
	}

	/*
	 * Calculate the record block count and check for the case where
	 * the log might have consumed all available space in the AG. If
	 * so, reset the record count to 0 to avoid exposure of an invalid
	 * record start block.
	 */
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
	if (!arec->ar_blockcount)
		block->bb_numrecs = 0;
}
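
/*
 * Example of the records built above for an AG containing a stripe-aligned
 * internal log (illustrative numbers): with m_ag_prealloc_blocks = 64, a
 * log starting at AG block 128 and sb_logblocks = 256, the root ends up
 * with two records: one covering the alignment padding, [64, 128), and one
 * covering everything after the log, [384, agsize). If the log starts
 * exactly at block 64, only the single post-log record remains.
 */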
/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}
/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_has_reflink(mp)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}

	/* account for the log space */
	if (xfs_ag_contains_log(mp, id->agno)) {
		rrec = XFS_RMAP_REC_ADDR(block,
				be16_to_cpu(block->bb_numrecs) + 1);
		rrec->rm_startblock = cpu_to_be32(
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
		rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}
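
/*
 * Resulting rmapbt root records, in order, for a freshly grown AG with
 * neither reflink nor an internal log (ranges use the symbolic block
 * numbers referenced above):
 *
 *	1: [0, XFS_BNO_BLOCK)			OWN_FS    static AG headers
 *	2: [XFS_BNO_BLOCK, 2 blocks)		OWN_AG    bnobt/cntbt roots
 *	3: [XFS_IBT_BLOCK, XFS_RMAP_BLOCK)	OWN_INOBT inobt/finobt roots
 *	4: [XFS_RMAP_BLOCK, 1 block)		OWN_AG    rmapbt root
 */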
/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = bp->b_addr;

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}
static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = bp->b_addr;
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_has_rmapbt(mp)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_has_crc(mp))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_reflink(mp)) {
		agf->agf_refcount_root = cpu_to_be32(xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}

	if (xfs_ag_contains_log(mp, id->agno)) {
		int64_t	logblocks = mp->m_sb.sb_logblocks;

		be32_add_cpu(&agf->agf_freeblks, -logblocks);
		agf->agf_longest = cpu_to_be32(id->agsize -
			XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
	}
}
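
/*
 * Worked example for the internal log adjustment above (illustrative
 * numbers): with id->agsize = 16384, a log starting at AG block 4096 and
 * sb_logblocks = 2048, agf_freeblks is reduced by 2048 and agf_longest
 * becomes 16384 - 4096 - 2048 = 10240, the longest free run left after
 * the log.
 */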
static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_has_crc(mp)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = xfs_buf_to_agfl_bno(bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}
static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = bp->b_addr;
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_has_crc(mp))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_finobt(mp)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	if (xfs_has_inobtcounts(mp)) {
		agi->agi_iblocks = cpu_to_be32(1);
		if (xfs_has_finobt(mp))
			agi->agi_fblocks = cpu_to_be32(1);
	}
}
typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);

static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
	if (error)
		return error;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}
struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;
	size_t			numblks;
	const struct xfs_buf_ops *ops;
	aghdr_init_work_f	work;
	xfs_btnum_t		type;
	bool			need_init;
};
/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to a
 * delayed write buffer list supplied by the caller so they can submit them to
 * disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)

{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_bnobt_buf_ops,
		.work = &xfs_bnoroot_init,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_cntbt_buf_ops,
		.work = &xfs_cntroot_init,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_INO,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_finobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_FINO,
		.need_init = xfs_has_finobt(mp)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.need_init = xfs_has_rmapbt(mp)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_REFC,
		.need_init = xfs_has_reflink(mp)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->type = dp->type;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}
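
/*
 * Example usage (sketch of a growfs-style caller's pattern): the caller
 * fills in the geometry for the new AG, lets this function queue the
 * header buffers, then submits the delayed write list itself. Error
 * handling is elided and variable names are illustrative.
 *
 *	struct aghdr_init_data	id = {
 *		.agno	= agno,
 *		.agsize	= agsize,
 *	};
 *
 *	INIT_LIST_HEAD(&id.buffer_list);
 *	error = xfs_ag_init_headers(mp, &id);
 *	if (!error)
 *		error = xfs_buf_delwri_submit(&id.buffer_list);
 */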
/* Shrink the AG by @delta blocks. */
int
xfs_ag_shrink_space(
	struct xfs_perag	*pag,
	struct xfs_trans	**tpp,
	xfs_extlen_t		delta)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_alloc_arg	args = {
		.tp	= *tpp,
		.mp	= mp,
		.type	= XFS_ALLOCTYPE_THIS_BNO,
		.minlen = delta,
		.maxlen = delta,
		.oinfo	= XFS_RMAP_OINFO_SKIP_UPDATE,
		.resv	= XFS_AG_RESV_NONE,
		.prod	= 1
	};
	struct xfs_buf		*agibp, *agfbp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	xfs_agblock_t		aglen;
	int			error, err2;

	ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1);
	error = xfs_ialloc_read_agi(pag, *tpp, &agibp);
	if (error)
		return error;

	agi = agibp->b_addr;

	error = xfs_alloc_read_agf(pag, *tpp, 0, &agfbp);
	if (error)
		return error;

	agf = agfbp->b_addr;
	aglen = be32_to_cpu(agi->agi_length);
	/* some extra paranoid checks before we shrink the ag */
	if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length))
		return -EFSCORRUPTED;
	if (delta >= aglen)
		return -EINVAL;

	args.fsbno = XFS_AGB_TO_FSB(mp, pag->pag_agno, aglen - delta);

	/*
	 * Make sure that the last inode cluster cannot overlap with the new
	 * end of the AG, even if it's sparse.
	 */
	error = xfs_ialloc_check_shrink(*tpp, pag->pag_agno, agibp,
			aglen - delta);
	if (error)
		return error;

	/*
	 * Disable perag reservations so it doesn't cause the allocation request
	 * to fail. We'll reestablish reservation before we return.
	 */
	error = xfs_ag_resv_free(pag);
	if (error)
		return error;

	/* internal log shouldn't also show up in the free space btrees */
	error = xfs_alloc_vextent(&args);
	if (!error && args.agbno == NULLAGBLOCK)
		error = -ENOSPC;

	if (error) {
		/*
		 * If extent allocation fails, we need to roll the transaction
		 * to ensure that the AGFL fixup has been committed anyway.
		 */
		xfs_trans_bhold(*tpp, agfbp);
		err2 = xfs_trans_roll(tpp);
		if (err2)
			return err2;
		xfs_trans_bjoin(*tpp, agfbp);
		goto resv_init_out;
	}

	/*
	 * If successfully deleted from the freespace btrees, we need to
	 * confirm the per-AG reservation works as expected.
	 */
	be32_add_cpu(&agi->agi_length, -delta);
	be32_add_cpu(&agf->agf_length, -delta);

	err2 = xfs_ag_resv_init(pag, *tpp);
	if (err2) {
		be32_add_cpu(&agi->agi_length, delta);
		be32_add_cpu(&agf->agf_length, delta);
		if (err2 != -ENOSPC)
			goto resv_err;

		__xfs_free_extent_later(*tpp, args.fsbno, delta, NULL, true);

		/*
		 * Roll the transaction before trying to re-init the per-ag
		 * reservation. The new transaction is clean so it will cancel
		 * without any side effects.
		 */
		error = xfs_defer_finish(tpp);
		if (error)
			return error;

		return -ENOSPC;
	}
	xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
	xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
	return 0;
resv_init_out:
	err2 = xfs_ag_resv_init(pag, *tpp);
	if (!err2)
		return error;
resv_err:
	xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", err2);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return err2;
}
/*
 * Extend the AG indicated by @pag by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1);

	error = xfs_ialloc_read_agi(pag, tp, &bp);
	if (error)
		return error;

	agi = bp->b_addr;
	be32_add_cpu(&agi->agi_length, len);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change agf length.
	 */
	error = xfs_alloc_read_agf(pag, tp, 0, &bp);
	if (error)
		return error;

	agf = bp->b_addr;
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
	error = xfs_rmap_free(tp, bp, pag, be32_to_cpu(agf->agf_length) - len,
				len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;

	error = xfs_free_extent(tp, XFS_AGB_TO_FSB(pag->pag_mount,
					pag->pag_agno,
					be32_to_cpu(agf->agf_length) - len),
				len, &XFS_RMAP_OINFO_SKIP_UPDATE,
				XFS_AG_RESV_NONE);
	if (error)
		return error;

	/* Update perag geometry */
	pag->block_count = be32_to_cpu(agf->agf_length);
	__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	return 0;
}
/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
	struct xfs_perag	*pag,
	struct xfs_ag_geometry	*ageo)
{
	struct xfs_buf		*agi_bp;
	struct xfs_buf		*agf_bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	unsigned int		freeblks;
	int			error;

	/* Lock the AG headers. */
	error = xfs_ialloc_read_agi(pag, NULL, &agi_bp);
	if (error)
		return error;
	error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
	if (error)
		goto out_agi;

	/* Fill out form. */
	memset(ageo, 0, sizeof(*ageo));
	ageo->ag_number = pag->pag_agno;

	agi = agi_bp->b_addr;
	ageo->ag_icount = be32_to_cpu(agi->agi_count);
	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

	agf = agf_bp->b_addr;
	ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
		   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
	ageo->ag_freeblks = freeblks;
	xfs_ag_geom_health(pag, ageo);

	/* Release resources. */
	xfs_buf_relse(agf_bp);
out_agi:
	xfs_buf_relse(agi_bp);
	return error;
}
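
/*
 * Example usage (sketch): an ioctl-style caller retrieving the geometry of
 * a single AG; the lookup and error handling shown here are illustrative.
 *
 *	struct xfs_ag_geometry	ageo;
 *	struct xfs_perag	*pag;
 *
 *	pag = xfs_perag_get(mp, agno);
 *	if (!pag)
 *		return -EINVAL;
 *	error = xfs_ag_get_geometry(pag, &ageo);
 *	xfs_perag_put(pag);
 */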