/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag_resv.h"
extern kmem_zone_t	*xfs_bmap_free_item_zone;

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
/*
 * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots in
 * the beginning of the block for a proper header with the location information
 * and CRC.
 */
unsigned int
xfs_agfl_size(
	struct xfs_mount	*mp)
{
	unsigned int		size = mp->m_sb.sb_sectsize;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		size -= sizeof(struct xfs_agfl);

	return size / sizeof(xfs_agblock_t);
}
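
/*
 * Worked example (illustrative): on a V5 (CRC-enabled) filesystem with
 * 512-byte sectors, the 36-byte struct xfs_agfl header leaves 476 bytes,
 * i.e. 476 / 4 = 119 AGFL slots; without the header a V4 filesystem gets
 * the full 512 / 4 = 128 slots.
 */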
unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		return xfs_refc_block(mp) + 1;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}
/*
 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
 * AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks. However, these restrictions may result in no actual space
 * being allocated for a delayed extent: for example, a data block in a certain
 * AG is allocated, but there is no additional block for the bmap btree block
 * that a split of the file's bmap btree would require. The result of this may
 * lead to an infinite loop when the file gets flushed to disk and all delayed
 * extents need to be actually allocated. To get around this, we explicitly set
 * aside a few blocks which will not be reserved in delayed allocation.
 *
 * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
 * potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
}
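
/*
 * Worked example (illustrative): with sb_agcount = 4 and
 * XFS_ALLOC_AGFL_RESERVE = 4, xfs_alloc_set_aside() holds back
 * 4 * (4 + 4) = 32 blocks that delayed allocation may never commit
 * to user data.
 */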
/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG. However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata. These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode and rmap btree root blocks.
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry. The others are all single blocks.
 */
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	unsigned int		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
	blocks += XFS_ALLOC_AGFL_RESERVE;
	blocks += 3;			/* AGF, AGI btree root blocks */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		blocks++;		/* finobt root block */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		blocks++;		/* rmap root block */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		blocks++;		/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}
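
/*
 * Worked example (illustrative): with 512-byte sectors and 4096-byte blocks,
 * the four sector-sized AG headers round up to a single block, so a V5
 * filesystem with the finobt, rmapbt and reflink features pays
 * 1 + 4 + 3 + 1 + 1 + 1 = 11 blocks and the usable maximum is
 * sb_agblocks - 11.
 */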
/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}
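
/*
 * Typical usage (sketch, mirroring callers later in this file): position
 * the cursor first, then fetch the record it points at:
 *
 *	error = xfs_alloc_lookup_le(cur, bno, len, &i);
 *	if (!error && i)
 *		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
 *
 * where i reports whether the lookup/get found a record at all.
 */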
/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}
/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !(*stat))
		return error;
	if (rec->alloc.ar_blockcount == 0)
		return -EFSCORRUPTED;

	*bno = be32_to_cpu(rec->alloc.ar_startblock);
	*len = be32_to_cpu(rec->alloc.ar_blockcount);
	return 0;
}
/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	/* Trim busy sections out of found extent */
	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	/*
	 * If we have a largish extent that happens to start before min_agbno,
	 * see if we can shift it into range...
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}
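
/*
 * Worked example (illustrative): with args->alignment = 4 and a found
 * extent [bno = 7, len = 20], roundup(7, 4) = 8 gives diff = 1, so the
 * aligned result is [8, 19].  Had diff been >= len, *reslen would be 0
 * and the caller would reject the extent.
 */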
/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */
	bool		userdata = xfs_alloc_is_userdata(datatype);

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before desired block. The second case is there to allow
	 * for contiguous allocation from the remaining free space if the file
	 * grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
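
/*
 * Worked example (illustrative): wantbno = 100, wantlen = 10, alignment = 4
 * and a free extent [freebno = 90, freelen = 40], so freeend = 130 and
 * wantend = 110.  The second branch picks newbno1 = roundup(100, 4) = 100
 * and newbno2 = 96; both candidates yield length 10, and
 * XFS_ABSDIFF(100, 100) = 0 beats XFS_ABSDIFF(96, 100) = 4, so *newbnop is
 * 100 and the returned diff is 0.
 */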
/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* casts to (int) catch length underflows */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}
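
/*
 * Worked example (illustrative): prod = 4, mod = 1.  For rlen = 14,
 * k = 14 % 4 = 2 > mod, so rlen becomes 14 - (2 - 1) = 13 and
 * 13 % 4 == 1 as required.  For rlen = 12, k = 0 < mod, so rlen becomes
 * 12 - 4 + (1 - 0) = 9, which again satisfies rlen % prod == mod.
 */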
/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(mp,
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	return 0;
}
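
/*
 * Worked example (illustrative) of the four cases above for a free extent
 * [fbno = 100, flen = 50]:
 *	rbno = 100, rlen = 50: no freespace remains;
 *	rbno = 100, rlen = 20: one new record [120, 30];
 *	rbno = 130, rlen = 20: one new record [100, 30];
 *	rbno = 110, rlen = 20: two new records [100, 10] and [130, 20].
 */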
static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	int		i;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return NULL;

	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
		return __this_address;
	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < xfs_agfl_size(mp); i++) {
		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return __this_address;
	}

	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
		return __this_address;
	return NULL;
}
static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_failaddr_t	fa;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agfl_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}
static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	/* no verification of non-crc AGFLs */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	fa = xfs_agfl_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
	.verify_struct = xfs_agfl_verify,
};
/*
 * Read in the allocation group free block array.
 */
int					/* error */
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
{
	xfs_buf_t	*bp;		/* return value */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}
STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length)))
		return -EFSCORRUPTED;

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}
/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);

	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
	ASSERT(args->agbno % args->alignment == 0);

	/* if not file data, insert new block into the reverse map btree */
	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
				       args->agbno, args->len, &args->oinfo);
		if (error)
			return error;
	}

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
					       args->agbno, args->len));
	}

	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);

	XFS_STATS_INC(args->mp, xs_allocx);
	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
	return error;
}
/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of busy extent */
	xfs_extlen_t	tlen;	/* length of busy extent */
	xfs_agblock_t	tend;	/* end block of busy extent */
	int		i;	/* success/failure of operation */
	unsigned	busy_gen;

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	tbno = fbno;
	tlen = flen;
	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}
/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,	/* extent length */
	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
	xfs_extlen_t		*slena,	/* aligned extent length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;
	unsigned		busy_gen;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the end.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen,
				sbnoa, slena, &busy_gen);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (*sbnoa > args->max_agbno)
				goto out_use_good;
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa < args->min_agbno)
				goto out_use_good;
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment,
						       args->datatype, *sbnoa,
						       *slena, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}
/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
	xfs_agblock_t	gtbno;		/* start bno of right side entry */
	xfs_agblock_t	gtbnoa;		/* aligned ... */
	xfs_extlen_t	gtdiff;		/* difference to right side entry */
	xfs_extlen_t	gtlen;		/* length of right side entry */
	xfs_extlen_t	gtlena;		/* aligned ... */
	xfs_agblock_t	gtnew;		/* useful start bno of right side */
	int		error;		/* error code */
	int		i;		/* result code, temporary */
	int		j;		/* result code, temporary */
	xfs_agblock_t	ltbno;		/* start bno of left side entry */
	xfs_agblock_t	ltbnoa;		/* aligned ... */
	xfs_extlen_t	ltdiff;		/* difference to left side entry */
	xfs_extlen_t	ltlen;		/* length of left side entry */
	xfs_extlen_t	ltlena;		/* aligned ... */
	xfs_agblock_t	ltnew;		/* useful start bno of left side */
	xfs_extlen_t	rlen;		/* length of returned extent */
	bool		busy;
	unsigned	busy_gen;
#ifdef DEBUG
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int		dofirst;	/* set to do first algorithm */

	dofirst = prandom_u32() & 1;
#endif

	/* handle uninitialized agbno range so caller doesn't have to */
	if (!args->min_agbno && !args->max_agbno)
		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
	ASSERT(args->min_agbno <= args->max_agbno);

	/* clamp agbno to the range if it's outside */
	if (args->agbno < args->min_agbno)
		args->agbno = args->min_agbno;
	if (args->agbno > args->max_agbno)
		args->agbno = args->max_agbno;

restart:
	bno_cur_lt = NULL;
	bno_cur_gt = NULL;
	ltlen = 0;
	gtlena = 0;
	ltlena = 0;
	busy = false;

	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);

	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t	bdiff;
		int		besti=0;
		xfs_extlen_t	blen=0;
		xfs_agblock_t	bnew=0;

#ifdef DEBUG
		if (dofirst)
			break;
#endif
		/*
		 * Start from the entry that lookup found, sequence through
		 * all larger free blocks.  If we're actually pointing at a
		 * record smaller than maxlen, go to the start of this block,
		 * and skip all those smaller than minlen.
		 */
		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
			/*
			 * For each entry, decide if it's better than
			 * the previous best entry.
			 */
			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
					&ltbnoa, &ltlena, &busy_gen);
			if (ltlena < args->minlen)
				continue;
			if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}
		/*
		 * It didn't work.  We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;
		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;

		/*
		 * We are allocating starting at bnew for blen blocks.
		 */
		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltbno + ltlen);
		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO);
		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);

		trace_xfs_alloc_near_first(args);
		return 0;
	}
	/*
	 * Second algorithm.
	 * Search in the by-bno tree to the left and to the right
	 * simultaneously, until in each case we find a space big enough,
	 * or run into the edge of the tree.  When we run into the edge,
	 * we deallocate that cursor.
	 * If both searches succeed, we compare the two spaces and pick
	 * the better one.
	 * With alignment, it's possible for both to fail; the upper
	 * level algorithm that picks allocation groups for allocations
	 * is not supposed to do this.
	 */
	/*
	 * Allocate and initialize the cursor for the leftward search.
	 */
	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	/*
	 * Lookup <= bno to find the leftward search's starting point.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find anything; use this cursor for the rightward
		 * search.
		 */
		bno_cur_gt = bno_cur_lt;
		bno_cur_lt = NULL;
	}
	/*
	 * Found something.  Duplicate the cursor for the rightward search.
	 */
	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
		goto error0;
	/*
	 * Increment the cursor, so we will point at the entry just right
	 * of the leftward entry if any, or to the leftmost entry.
	 */
	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
		goto error0;
	if (!i) {
		/*
		 * It failed, there are no rightward entries.
		 */
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
		bno_cur_gt = NULL;
	}
	/*
	 * Loop going left with the leftward cursor, right with the
	 * rightward cursor, until either both directions give up or
	 * we find an entry at least as big as minlen.
	 */
	do {
		if (bno_cur_lt) {
			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
					&ltbnoa, &ltlena, &busy_gen);
			if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
				break;
			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
				goto error0;
			if (!i || ltbnoa < args->min_agbno) {
				xfs_btree_del_cursor(bno_cur_lt,
						     XFS_BTREE_NOERROR);
				bno_cur_lt = NULL;
			}
		}
		if (bno_cur_gt) {
			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
					&gtbnoa, &gtlena, &busy_gen);
			if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
				break;
			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
				goto error0;
			if (!i || gtbnoa > args->max_agbno) {
				xfs_btree_del_cursor(bno_cur_gt,
						     XFS_BTREE_NOERROR);
				bno_cur_gt = NULL;
			}
		}
	} while (bno_cur_lt || bno_cur_gt);

	/*
	 * Got both cursors still active, need to find better entry.
	 */
	if (bno_cur_lt && bno_cur_gt) {
		if (ltlena >= args->minlen) {
			/*
			 * Left side is good, look for a right side entry.
			 */
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_lt, &bno_cur_gt,
						ltdiff, &gtbno, &gtlen,
						&gtbnoa, &gtlena,
						0 /* search right */);
		} else {
			ASSERT(gtlena >= args->minlen);

			/*
			 * Right side is good, look for a left side entry.
			 */
			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
			xfs_alloc_fix_len(args);
			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, gtbnoa,
				gtlena, &gtnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_gt, &bno_cur_lt,
						gtdiff, &ltbno, &ltlen,
						&ltbnoa, &ltlena,
						1 /* search left */);
		}

		if (error)
			goto error0;
	}

	/*
	 * If we couldn't get anything, give up.
	 */
	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

		if (busy) {
			trace_xfs_alloc_near_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}

	/*
	 * At this point we have selected a freespace entry, either to the
	 * left or to the right.  If it's on the right, copy all the
	 * useful variables to the "left" set so we only have one
	 * copy of this code.
	 */
	if (bno_cur_gt) {
		bno_cur_lt = bno_cur_gt;
		bno_cur_gt = NULL;
		ltbno = gtbno;
		ltbnoa = gtbnoa;
		ltlen = gtlen;
		ltlena = gtlena;
		j = 1;
	} else
		j = 0;

	/*
	 * Fix up the length and compute the useful address.
	 */
	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
	xfs_alloc_fix_len(args);
	rlen = args->len;
	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
				     args->datatype, ltbnoa, ltlena, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
	args->agbno = ltnew;

	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
		goto error0;

	if (j)
		trace_xfs_alloc_near_greater(args);
	else
		trace_xfs_alloc_near_lesser(args);

	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
	return 0;

error0:
	trace_xfs_alloc_near_error(args);
	if (cnt_cur != NULL)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur_lt != NULL)
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
	if (bno_cur_gt != NULL)
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
	return error;
}
/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */
	bool		busy;
	unsigned	busy_gen;

restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;
	busy = false;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none then we have to settle for a smaller extent. In the case that
	 * there are no large extents, this will return the last entry in the
	 * tree unless the tree is empty. In the case that there are only busy
	 * large extents, this will return the largest small extent unless there
	 * are no smaller extents available.
	 */
	if (!i) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
				&rlen, &busy_gen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);

			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {
				/*
				 * Our only valid extents must have been busy.
				 * Make it unbusy by forcing the log out and
				 * retrying.
				 */
				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				xfs_extent_busy_flush(args->mp,
							args->pag, busy_gen);
				goto restart;
			}
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			if (flen < bestrlen)
				break;
			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (busy) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(args->mp,
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}
/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	struct xfs_owner_info	oinfo;
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;

	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 &&
		 args->resv != XFS_AG_RESV_AGFL &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
			      xfs_alloc_allow_busy_reuse(args->datatype));

			if (xfs_alloc_is_userdata(args->datatype)) {
				xfs_buf_t	*bp;

				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				if (!bp) {
					error = -EFSCORRUPTED;
					goto error0;
				}
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(args->mp,
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);

			/*
			 * If we're feeding an AGFL block to something that
			 * doesn't live in the free space, we need to clear
			 * out the OWN_AG rmap.
			 */
			xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
			error = xfs_rmap_free(args->tp, args->agbp, args->agno,
					fbno, 1, &oinfo);
			if (error)
				goto error0;

			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}
/*
 * Free the extent starting at agno/bno for length.
 */
STATIC int
xfs_free_ag_extent(
	xfs_trans_t		*tp,
	xfs_buf_t		*agbp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type	type)
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
	int		error;		/* error return value */
	xfs_agblock_t	gtbno;		/* start of right neighbor block */
	xfs_extlen_t	gtlen;		/* length of right neighbor block */
	int		haveleft;	/* have a left neighbor block */
	int		haveright;	/* have a right neighbor block */
	int		i;		/* temp, result code */
	xfs_agblock_t	ltbno;		/* start of left neighbor block */
	xfs_extlen_t	ltlen;		/* length of left neighbor block */
	xfs_mount_t	*mp;		/* mount point struct for filesystem */
	xfs_agblock_t	nbno;		/* new starting block of freespace */
	xfs_extlen_t	nlen;		/* new length of freespace */
	xfs_perag_t	*pag;		/* per allocation group data */

	bno_cur = cnt_cur = NULL;
	mp = tp->t_mountp;

	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
		error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
		if (error)
			goto error0;
	}

	/*
	 * Allocate and initialize a cursor for the by-block btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	/*
	 * Look for a neighboring block on the left (lower block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {
		/*
		 * There is a block to our left.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(mp,
						ltbno + ltlen <= bno, error0);
		}
	}
	/*
	 * Look for a neighboring block on the right (higher block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {
		/*
		 * There is a block to our right.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (bno + len < gtbno)
			haveright = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
		}
	}
	/*
	 * Now allocate and initialize a cursor for the by-size tree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
	/*
	 * Have both left and right contiguous neighbors.
	 * Merge all three into a single free block.
	 */
	if (haveleft && haveright) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * Delete the old by-block entry for the right block.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * Move the by-block cursor back to the left neighbor.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
#ifdef DEBUG
		/*
		 * Check that this is the right record: delete didn't
		 * mangle the cursor.
		 */
		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(mp,
				i == 1 && xxbno == ltbno && xxlen == ltlen,
				error0);
		}
#endif
		/*
		 * Update remaining by-block entry to the new, joined block.
		 */
		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a left contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveleft) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * Back up the by-block cursor to the left neighbor, and
		 * update its length.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a right contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveright) {
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * Update the starting block and length of the right
		 * neighbor in the by-block tree.
		 */
		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * No contiguous neighbors.
	 * Insert the new freespace into the by-block tree.
	 */
	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;
	/*
	 * In all cases we need to insert the new freespace in the by-size tree.
	 */
	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
	if ((error = xfs_btree_insert(cnt_cur, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;

	/*
	 * Update the freespace totals in the ag and superblock.
	 */
	pag = xfs_perag_get(mp, agno);
	error = xfs_alloc_update_counters(tp, pag, agbp, len);
	xfs_ag_resv_free_extent(pag, type, tp, len);
	xfs_perag_put(pag);
	if (error)
		goto error0;

	XFS_STATS_INC(mp, xs_freex);
	XFS_STATS_ADD(mp, xs_freeb, len);

	trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);

	return 0;

error0:
	trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Visible (exported) allocation/free functions.
 * Some of these are used just by xfs_alloc_btree.c and this file.
 */

/*
 * Compute and fill in value of m_ag_maxlevels.
 */
void
xfs_alloc_compute_maxlevels(
	xfs_mount_t	*mp)	/* file system mount structure */
{
	mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
			(mp->m_sb.sb_agblocks + 1) / 2);
}
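
/*
 * Worked example (illustrative): the (sb_agblocks + 1) / 2 record count
 * models worst-case fragmentation where every other block is free.  For a
 * 1048576-block AG that is 524288 records; with a hypothetical minimum of
 * 250 records per block they need 2098 leaf blocks, 9 interior blocks and
 * a single root, i.e. 3 levels.
 */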
/*
 * Find the length of the longest extent in an AG.  The 'need' parameter
 * specifies how much space we're going to need for the AGFL and the
 * 'reserved' parameter tells us how many blocks in this AG are reserved for
 * other callers.
 */
xfs_extlen_t
xfs_alloc_longest_free_extent(
	struct xfs_perag	*pag,
	xfs_extlen_t		need,
	xfs_extlen_t		reserved)
{
	xfs_extlen_t		delta = 0;

	/*
	 * If the AGFL needs a recharge, we'll have to subtract that from the
	 * longest extent.
	 */
	if (need > pag->pagf_flcount)
		delta = need - pag->pagf_flcount;

	/*
	 * If we cannot maintain others' reservations with space from the
	 * not-longest freesp extents, we'll have to subtract /that/ from
	 * the longest extent too.
	 */
	if (pag->pagf_freeblks - pag->pagf_longest < reserved)
		delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);

	/*
	 * If the longest extent is long enough to satisfy all the
	 * reservations and AGFL rules in place, we can return this extent.
	 */
	if (pag->pagf_longest > delta)
		return pag->pagf_longest - delta;

	/* Otherwise, let the caller try for 1 block if there's space. */
	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}
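
/*
 * Worked example (illustrative): pagf_longest = 40, pagf_freeblks = 100,
 * pagf_flcount = 3, need = 6, reserved = 70.  Recharging the AGFL costs
 * delta = 6 - 3 = 3; the not-longest extents hold only 100 - 40 = 60 of
 * the 70 reserved blocks, adding 70 - 60 = 10, so delta = 13 and the
 * usable longest extent is 40 - 13 = 27 blocks.
 */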
unsigned int
xfs_alloc_min_freelist(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag)
{
	unsigned int		min_free;

	/* space needed by-bno freespace btree */
	min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
				       mp->m_ag_maxlevels);
	/* space needed by-size freespace btree */
	min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
				        mp->m_ag_maxlevels);
	/* space needed by the reverse mapping btree */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		min_free += min_t(unsigned int,
				  pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
				  mp->m_rmap_maxlevels);

	return min_free;
}
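
/*
 * Worked example (illustrative): with 2-level bnobt and cntbt and a 3-level
 * rmapbt, the freelist must hold (2 + 1) + (2 + 1) + (3 + 1) = 10 blocks so
 * that each btree can split all the way up through its root in a single
 * transaction.
 */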
/*
 * Check if the operation we are fixing up the freelist for should go ahead or
 * not. If we are freeing blocks, we always allow it, otherwise the allocation
 * is dependent on whether the size and shape of free space available will
 * permit the requested allocation to take place.
 */
static bool
xfs_alloc_space_available(
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		min_free,
	int			flags)
{
	struct xfs_perag	*pag = args->pag;
	xfs_extlen_t		alloc_len, longest;
	xfs_extlen_t		reservation; /* blocks that are still reserved */
	int			available;

	if (flags & XFS_ALLOC_FLAG_FREEING)
		return true;

	reservation = xfs_ag_resv_needed(pag, args->resv);

	/* do we have enough contiguous free space for the allocation? */
	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
	longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
	if (longest < alloc_len)
		return false;

	/* do we have enough free space remaining for the allocation? */
	available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
			  reservation - min_free - args->minleft);
	if (available < (int)max(args->total, alloc_len))
		return false;

	/*
	 * Clamp maxlen to the amount of free space available for the actual
	 * extent allocation.
	 */
	if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
		args->maxlen = available;
		ASSERT(args->maxlen > 0);
		ASSERT(args->maxlen >= args->minlen);
	}

	return true;
}
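
/*
 * Worked example (illustrative): pagf_freeblks = 100, pagf_flcount = 4,
 * reservation = 20, min_free = 10 and args->minleft = 4 give
 * available = 70.  An allocation with total = 50 and alloc_len = 8 passes
 * both checks, and an args->maxlen of 80 would be clamped down to 70.
 */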
int
xfs_free_agfl_block(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	struct xfs_buf		*agbp,
	struct xfs_owner_info	*oinfo)
{
	int			error;
	struct xfs_buf		*bp;

	error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
				   XFS_AG_RESV_AGFL);
	if (error)
		return error;

	bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno, 0);
	if (!bp)
		return -EFSCORRUPTED;
	xfs_trans_binval(tp, bp);

	return 0;
}
/*
 * Check the agfl fields of the agf for inconsistency or corruption. The purpose
 * is to detect an agfl header padding mismatch between current and early v5
 * kernels. This problem manifests as a 1-slot size difference between the
 * on-disk flcount and the active [first, last] range of a wrapped agfl. This
 * may also catch variants of agfl count corruption unrelated to padding. Either
 * way, we'll reset the agfl and warn the user.
 *
 * Return true if a reset is required before the agfl can be used, false
 * otherwise.
 */
static bool
xfs_agfl_needs_reset(
	struct xfs_mount	*mp,
	struct xfs_agf		*agf)
{
	uint32_t		f = be32_to_cpu(agf->agf_flfirst);
	uint32_t		l = be32_to_cpu(agf->agf_fllast);
	uint32_t		c = be32_to_cpu(agf->agf_flcount);
	int			agfl_size = xfs_agfl_size(mp);
	int			active;

	/* no agfl header on v4 supers */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;

	/*
	 * The agf read verifier catches severe corruption of these fields.
	 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
	 * the verifier allows it.
	 */
	if (f >= agfl_size || l >= agfl_size)
		return true;
	if (c > agfl_size)
		return true;

	/*
	 * Check consistency between the on-disk count and the active range. An
	 * agfl padding mismatch manifests as an inconsistent flcount.
	 */
	if (c && l >= f)
		active = l - f + 1;
	else if (c)
		active = agfl_size - f + l + 1;
	else
		active = 0;

	return active != c;
}
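
/*
 * Worked example (illustrative): with 119 AGFL slots, a wrapped active
 * range of flfirst = 100 and fllast = 2 spans 119 - 100 + 2 + 1 = 22 slots,
 * so an on-disk flcount of 21 or 23 betrays the padding mismatch and forces
 * a reset.
 */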
/*
 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
 * agfl content cannot be trusted. Warn the user that a repair is required to
 * recover leaked blocks.
 *
 * The purpose of this mechanism is to handle filesystems affected by the agfl
 * header padding mismatch problem. A reset keeps the filesystem online with a
 * relatively minor free space accounting inconsistency rather than suffer the
 * inevitable crash from use of an invalid agfl block.
 */
static void
xfs_agfl_reset(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	ASSERT(pag->pagf_agflreset);
	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);

	xfs_warn(mp,
	       "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
	       "Please unmount and run xfs_repair.",
		 pag->pag_agno, pag->pagf_flcount);

	agf->agf_flfirst = 0;
	agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
	agf->agf_flcount = 0;
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
				    XFS_AGF_FLCOUNT);

	pag->pagf_flcount = 0;
	pag->pagf_agflreset = false;
}
/*
 * Defer an AGFL block free. This is effectively equivalent to
 * xfs_bmap_add_free() with some special handling particular to AGFL blocks.
 *
 * Deferring AGFL frees helps prevent log reservation overruns due to too many
 * allocation operations in a transaction. AGFL frees are prone to this problem
 * because, for one thing, they are always freed one at a time. Further, an
 * immediate AGFL block free can cause a btree join and require another block
 * free before the real allocation can proceed. Deferring the free disconnects
 * freeing up the AGFL slot from freeing the block.
 */
STATIC void
xfs_defer_agfl_block(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	xfs_agnumber_t			agno,
	xfs_fsblock_t			agbno,
	struct xfs_owner_info		*oinfo)
{
	struct xfs_extent_free_item	*new;		/* new element */

	ASSERT(xfs_bmap_free_item_zone != NULL);
	ASSERT(oinfo != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
	new->xefi_blockcount = 1;
	new->xefi_oinfo = *oinfo;

	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);

	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
}
/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size.
 */
int			/* error */
xfs_alloc_fix_freelist(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	int			flags)	/* XFS_ALLOC_FLAG_... */
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*pag = args->pag;
	struct xfs_trans	*tp = args->tp;
	struct xfs_buf		*agbp = NULL;
	struct xfs_buf		*agflbp = NULL;
	struct xfs_alloc_arg	targs;	/* local allocation arguments */
	xfs_agblock_t		bno;	/* freelist block */
	xfs_extlen_t		need;	/* total blocks needed in freelist */
	int			error = 0;
	if (!pag->pagf_init) {
		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
		if (error)
			goto out_no_agbp;
		if (!pag->pagf_init) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			goto out_agbp_relse;
		}
	}

	/*
	 * If this is a metadata preferred pag and we are user data then try
	 * somewhere else if we are not being asked to try harder at this
	 * point.
	 */
	if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
		goto out_agbp_relse;
	}

	need = xfs_alloc_min_freelist(mp, pag);
	if (!xfs_alloc_space_available(args, need, flags |
			XFS_ALLOC_FLAG_CHECK))
		goto out_agbp_relse;

	/*
	 * Get the a.g. freespace buffer.
	 * Can fail if we're not blocking on locks, and it's held.
	 */
	if (!agbp) {
		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
		if (error)
			goto out_no_agbp;
		if (!agbp) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			goto out_no_agbp;
		}
	}

	/* reset a padding-mismatched agfl before the final free space check */
	if (pag->pagf_agflreset)
		xfs_agfl_reset(tp, agbp, pag);

	/*
	 * If there isn't enough total free space or there isn't a large
	 * enough single extent, reject this AG.
	 */
	need = xfs_alloc_min_freelist(mp, pag);
	if (!xfs_alloc_space_available(args, need, flags))
		goto out_agbp_relse;
	/*
	 * Make the freelist shorter if it's too long.
	 *
	 * Note that from this point onwards, we will always release the agf
	 * and agfl buffers on error. This handles the case where we error
	 * out and the buffers are clean or may not have been joined to the
	 * transaction and hence need to be released manually. If they have
	 * been joined to the transaction, then xfs_trans_brelse() will handle
	 * them appropriately based on the recursion count and dirty state of
	 * the buffer.
	 *
	 * XXX (dgc): When we have lots of free space, does this buy us
	 * anything other than extra overhead when we need to put more blocks
	 * back on the free list? Maybe we should only do this when space is
	 * getting low or the AGFL is more than half full?
	 *
	 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
	 * big; the NORMAP flag prevents AGFL expand/shrink operations from
	 * updating the rmapbt. Both flags are used in xfs_repair while we're
	 * rebuilding the rmapbt, and neither is used by the kernel itself.
	 * They're both required to ensure that rmaps are correctly recorded
	 * for the regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
	 * repair/rmap.c in xfsprogs for details.
	 */
	memset(&targs, 0, sizeof(targs));
	if (flags & XFS_ALLOC_FLAG_NORMAP)
		xfs_rmap_skip_owner_update(&targs.oinfo);
	else
		xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
	while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
		if (error)
			goto out_agbp_relse;

		/* defer agfl frees if dfops is provided */
		if (tp->t_agfl_dfops) {
			xfs_defer_agfl_block(mp, tp->t_agfl_dfops, args->agno,
					     bno, &targs.oinfo);
		} else {
			error = xfs_free_agfl_block(tp, args->agno, bno, agbp,
						    &targs.oinfo);
			if (error)
				goto out_agbp_relse;
		}
	}
	targs.tp = tp;
	targs.mp = mp;
	targs.agbp = agbp;
	targs.agno = args->agno;
	targs.alignment = targs.minlen = targs.prod = 1;
	targs.type = XFS_ALLOCTYPE_THIS_AG;
	targs.pag = pag;
	error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
	if (error)
		goto out_agbp_relse;

	/* Make the freelist longer if it's too short. */
	while (pag->pagf_flcount < need) {
		targs.agbno = 0;
		targs.maxlen = need - pag->pagf_flcount;
		targs.resv = XFS_AG_RESV_AGFL;

		/* Allocate as many blocks as possible at once. */
		error = xfs_alloc_ag_vextent(&targs);
		if (error)
			goto out_agflbp_relse;

		/*
		 * Stop if we run out. Won't happen if callers are obeying
		 * the restrictions correctly. Can happen for free calls
		 * on a completely full ag.
		 */
		if (targs.agbno == NULLAGBLOCK) {
			if (flags & XFS_ALLOC_FLAG_FREEING)
				break;
			goto out_agflbp_relse;
		}
		/*
		 * Put each allocated block on the list.
		 */
		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
			error = xfs_alloc_put_freelist(tp, agbp,
							agflbp, bno, 0);
			if (error)
				goto out_agflbp_relse;
		}
	}
	xfs_trans_brelse(tp, agflbp);
	args->agbp = agbp;
	return 0;

out_agflbp_relse:
	xfs_trans_brelse(tp, agflbp);
out_agbp_relse:
	if (agbp)
		xfs_trans_brelse(tp, agbp);
out_no_agbp:
	args->agbp = NULL;
	return error;
}
/*
 * Get a block from the freelist.
 * The block address is passed back in bnop; NULLAGBLOCK if the list is empty.
 */
int				/* error */
xfs_alloc_get_freelist(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
	int		btreeblk) /* destination is an AGF btree */
{
	xfs_agf_t	*agf;	/* a.g. freespace structure */
	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
	xfs_agblock_t	bno;	/* block number returned */
	__be32		*agfl_bno;
	int		error;
	int		logflags;
	xfs_mount_t	*mp = tp->t_mountp;
	xfs_perag_t	*pag;	/* per allocation group data */

	/*
	 * Freelist is empty, give up.
	 */
	agf = XFS_BUF_TO_AGF(agbp);
	if (!agf->agf_flcount) {
		*bnop = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Read the array of free blocks.
	 */
	error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
				    &agflbp);
	if (error)
		return error;

	/*
	 * Get the block number and update the data structures.
	 */
	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
	be32_add_cpu(&agf->agf_flfirst, 1);
	xfs_trans_brelse(tp, agflbp);
	/* the AGFL is circular: wrap flfirst back to the first slot */
	if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
		agf->agf_flfirst = 0;

	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
	ASSERT(!pag->pagf_agflreset);
	be32_add_cpu(&agf->agf_flcount, -1);
	xfs_trans_agflist_delta(tp, -1);
	pag->pagf_flcount--;

	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, 1);
		pag->pagf_btreeblks++;
		logflags |= XFS_AGF_BTREEBLKS;
	}
	xfs_perag_put(pag);

	xfs_alloc_log_agf(tp, agbp, logflags);
	*bnop = bno;

	return 0;
}
/*
 * Log the given fields from the agf structure.
 */
void
xfs_alloc_log_agf(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*bp,	/* buffer for a.g. freespace header */
	int		fields)	/* mask of fields to be logged (XFS_AGF_...) */
{
	int	first;		/* first byte offset */
	int	last;		/* last byte offset */
	static const short	offsets[] = {
		offsetof(xfs_agf_t, agf_magicnum),
		offsetof(xfs_agf_t, agf_versionnum),
		offsetof(xfs_agf_t, agf_seqno),
		offsetof(xfs_agf_t, agf_length),
		offsetof(xfs_agf_t, agf_roots[0]),
		offsetof(xfs_agf_t, agf_levels[0]),
		offsetof(xfs_agf_t, agf_flfirst),
		offsetof(xfs_agf_t, agf_fllast),
		offsetof(xfs_agf_t, agf_flcount),
		offsetof(xfs_agf_t, agf_freeblks),
		offsetof(xfs_agf_t, agf_longest),
		offsetof(xfs_agf_t, agf_btreeblks),
		offsetof(xfs_agf_t, agf_uuid),
		offsetof(xfs_agf_t, agf_rmap_blocks),
		offsetof(xfs_agf_t, agf_refcount_blocks),
		offsetof(xfs_agf_t, agf_refcount_root),
		offsetof(xfs_agf_t, agf_refcount_level),
		/* needed so that we don't log the whole rest of the structure: */
		offsetof(xfs_agf_t, agf_spare64),
		sizeof(xfs_agf_t)
	};

	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);

	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}
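/*
 * Standalone sketch of the offsets-table technique used above (a simplified,
 * hypothetical stand-in for xfs_btree_offsets(), which is defined elsewhere):
 * given a bitmask of dirty fields and a table of each field's byte offset,
 * compute the smallest contiguous byte range covering every set field. The
 * table carries one sentinel entry past the last loggable field so the end
 * of the final field can be computed, which is why agf_spare64 appears above.
 */
static void
field_range_example(int fields, const short *offsets, int nbits,
		    int *first, int *last)
{
	int	i;
	int	seen = 0;

	*first = 0;
	*last = 0;
	for (i = 0; i < nbits; i++) {
		if (!(fields & (1 << i)))
			continue;
		if (!seen) {
			*first = offsets[i];	/* start of lowest set field */
			seen = 1;
		}
		*last = offsets[i + 1] - 1;	/* end of highest set field */
	}
}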
/*
 * Interface for inode allocation to force the pag data to be initialized.
 */
int					/* error */
xfs_alloc_pagf_init(
	xfs_mount_t		*mp,	/* file system mount structure */
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags)	/* XFS_ALLOC_FLAG_... */
{
	xfs_buf_t		*bp;
	int			error;

	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}
/*
 * Put the block on the freelist for the allocation group.
 */
int					/* error */
xfs_alloc_put_freelist(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_buf_t		*agbp,	/* buffer for a.g. freespace header */
	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
	xfs_agblock_t		bno,	/* block being freed */
	int			btreeblk) /* block came from an AGF btree */
{
	xfs_agf_t		*agf;	/* a.g. freespace structure */
	__be32			*blockp;/* pointer to array entry */
	int			error;
	int			logflags;
	xfs_mount_t		*mp;	/* mount structure */
	xfs_perag_t		*pag;	/* per allocation group data */
	__be32			*agfl_bno;
	int			startoff;

	agf = XFS_BUF_TO_AGF(agbp);
	mp = tp->t_mountp;

	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
			be32_to_cpu(agf->agf_seqno), &agflbp)))
		return error;
	be32_add_cpu(&agf->agf_fllast, 1);
	/* the AGFL is circular: wrap fllast back to the first slot */
	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
		agf->agf_fllast = 0;

	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
	ASSERT(!pag->pagf_agflreset);
	be32_add_cpu(&agf->agf_flcount, 1);
	xfs_trans_agflist_delta(tp, 1);
	pag->pagf_flcount++;

	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, -1);
		pag->pagf_btreeblks--;
		logflags |= XFS_AGF_BTREEBLKS;
	}
	xfs_perag_put(pag);

	xfs_alloc_log_agf(tp, agbp, logflags);

	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));

	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
	*blockp = cpu_to_be32(bno);
	startoff = (char *)blockp - (char *)agflbp->b_addr;

	xfs_alloc_log_agf(tp, agbp, logflags);

	xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
	xfs_trans_log_buf(tp, agflbp, startoff,
			  startoff + sizeof(xfs_agblock_t) - 1);
	return 0;
}
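/*
 * Standalone userspace sketch (hypothetical type) of the circular get/put
 * scheme implemented by xfs_alloc_get_freelist() and xfs_alloc_put_freelist()
 * above: flfirst chases fllast around a fixed-size array, and both indices
 * wrap back to slot zero when they step off the end.
 */
#define RING_SIZE_EXAMPLE	118	/* e.g. one 512-byte sector of slots */

struct ring_example {
	unsigned int	first;	/* index of the oldest entry */
	unsigned int	last;	/* index of the newest entry */
	unsigned int	count;	/* number of active entries */
	unsigned int	bno[RING_SIZE_EXAMPLE];
};

/* Pop the oldest entry; mirrors advancing agf_flfirst with wraparound. */
static int
ring_get_example(struct ring_example *r, unsigned int *bnop)
{
	if (!r->count)
		return -1;		/* list is empty */
	*bnop = r->bno[r->first];
	if (++r->first == RING_SIZE_EXAMPLE)
		r->first = 0;		/* wrap past the last slot */
	r->count--;
	return 0;
}

/* Push a new entry (assumes space remains); mirrors advancing agf_fllast. */
static void
ring_put_example(struct ring_example *r, unsigned int bno)
{
	if (++r->last == RING_SIZE_EXAMPLE)
		r->last = 0;		/* wrap past the last slot */
	r->bno[r->last] = bno;
	r->count++;
}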
static xfs_failaddr_t
xfs_agf_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (!xfs_log_check_lsn(mp,
				be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
			return __this_address;
	}

	if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
	      XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
	      be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
	      be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
	      be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
	      be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
		return __this_address;

	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
		return __this_address;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
	    (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
		return __this_address;

	/*
	 * During growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
	    be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
		return __this_address;

	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
	    (be32_to_cpu(agf->agf_refcount_level) < 1 ||
	     be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
		return __this_address;

	return NULL;
}
static void
xfs_agf_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_failaddr_t	fa;

	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agf_verify(bp);
		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agf_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	fa = xfs_agf_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
}

const struct xfs_buf_ops xfs_agf_buf_ops = {
	.name = "xfs_agf",
	.verify_read = xfs_agf_read_verify,
	.verify_write = xfs_agf_write_verify,
	.verify_struct = xfs_agf_verify,
};
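/*
 * Standalone userspace sketch of the write-verifier ordering above (all
 * names and the toy checksum are hypothetical, not the kernel API): run the
 * structural checks first, then stamp the sequence number, and compute the
 * checksum last so that it covers the final contents of the buffer.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct example_hdr {
	uint32_t	magic;
	uint64_t	lsn;	/* stamped just before the checksum */
	uint32_t	crc;	/* toy sum; the real code uses crc32c */
};

static uint32_t
example_sum(const void *buf, size_t len)
{
	const uint8_t	*p = buf;
	uint32_t	sum = 0;

	while (len--)
		sum = (sum << 1) ^ *p++;
	return sum;
}

static bool
example_write_verify(struct example_hdr *hdr, uint64_t lsn)
{
	if (hdr->magic != 0x58414746)	/* structural check first ("XAGF") */
		return false;
	hdr->lsn = lsn;			/* then record the flush LSN */
	hdr->crc = 0;			/* zero the crc field, then sum */
	hdr->crc = example_sum(hdr, sizeof(*hdr));
	return true;
}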
/*
 * Read in the allocation group header (free/alloc section).
 */
int					/* error */
xfs_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XBF_... buffer flags */
	struct xfs_buf		**bpp)	/* buffer for the ag freespace header */
{
	int			error;

	trace_xfs_read_agf(mp, agno);

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
	if (error)
		return error;
	if (!*bpp)
		return 0;

	ASSERT(!(*bpp)->b_error);
	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
	return 0;
}
/*
 * Read in the allocation group header (free/alloc section) and initialise the
 * in-core perag data on first read.
 */
int					/* error */
xfs_alloc_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XFS_ALLOC_FLAG_... */
	struct xfs_buf		**bpp)	/* buffer for the ag freespace header */
{
	struct xfs_agf		*agf;	/* ag freelist header */
	struct xfs_perag	*pag;	/* per allocation group data */
	int			error;

	trace_xfs_alloc_read_agf(mp, agno);

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_read_agf(mp, tp, agno,
			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
			bpp);
	if (error)
		return error;
	if (!*bpp)
		return 0;
	ASSERT(!(*bpp)->b_error);

	agf = XFS_BUF_TO_AGF(*bpp);
	pag = xfs_perag_get(mp, agno);
	if (!pag->pagf_init) {
		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
		pag->pagf_levels[XFS_BTNUM_BNOi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
		pag->pagf_levels[XFS_BTNUM_CNTi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
		pag->pagf_levels[XFS_BTNUM_RMAPi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
		pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
		pag->pagf_init = 1;
		pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
	}
#ifdef DEBUG
	else if (!XFS_FORCED_SHUTDOWN(mp)) {
		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
	}
#endif
	xfs_perag_put(pag);
	return 0;
}
/*
 * Allocate an extent (variable-size).
 * Depending on the allocation type, we either look in a single allocation
 * group or loop over the allocation groups to find the result.
 */
int				/* error */
xfs_alloc_vextent(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_agblock_t	agsize;	/* allocation group size */
	int		error;
	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
	xfs_mount_t	*mp;	/* mount structure pointer */
	xfs_agnumber_t	sagno;	/* starting allocation group number */
	xfs_alloctype_t	type;	/* input allocation type */
	int		bump_rotor = 0;
	xfs_agnumber_t	rotorstep = xfs_rotorstep; /* inode32 agf stepper */

	mp = args->mp;
	type = args->otype = args->type;
	args->agbno = NULLAGBLOCK;
	/*
	 * Just fix this up, for the case where the last a.g. is shorter
	 * (or there's only one a.g.) and the caller couldn't easily figure
	 * that out (xfs_bmap_alloc).
	 */
	agsize = mp->m_sb.sb_agblocks;
	if (args->maxlen > agsize)
		args->maxlen = agsize;
	if (args->alignment == 0)
		args->alignment = 1;
	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->minlen <= agsize);
	ASSERT(args->mod < args->prod);
	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
	    args->minlen > args->maxlen || args->minlen > agsize ||
	    args->mod >= args->prod) {
		args->fsbno = NULLFSBLOCK;
		trace_xfs_alloc_vextent_badargs(args);
		return 0;
	}

	switch (type) {
	case XFS_ALLOCTYPE_THIS_AG:
	case XFS_ALLOCTYPE_NEAR_BNO:
	case XFS_ALLOCTYPE_THIS_BNO:
		/*
		 * These three force us into a single a.g.
		 */
		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
		args->pag = xfs_perag_get(mp, args->agno);
		error = xfs_alloc_fix_freelist(args, 0);
		if (error) {
			trace_xfs_alloc_vextent_nofix(args);
			goto error0;
		}
		if (!args->agbp) {
			trace_xfs_alloc_vextent_noagbp(args);
			break;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		if ((error = xfs_alloc_ag_vextent(args)))
			goto error0;
		break;
	case XFS_ALLOCTYPE_START_BNO:
		/*
		 * Try near allocation first, then anywhere-in-ag after
		 * the first a.g. fails.
		 */
		if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
			args->fsbno = XFS_AGB_TO_FSB(mp,
					((mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount), 0);
			bump_rotor = 1;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
		/* FALLTHROUGH */
	case XFS_ALLOCTYPE_FIRST_AG:
		/*
		 * Rotate through the allocation groups looking for a winner.
		 */
		if (type == XFS_ALLOCTYPE_FIRST_AG) {
			/*
			 * Start with allocation group given by bno.
			 */
			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			args->type = XFS_ALLOCTYPE_THIS_AG;
			sagno = 0;
			flags = 0;
		} else {
			/*
			 * Start with the given allocation group.
			 */
			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		}
		/*
		 * Loop over allocation groups twice; first time with
		 * trylock set, second time without.
		 */
		for (;;) {
			args->pag = xfs_perag_get(mp, args->agno);
			error = xfs_alloc_fix_freelist(args, flags);
			if (error) {
				trace_xfs_alloc_vextent_nofix(args);
				goto error0;
			}
			/*
			 * If we get a buffer back then the allocation will fly.
			 */
			if (args->agbp) {
				if ((error = xfs_alloc_ag_vextent(args)))
					goto error0;
				break;
			}

			trace_xfs_alloc_vextent_loopfailed(args);

			/*
			 * Didn't work, figure out the next iteration.
			 */
			if (args->agno == sagno &&
			    type == XFS_ALLOCTYPE_START_BNO)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * For the first allocation, we can try any AG to get
			 * space. However, if we already have allocated a
			 * block, we don't want to try AGs whose number is
			 * below sagno. Otherwise, we may end up with
			 * out-of-order locking of AGF, which might cause
			 * deadlock.
			 */
			if (++(args->agno) == mp->m_sb.sb_agcount) {
				if (args->firstblock != NULLFSBLOCK)
					args->agno = sagno;
				else
					args->agno = 0;
			}
			/*
			 * Reached the starting a.g., must either be done
			 * or switch to non-trylock mode.
			 */
			if (args->agno == sagno) {
				if (flags == 0) {
					args->agbno = NULLAGBLOCK;
					trace_xfs_alloc_vextent_allfailed(args);
					break;
				}

				flags = 0;
				if (type == XFS_ALLOCTYPE_START_BNO) {
					args->agbno = XFS_FSB_TO_AGBNO(mp,
						args->fsbno);
					args->type = XFS_ALLOCTYPE_NEAR_BNO;
				}
			}
			xfs_perag_put(args->pag);
		}
		if (bump_rotor) {
			if (args->agno == sagno)
				mp->m_agfrotor = (mp->m_agfrotor + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
			else
				mp->m_agfrotor = (args->agno * rotorstep + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
		}
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (args->agbno == NULLAGBLOCK)
		args->fsbno = NULLFSBLOCK;
	else {
		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
		ASSERT(args->len >= args->minlen);
		ASSERT(args->len <= args->maxlen);
		ASSERT(args->agbno % args->alignment == 0);
		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
			args->len);
#endif

		/* Zero the extent if we were asked to do so */
		if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(args->ip, args->fsbno,
						args->len);
			if (error)
				goto error0;
		}
	}
	xfs_perag_put(args->pag);
	return 0;
error0:
	xfs_perag_put(args->pag);
	return error;
}
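/*
 * Hypothetical caller sketch (transaction setup, rmap owner details and
 * error handling are simplified): ask for between 1 and 16 blocks near
 * @target, letting xfs_alloc_vextent() fall back to other AGs as needed.
 * The helper itself is illustrative only, not kernel code.
 */
STATIC int
example_alloc_near(
	struct xfs_trans	*tp,
	xfs_fsblock_t		target,
	xfs_fsblock_t		firstblock,
	xfs_fsblock_t		*new_fsbno)
{
	struct xfs_alloc_arg	args;
	int			error;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.fsbno = target;			/* preferred location */
	args.type = XFS_ALLOCTYPE_START_BNO;	/* near target, then any AG */
	args.minlen = 1;
	args.maxlen = 16;
	args.prod = 1;
	args.alignment = 1;
	args.firstblock = firstblock;
	args.resv = XFS_AG_RESV_NONE;
	/* placeholder owner; real callers record the actual extent owner */
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_UNKNOWN);

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;
	*new_fsbno = args.fsbno;	/* NULLFSBLOCK if nothing was found */
	return 0;
}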
/* Ensure that the freelist is at full capacity. */
int
xfs_free_extent_fix_freelist(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	struct xfs_buf		**agbp)
{
	struct xfs_alloc_arg	args;
	int			error;

	memset(&args, 0, sizeof(struct xfs_alloc_arg));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.agno = agno;

	/*
	 * Validate that the block number is legal - this enables us to detect
	 * and handle a silent filesystem corruption rather than crashing.
	 */
	if (args.agno >= args.mp->m_sb.sb_agcount)
		return -EFSCORRUPTED;

	args.pag = xfs_perag_get(args.mp, args.agno);
	ASSERT(args.pag);

	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
	if (error)
		goto out;

	*agbp = args.agbp;
out:
	xfs_perag_put(args.pag);
	return error;
}
/*
 * Free an extent.
 * Just break up the extent address and hand off to xfs_free_ag_extent
 * after fixing up the freelist.
 */
int				/* error */
__xfs_free_extent(
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_fsblock_t		bno,	/* starting block number of extent */
	xfs_extlen_t		len,	/* length of extent */
	struct xfs_owner_info	*oinfo,	/* extent owner */
	enum xfs_ag_resv_type	type,	/* block reservation type */
	bool			skip_discard)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*agbp;
	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(mp, bno);
	xfs_agblock_t		agbno = XFS_FSB_TO_AGBNO(mp, bno);
	int			error;
	unsigned int		busy_flags = 0;

	ASSERT(len != 0);
	ASSERT(type != XFS_AG_RESV_AGFL);

	if (XFS_TEST_ERROR(false, mp,
			XFS_ERRTAG_FREE_EXTENT))
		return -EIO;

	error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
	if (error)
		return error;

	XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);

	/* validate the extent size is legal now that we have the agf locked */
	XFS_WANT_CORRUPTED_GOTO(mp,
		agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
		err);

	error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
	if (error)
		goto err;

	if (skip_discard)
		busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
	xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags);
	return 0;

err:
	xfs_trans_brelse(tp, agbp);
	return error;
}
struct xfs_alloc_query_range_info {
	xfs_alloc_query_range_fn	fn;
	void				*priv;
};

/* Format btree record and pass to our callback. */
STATIC int
xfs_alloc_query_range_helper(
	struct xfs_btree_cur		*cur,
	union xfs_btree_rec		*rec,
	void				*priv)
{
	struct xfs_alloc_query_range_info	*query = priv;
	struct xfs_alloc_rec_incore		irec;

	irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
	irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
	return query->fn(cur, &irec, query->priv);
}
/* Find all free space within a given range of blocks. */
int
xfs_alloc_query_range(
	struct xfs_btree_cur			*cur,
	struct xfs_alloc_rec_incore		*low_rec,
	struct xfs_alloc_rec_incore		*high_rec,
	xfs_alloc_query_range_fn		fn,
	void					*priv)
{
	union xfs_btree_irec			low_brec;
	union xfs_btree_irec			high_brec;
	struct xfs_alloc_query_range_info	query;

	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
	low_brec.a = *low_rec;
	high_brec.a = *high_rec;
	query.priv = priv;
	query.fn = fn;
	return xfs_btree_query_range(cur, &low_brec, &high_brec,
			xfs_alloc_query_range_helper, &query);
}
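/*
 * Hypothetical example of an xfs_alloc_query_range_fn callback: count how
 * many free-space records fall inside the queried range, threading the
 * counter through the @priv pointer. Illustrative only.
 */
STATIC int
example_count_records(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	unsigned int			*count = priv;

	(*count)++;
	return 0;	/* keep iterating over matching records */
}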
/* Find all free space records. */
int
xfs_alloc_query_all(
	struct xfs_btree_cur			*cur,
	xfs_alloc_query_range_fn		fn,
	void					*priv)
{
	struct xfs_alloc_query_range_info	query;

	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
	query.priv = priv;
	query.fn = fn;
	return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
}
/* Find the size of the AG, in blocks. */
xfs_agblock_t
xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	ASSERT(agno < mp->m_sb.sb_agcount);

	/* every AG but the last is sb_agblocks long; the last gets the rest */
	if (agno < mp->m_sb.sb_agcount - 1)
		return mp->m_sb.sb_agblocks;
	return mp->m_sb.sb_dblocks - (agno * mp->m_sb.sb_agblocks);
}
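/*
 * Worked example with hypothetical geometry: if sb_agblocks = 100 and
 * sb_dblocks = 350, AGs 0-2 hold 100 blocks each and the last AG (agno 3)
 * holds the remainder, 350 - 3 * 100 = 50 blocks.
 */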
/*
 * Verify that an AG block number pointer neither points outside the AG
 * nor points at static metadata.
 */
bool
xfs_verify_agbno(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno)
{
	xfs_agblock_t		eoag;

	eoag = xfs_ag_block_count(mp, agno);
	if (agbno >= eoag)
		return false;
	if (agbno <= XFS_AGFL_BLOCK(mp))
		return false;
	return true;
}

/*
 * Verify that an FS block number pointer neither points outside the
 * filesystem nor points at static AG metadata.
 */
bool
xfs_verify_fsbno(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsbno)
{
	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(mp, fsbno);

	if (agno >= mp->m_sb.sb_agcount)
		return false;
	return xfs_verify_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
}
/* Is there a record covering a given extent? */
int
xfs_alloc_has_record(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*exists)
{
	union xfs_btree_irec	low;
	union xfs_btree_irec	high;

	memset(&low, 0, sizeof(low));
	low.a.ar_startblock = bno;
	/* 0xFF-fill the high key so every field other than the start is max */
	memset(&high, 0xFF, sizeof(high));
	high.a.ar_startblock = bno + len - 1;

	return xfs_btree_has_record(cur, &low, &high, exists);
}
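/*
 * Hypothetical usage sketch (illustrative only): given an existing by-block
 * (bnobt) cursor, decide whether any part of [agbno, agbno + len) is still
 * recorded as free space.
 */
STATIC int
example_extent_is_free(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	bool			has;
	int			error;

	error = xfs_alloc_has_record(cur, agbno, len, &has);
	if (error)
		return error;
	return has ? 1 : 0;	/* 1: some of the range is free */
}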
/*
 * Walk all the blocks in the AGFL. The @walk_fn can return any negative
 * error code or XFS_BTREE_QUERY_RANGE_ABORT.
 */
int
xfs_agfl_walk(
	struct xfs_mount	*mp,
	struct xfs_agf		*agf,
	struct xfs_buf		*agflbp,
	xfs_agfl_walk_fn	walk_fn,
	void			*priv)
{
	__be32			*agfl_bno;
	unsigned int		i;
	int			error;

	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
	i = be32_to_cpu(agf->agf_flfirst);

	/* Nothing to walk in an empty AGFL. */
	if (agf->agf_flcount == cpu_to_be32(0))
		return 0;

	/* Otherwise, walk from first to last, wrapping as needed. */
	for (;;) {
		error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (i == be32_to_cpu(agf->agf_fllast))
			break;
		if (++i == xfs_agfl_size(mp))
			i = 0;
	}

	return 0;
}