/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag_resv.h"

extern kmem_zone_t	*xfs_bmap_free_item_zone;

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);

/*
 * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots in
 * the beginning of the block for a proper header with the location information
 * and CRC.
 */
unsigned int
xfs_agfl_size(
	struct xfs_mount	*mp)
{
	unsigned int		size = mp->m_sb.sb_sectsize;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		size -= sizeof(struct xfs_agfl);

	return size / sizeof(xfs_agblock_t);
}
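
/*
 * Illustrative example: with 512 byte sectors on a v5 (CRC) filesystem,
 * and assuming a 36 byte struct xfs_agfl header, there are
 * (512 - 36) / 4 = 119 AGFL slots; a v4 filesystem uses the whole
 * sector, 512 / 4 = 128 slots.
 */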

unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		return xfs_refc_block(mp) + 1;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

/*
 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
 * AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks. However, these restrictions may result in no actual space
 * allocated for a delayed extent. For example, a data block in a certain AG is
 * allocated, but there is no additional block for the bmap btree block that a
 * split of the file's bmap btree would require. The result of this may lead
 * to an infinite loop when the file gets flushed to disk and all delayed
 * extents need to be actually allocated. To get around this, we explicitly set
 * aside a few blocks which will not be reserved in delayed allocation.
 *
 * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
 * potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
}
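
/*
 * Illustrative example: XFS_ALLOC_AGFL_RESERVE is 4, so a filesystem with
 * sb_agcount = 16 sets aside 16 * (4 + 4) = 128 blocks that delayed
 * allocation may never reserve.
 */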

/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG. However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata. These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode and rmap btree root blocks.
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry. The others are all single blocks.
 */
xfs_extlen_t
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	xfs_extlen_t		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
	blocks += XFS_ALLOC_AGFL_RESERVE;
	blocks += 3;			/* AGF, AGI btree root blocks */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		blocks++;		/* finobt root block */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		blocks++;		/* rmap root block */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		blocks++;		/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}
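
/*
 * Illustrative example: with 4096 byte blocks and 512 byte sectors, the
 * four sector-sized headers round up to one block, so on a filesystem with
 * finobt, rmapbt and reflink enabled the deduction is
 * 1 + XFS_ALLOC_AGFL_RESERVE + 3 + 1 + 1 + 1 = 11 blocks per AG.
 */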

/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_agnumber_t		agno = cur->bc_private.a.agno;
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !(*stat))
		return error;
	if (rec->alloc.ar_blockcount == 0)
		goto out_bad_rec;

	*bno = be32_to_cpu(rec->alloc.ar_startblock);
	*len = be32_to_cpu(rec->alloc.ar_blockcount);

	/* check for valid extent range, including overflow */
	if (!xfs_verify_agbno(mp, agno, *bno))
		goto out_bad_rec;
	if (*bno > *bno + *len)
		goto out_bad_rec;
	if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
		goto out_bad_rec;

	return 0;

out_bad_rec:
	xfs_warn(mp,
		"%s Freespace BTree record corruption in AG %d detected!",
		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
	xfs_warn(mp,
		"start block 0x%x block count 0x%x", *bno, *len);
	return -EFSCORRUPTED;
}

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	/* Trim busy sections out of found extent */
	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	/*
	 * If we have a largish extent that happens to start before min_agbno,
	 * see if we can shift it into range...
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}
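
/*
 * Illustrative example: for a found extent [bno 6, len 10] and
 * args->alignment = 4, roundup() gives aligned_bno = 8 and diff = 2, so
 * the aligned result is [8, 8]; had diff consumed the whole extent,
 * *reslen would be 0 and the caller would skip this record.
 */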

/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */
	bool		userdata = xfs_alloc_is_userdata(datatype);

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before desired block. The second case is there to allow
	 * for contiguous allocation from the remaining free space if the file
	 * grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
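
/*
 * Illustrative example: wantbno = 100, wantlen = 8, alignment = 4 and
 * freespace [90, 120): newbno1 = roundup(100, 4) = 100 and newbno2 = 96
 * both fit with newlen1 == newlen2 == 8, and newbno1 wins the tie because
 * it is closer to wantbno, so the returned diff is
 * XFS_ABSDIFF(100, 100) = 0.
 */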

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* casts to (int) catch length underflows */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}
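
/*
 * Illustrative arithmetic: prod = 4, mod = 1, len = 11 gives
 * k = 11 % 4 = 3 > mod, so rlen = 11 - (3 - 1) = 9 and 9 % 4 == 1 as
 * required; with len = 6, k = 2 > mod gives rlen = 5.
 */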

/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(mp,
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	return 0;
}
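
/*
 * Illustrative sketch of the four cases handled above, allocating
 * [rbno, rlen) out of free extent [fbno, flen):
 *
 *	exact match:	|rrrrrrrr|	-> no freespace remains
 *	start match:	|rrrr....|	-> one record, at rbno + rlen
 *	end match:	|....rrrr|	-> one record, at fbno
 *	middle:		|..rrrr..|	-> two records, one on each side
 */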

static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	int		i;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return NULL;

	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
		return __this_address;
	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < xfs_agfl_size(mp); i++) {
		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return __this_address;
	}

	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
		return __this_address;
	return NULL;
}

static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_failaddr_t	fa;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agfl_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	/* no verification of non-crc AGFLs */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	fa = xfs_agfl_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
	.verify_struct = xfs_agfl_verify,
};

/*
 * Read in the allocation group free block array.
 */
int					/* error */
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
{
	xfs_buf_t	*bp;		/* return value */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}

STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length)))
		return -EFSCORRUPTED;

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}

/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);

	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
	ASSERT(args->agbno % args->alignment == 0);

	/* if not file data, insert new block into the reverse map btree */
	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
				       args->agbno, args->len, &args->oinfo);
		if (error)
			return error;
	}

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
					       args->agbno, args->len));
	}

	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);

	XFS_STATS_INC(args->mp, xs_allocx);
	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
	return error;
}

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of busy extent */
	xfs_extlen_t	tlen;	/* length of busy extent */
	xfs_agblock_t	tend;	/* end block of busy extent */
	int		i;	/* success/failure of operation */
	unsigned	busy_gen;

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	tbno = fbno;
	tlen = flen;
	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}

/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,	/* extent length */
	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
	xfs_extlen_t		*slena,	/* aligned extent length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;
	unsigned		busy_gen;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the end.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen,
				sbnoa, slena, &busy_gen);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (*sbnoa > args->max_agbno)
				goto out_use_good;
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa < args->min_agbno)
				goto out_use_good;
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment,
						       args->datatype, *sbnoa,
						       *slena, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}

/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
	xfs_agblock_t	gtbno;		/* start bno of right side entry */
	xfs_agblock_t	gtbnoa;		/* aligned ... */
	xfs_extlen_t	gtdiff;		/* difference to right side entry */
	xfs_extlen_t	gtlen;		/* length of right side entry */
	xfs_extlen_t	gtlena;		/* aligned ... */
	xfs_agblock_t	gtnew;		/* useful start bno of right side */
	int		error;		/* error code */
	int		i;		/* result code, temporary */
	int		j;		/* result code, temporary */
	xfs_agblock_t	ltbno;		/* start bno of left side entry */
	xfs_agblock_t	ltbnoa;		/* aligned ... */
	xfs_extlen_t	ltdiff;		/* difference to left side entry */
	xfs_extlen_t	ltlen;		/* length of left side entry */
	xfs_extlen_t	ltlena;		/* aligned ... */
	xfs_agblock_t	ltnew;		/* useful start bno of left side */
	xfs_extlen_t	rlen;		/* length of returned extent */
	bool		busy;
	unsigned	busy_gen;
#ifdef DEBUG
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int		dofirst;	/* set to do first algorithm */

	dofirst = prandom_u32() & 1;
#endif

	/* handle uninitialized agbno range so caller doesn't have to */
	if (!args->min_agbno && !args->max_agbno)
		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
	ASSERT(args->min_agbno <= args->max_agbno);

	/* clamp agbno to the range if it's outside */
	if (args->agbno < args->min_agbno)
		args->agbno = args->min_agbno;
	if (args->agbno > args->max_agbno)
		args->agbno = args->max_agbno;

restart:
	bno_cur_lt = NULL;
	bno_cur_gt = NULL;
	ltlen = 0;
	gtlena = 0;
	ltlena = 0;
	busy = false;

	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);

	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t	bdiff;
		int		besti=0;
		xfs_extlen_t	blen=0;
		xfs_agblock_t	bnew=0;

#ifdef DEBUG
		if (dofirst)
			break;
#endif
		/*
		 * Start from the entry that lookup found, sequence through
		 * all larger free blocks.  If we're actually pointing at a
		 * record smaller than maxlen, go to the start of this block,
		 * and skip all those smaller than minlen.
		 */
		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
			/*
			 * For each entry, decide if it's better than
			 * the previous best entry.
			 */
			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
					&ltbnoa, &ltlena, &busy_gen);
			if (ltlena < args->minlen)
				continue;
			if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}
		/*
		 * It didn't work.  We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;
		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;

		/*
		 * We are allocating starting at bnew for blen blocks.
		 */
		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltbno + ltlen);
		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO);
		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);

		trace_xfs_alloc_near_first(args);
		return 0;
	}
	/*
	 * Second algorithm.
	 * Search in the by-bno tree to the left and to the right
	 * simultaneously, until in each case we find a space big enough,
	 * or run into the edge of the tree.  When we run into the edge,
	 * we deallocate that cursor.
	 * If both searches succeed, we compare the two spaces and pick
	 * the better one.
	 * With alignment, it's possible for both to fail; the upper
	 * level algorithm that picks allocation groups for allocations
	 * is not supposed to do this.
	 */
	/*
	 * Allocate and initialize the cursor for the leftward search.
	 */
	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	/*
	 * Lookup <= bno to find the leftward search's starting point.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find anything; use this cursor for the rightward
		 * search.
		 */
		bno_cur_gt = bno_cur_lt;
		bno_cur_lt = NULL;
	}
	/*
	 * Found something.  Duplicate the cursor for the rightward search.
	 */
	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
		goto error0;
	/*
	 * Increment the cursor, so we will point at the entry just right
	 * of the leftward entry if any, or to the leftmost entry.
	 */
	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
		goto error0;
	if (!i) {
		/*
		 * It failed, there are no rightward entries.
		 */
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
		bno_cur_gt = NULL;
	}
	/*
	 * Loop going left with the leftward cursor, right with the
	 * rightward cursor, until either both directions give up or
	 * we find an entry at least as big as minlen.
	 */
	do {
		if (bno_cur_lt) {
			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
					&ltbnoa, &ltlena, &busy_gen);
			if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
				break;
			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
				goto error0;
			if (!i || ltbnoa < args->min_agbno) {
				xfs_btree_del_cursor(bno_cur_lt,
						     XFS_BTREE_NOERROR);
				bno_cur_lt = NULL;
			}
		}
		if (bno_cur_gt) {
			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
					&gtbnoa, &gtlena, &busy_gen);
			if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
				break;
			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
				goto error0;
			if (!i || gtbnoa > args->max_agbno) {
				xfs_btree_del_cursor(bno_cur_gt,
						     XFS_BTREE_NOERROR);
				bno_cur_gt = NULL;
			}
		}
	} while (bno_cur_lt || bno_cur_gt);

	/*
	 * Got both cursors still active, need to find better entry.
	 */
	if (bno_cur_lt && bno_cur_gt) {
		if (ltlena >= args->minlen) {
			/*
			 * Left side is good, look for a right side entry.
			 */
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_lt, &bno_cur_gt,
						ltdiff, &gtbno, &gtlen,
						&gtbnoa, &gtlena,
						0 /* search right */);
		} else {
			ASSERT(gtlena >= args->minlen);

			/*
			 * Right side is good, look for a left side entry.
			 */
			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
			xfs_alloc_fix_len(args);
			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, gtbnoa,
				gtlena, &gtnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_gt, &bno_cur_lt,
						gtdiff, &ltbno, &ltlen,
						&ltbnoa, &ltlena,
						1 /* search left */);
		}

		if (error)
			goto error0;
	}

	/*
	 * If we couldn't get anything, give up.
	 */
	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

		if (busy) {
			trace_xfs_alloc_near_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}

	/*
	 * At this point we have selected a freespace entry, either to the
	 * left or to the right.  If it's on the right, copy all the
	 * useful variables to the "left" set so we only have one
	 * copy of this code.
	 */
	if (bno_cur_gt) {
		bno_cur_lt = bno_cur_gt;
		bno_cur_gt = NULL;
		ltbno = gtbno;
		ltbnoa = gtbnoa;
		ltlen = gtlen;
		ltlena = gtlena;
		j = 1;
	} else
		j = 0;

	/*
	 * Fix up the length and compute the useful address.
	 */
	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
	xfs_alloc_fix_len(args);
	rlen = args->len;
	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
				     args->datatype, ltbnoa, ltlena, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
	args->agbno = ltnew;

	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
		goto error0;

	if (j)
		trace_xfs_alloc_near_greater(args);
	else
		trace_xfs_alloc_near_lesser(args);

	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
	return 0;

 error0:
	trace_xfs_alloc_near_error(args);
	if (cnt_cur != NULL)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur_lt != NULL)
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
	if (bno_cur_gt != NULL)
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
	return error;
}

/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */
	bool		busy;
	unsigned	busy_gen;

restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;
	busy = false;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none then we have to settle for a smaller extent. In the case that
	 * there are no large extents, this will return the last entry in the
	 * tree unless the tree is empty. In the case that there are only busy
	 * large extents, this will return the largest small extent unless there
	 * are no smaller extents available.
	 */
	if (!i) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
				&rlen, &busy_gen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);

			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {
				/*
				 * Our only valid extents must have been busy.
				 * Make it unbusy by forcing the log out and
				 * retrying.
				 */
				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				xfs_extent_busy_flush(args->mp,
							args->pag, busy_gen);
				goto restart;
			}
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			if (flen < bestrlen)
				break;
			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (busy) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;

	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(args->mp,
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}

/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	struct xfs_owner_info	oinfo;
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;

	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 &&
		 args->resv != XFS_AG_RESV_AGFL &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
			      xfs_alloc_allow_busy_reuse(args->datatype));

			if (xfs_alloc_is_userdata(args->datatype)) {
				xfs_buf_t	*bp;

				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				if (!bp) {
					error = -EFSCORRUPTED;
					goto error0;
				}
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(args->mp,
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);

			/*
			 * If we're feeding an AGFL block to something that
			 * doesn't live in the free space, we need to clear
			 * out the OWN_AG rmap.
			 */
			xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
			error = xfs_rmap_free(args->tp, args->agbp, args->agno,
					fbno, 1, &oinfo);
			if (error)
				goto error0;

			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}

/*
 * Free the extent starting at agno/bno for length.
 */
STATIC int
xfs_free_ag_extent(
	xfs_trans_t		*tp,
	xfs_buf_t		*agbp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type	type)
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
	int		error;		/* error return value */
	xfs_agblock_t	gtbno;		/* start of right neighbor block */
	xfs_extlen_t	gtlen;		/* length of right neighbor block */
	int		haveleft;	/* have a left neighbor block */
	int		haveright;	/* have a right neighbor block */
	int		i;		/* temp, result code */
	xfs_agblock_t	ltbno;		/* start of left neighbor block */
	xfs_extlen_t	ltlen;		/* length of left neighbor block */
	xfs_mount_t	*mp;		/* mount point struct for filesystem */
	xfs_agblock_t	nbno;		/* new starting block of freespace */
	xfs_extlen_t	nlen;		/* new length of freespace */
	xfs_perag_t	*pag;		/* per allocation group data */

	bno_cur = cnt_cur = NULL;
	mp = tp->t_mountp;

	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
		error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
		if (error)
			goto error0;
	}

	/*
	 * Allocate and initialize a cursor for the by-block btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	/*
	 * Look for a neighboring block on the left (lower block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {
		/*
		 * There is a block to our left.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(mp,
						ltbno + ltlen <= bno, error0);
		}
	}
	/*
	 * Look for a neighboring block on the right (higher block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {
		/*
		 * There is a block to our right.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (bno + len < gtbno)
			haveright = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
		}
	}
	/*
	 * Now allocate and initialize a cursor for the by-size tree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
	/*
	 * Have both left and right contiguous neighbors.
	 * Merge all three into a single free block.
	 */
	if (haveleft && haveright) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * Delete the old by-block entry for the right block.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * Move the by-block cursor back to the left neighbor.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
#ifdef DEBUG
		/*
		 * Check that this is the right record: delete didn't
		 * mangle the cursor.
		 */
		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(mp,
				i == 1 && xxbno == ltbno && xxlen == ltlen,
				error0);
		}
#endif
		/*
		 * Update remaining by-block entry to the new, joined block.
		 */
		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a left contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveleft) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * Back up the by-block cursor to the left neighbor, and
		 * update its length.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a right contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveright) {
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		/*
		 * Update the starting block and length of the right
		 * neighbor in the by-block tree.
		 */
		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * No contiguous neighbors.
	 * Insert the new freespace into the by-block tree.
	 */
	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;
	/*
	 * In all cases we need to insert the new freespace in the by-size tree.
	 */
	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
	if ((error = xfs_btree_insert(cnt_cur, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;

	/*
	 * Update the freespace totals in the ag and superblock.
	 */
	pag = xfs_perag_get(mp, agno);
	error = xfs_alloc_update_counters(tp, pag, agbp, len);
	xfs_ag_resv_free_extent(pag, type, tp, len);
	xfs_perag_put(pag);
	if (error)
		goto error0;

	XFS_STATS_INC(mp, xs_freex);
	XFS_STATS_ADD(mp, xs_freeb, len);

	trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);

	return 0;

 error0:
	trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Visible (exported) allocation/free functions.
 * Some of these are used just by xfs_alloc_btree.c and this file.
 */

/*
 * Compute and fill in value of m_ag_maxlevels.
 */
void
xfs_alloc_compute_maxlevels(
	xfs_mount_t	*mp)	/* file system mount structure */
{
	mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
			(mp->m_sb.sb_agblocks + 1) / 2);
}

/*
 * Find the length of the longest extent in an AG.  The 'need' parameter
 * specifies how much space we're going to need for the AGFL and the
 * 'reserved' parameter tells us how many blocks in this AG are reserved for
 * other callers.
 */
xfs_extlen_t
xfs_alloc_longest_free_extent(
	struct xfs_perag	*pag,
	xfs_extlen_t		need,
	xfs_extlen_t		reserved)
{
	xfs_extlen_t		delta = 0;

	/*
	 * If the AGFL needs a recharge, we'll have to subtract that from the
	 * longest extent.
	 */
	if (need > pag->pagf_flcount)
		delta = need - pag->pagf_flcount;

	/*
	 * If we cannot maintain others' reservations with space from the
	 * not-longest freesp extents, we'll have to subtract /that/ from
	 * the longest extent too.
	 */
	if (pag->pagf_freeblks - pag->pagf_longest < reserved)
		delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);

	/*
	 * If the longest extent is long enough to satisfy all the
	 * reservations and AGFL rules in place, we can return this extent.
	 */
	if (pag->pagf_longest > delta)
		return pag->pagf_longest - delta;

	/* Otherwise, let the caller try for 1 block if there's space. */
	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}
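
/*
 * Illustrative example: pagf_longest = 100, pagf_flcount = 2, need = 6 and
 * reserved = 0 gives delta = 4, so 96 blocks of the longest extent are
 * considered usable; any reservation shortfall in the remaining free space
 * is deducted the same way.
 */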

unsigned int
xfs_alloc_min_freelist(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag)
{
	unsigned int		min_free;

	/* space needed by the by-bno freespace btree */
	min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
				       mp->m_ag_maxlevels);
	/* space needed by the by-size freespace btree */
	min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
					mp->m_ag_maxlevels);
	/* space needed by the reverse mapping btree */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		min_free += min_t(unsigned int,
				  pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
				  mp->m_rmap_maxlevels);

	return min_free;
}
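
/*
 * Illustrative example: with 2-level bnobt and cntbt and no rmapbt, the
 * minimum freelist length is (2 + 1) + (2 + 1) = 6 blocks - enough to
 * handle a split of every level of both free space btrees.
 */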

/*
 * Check if the operation we are fixing up the freelist for should go ahead or
 * not. If we are freeing blocks, we always allow it, otherwise the allocation
 * is dependent on whether the size and shape of free space available will
 * permit the requested allocation to take place.
 */
bool
xfs_alloc_space_available(
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		min_free,
	int			flags)
{
	struct xfs_perag	*pag = args->pag;
	xfs_extlen_t		alloc_len, longest;
	xfs_extlen_t		reservation; /* blocks that are still reserved */
	int			available;

	if (flags & XFS_ALLOC_FLAG_FREEING)
		return true;

	reservation = xfs_ag_resv_needed(pag, args->resv);

	/* do we have enough contiguous free space for the allocation? */
	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
	longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
	if (longest < alloc_len)
		return false;

	/* do we have enough free space remaining for the allocation? */
	available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
			  reservation - min_free - args->minleft);
	if (available < (int)max(args->total, alloc_len))
		return false;

	/*
	 * Clamp maxlen to the amount of free space available for the actual
	 * extent allocation.
	 */
	if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
		args->maxlen = available;
		ASSERT(args->maxlen > 0);
		ASSERT(args->maxlen >= args->minlen);
	}

	return true;
}

int
xfs_free_agfl_block(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	struct xfs_buf		*agbp,
	struct xfs_owner_info	*oinfo)
{
	int			error;
	struct xfs_buf		*bp;

	error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
				   XFS_AG_RESV_AGFL);
	if (error)
		return error;

	bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno, 0);
	if (!bp)
		return -EFSCORRUPTED;
	xfs_trans_binval(tp, bp);

	return 0;
}

/*
 * Check the agfl fields of the agf for inconsistency or corruption. The purpose
 * is to detect an agfl header padding mismatch between current and early v5
 * kernels. This problem manifests as a 1-slot size difference between the
 * on-disk flcount and the active [first, last] range of a wrapped agfl. This
 * may also catch variants of agfl count corruption unrelated to padding. Either
 * way, we'll reset the agfl and warn the user.
 *
 * Return true if a reset is required before the agfl can be used, false
 * otherwise.
 */
static bool
xfs_agfl_needs_reset(
	struct xfs_mount	*mp,
	struct xfs_agf		*agf)
{
	uint32_t		f = be32_to_cpu(agf->agf_flfirst);
	uint32_t		l = be32_to_cpu(agf->agf_fllast);
	uint32_t		c = be32_to_cpu(agf->agf_flcount);
	int			agfl_size = xfs_agfl_size(mp);
	int			active;

	/* no agfl header on v4 supers */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;

	/*
	 * The agf read verifier catches severe corruption of these fields.
	 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
	 * the verifier allows it.
	 */
	if (f >= agfl_size || l >= agfl_size)
		return true;
	if (c > agfl_size)
		return true;

	/*
	 * Check consistency between the on-disk count and the active range. An
	 * agfl padding mismatch manifests as an inconsistent flcount.
	 */
	if (c && l >= f)
		active = l - f + 1;
	else if (c)
		active = agfl_size - f + l + 1;
	else
		active = 0;

	return active != c;
}
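
/*
 * Illustrative example: for agfl_size = 119 with first = 117, last = 2 and
 * a wrapped active range, active = 119 - 117 + 2 + 1 = 5; an on-disk
 * flcount of 4 caused by the padding mismatch therefore triggers a reset.
 */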

/*
 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
 * agfl content cannot be trusted. Warn the user that a repair is required to
 * recover leaked blocks.
 *
 * The purpose of this mechanism is to handle filesystems affected by the agfl
 * header padding mismatch problem. A reset keeps the filesystem online with a
 * relatively minor free space accounting inconsistency rather than suffer the
 * inevitable crash from use of an invalid agfl block.
 */
static void
xfs_agfl_reset(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	ASSERT(pag->pagf_agflreset);
	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);

	xfs_warn(mp,
	       "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
	       "Please unmount and run xfs_repair.",
	         pag->pag_agno, pag->pagf_flcount);

	agf->agf_flfirst = 0;
	agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
	agf->agf_flcount = 0;
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
				    XFS_AGF_FLCOUNT);

	pag->pagf_flcount = 0;
	pag->pagf_agflreset = false;
}

/*
 * Defer an AGFL block free. This is effectively equivalent to
 * xfs_bmap_add_free() with some special handling particular to AGFL blocks.
 *
 * Deferring AGFL frees helps prevent log reservation overruns due to too many
 * allocation operations in a transaction. AGFL frees are prone to this problem
 * because for one they are always freed one at a time. Further, an immediate
 * AGFL block free can cause a btree join and require another block free before
 * the real allocation can proceed. Deferring the free disconnects freeing up
 * the AGFL slot from freeing the block.
 */
STATIC void
xfs_defer_agfl_block(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	xfs_agnumber_t			agno,
	xfs_fsblock_t			agbno,
	struct xfs_owner_info		*oinfo)
{
	struct xfs_extent_free_item	*new;		/* new element */

	ASSERT(xfs_bmap_free_item_zone != NULL);
	ASSERT(oinfo != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
	new->xefi_blockcount = 1;
	new->xefi_oinfo = *oinfo;

	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);

	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
}

/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size.
 */
int                     /* error */
xfs_alloc_fix_freelist(
        struct xfs_alloc_arg    *args,  /* allocation argument structure */
        int                     flags)  /* XFS_ALLOC_FLAG_... */
{
        struct xfs_mount        *mp = args->mp;
        struct xfs_perag        *pag = args->pag;
        struct xfs_trans        *tp = args->tp;
        struct xfs_buf          *agbp = NULL;
        struct xfs_buf          *agflbp = NULL;
        struct xfs_alloc_arg    targs;  /* local allocation arguments */
        xfs_agblock_t           bno;    /* freelist block */
        xfs_extlen_t            need;   /* total blocks needed in freelist */
        int                     error = 0;

        if (!pag->pagf_init) {
                error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
                if (error)
                        goto out_no_agbp;
                if (!pag->pagf_init) {
                        ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
                        ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
                        goto out_agbp_relse;
                }
        }

        /*
         * If this is a metadata preferred pag and we are user data then try
         * somewhere else if we are not being asked to try harder at this
         * point.
         */
        if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
            (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
                ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
                goto out_agbp_relse;
        }

        need = xfs_alloc_min_freelist(mp, pag);
        if (!xfs_alloc_space_available(args, need, flags |
                        XFS_ALLOC_FLAG_CHECK))
                goto out_agbp_relse;

        /*
         * Get the a.g. freespace buffer.
         * Can fail if we're not blocking on locks, and it's held.
         */
        if (!agbp) {
                error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
                if (error)
                        goto out_no_agbp;
                if (!agbp) {
                        ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
                        ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
                        goto out_no_agbp;
                }
        }

        /* reset a padding mismatched agfl before final free space check */
        if (pag->pagf_agflreset)
                xfs_agfl_reset(tp, agbp, pag);

        /* If there isn't enough total space or a long-enough single extent, reject it. */
        need = xfs_alloc_min_freelist(mp, pag);
        if (!xfs_alloc_space_available(args, need, flags))
                goto out_agbp_relse;

        /*
         * Make the freelist shorter if it's too long.
         *
         * Note that from this point onwards, we will always release the agf and
         * agfl buffers on error. This handles the case where we error out and
         * the buffers are clean or may not have been joined to the transaction
         * and hence need to be released manually. If they have been joined to
         * the transaction, then xfs_trans_brelse() will handle them
         * appropriately based on the recursion count and dirty state of the
         * buffer.
         *
         * XXX (dgc): When we have lots of free space, does this buy us
         * anything other than extra overhead when we need to put more blocks
         * back on the free list? Maybe we should only do this when space is
         * getting low or the AGFL is more than half full?
         *
         * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
         * big; the NORMAP flag prevents AGFL expand/shrink operations from
         * updating the rmapbt. Both flags are used in xfs_repair while we're
         * rebuilding the rmapbt, and neither is used by the kernel. They're
         * both required to ensure that rmaps are correctly recorded for the
         * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
         * repair/rmap.c in xfsprogs for details.
         */
        memset(&targs, 0, sizeof(targs));
        if (flags & XFS_ALLOC_FLAG_NORMAP)
                xfs_rmap_skip_owner_update(&targs.oinfo);
        else
                xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
        while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
                error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
                if (error)
                        goto out_agbp_relse;

                /* defer agfl frees if dfops is provided */
                if (tp->t_agfl_dfops) {
                        xfs_defer_agfl_block(mp, tp->t_agfl_dfops, args->agno,
                                             bno, &targs.oinfo);
                } else {
                        error = xfs_free_agfl_block(tp, args->agno, bno, agbp,
                                                    &targs.oinfo);
                        if (error)
                                goto out_agbp_relse;
                }
        }

        targs.tp = tp;
        targs.mp = mp;
        targs.agbp = agbp;
        targs.agno = args->agno;
        targs.alignment = targs.minlen = targs.prod = 1;
        targs.type = XFS_ALLOCTYPE_THIS_AG;
        targs.pag = pag;
        error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
        if (error)
                goto out_agbp_relse;

        /* Make the freelist longer if it's too short. */
        while (pag->pagf_flcount < need) {
                targs.agbno = 0;
                targs.maxlen = need - pag->pagf_flcount;
                targs.resv = XFS_AG_RESV_AGFL;

                /* Allocate as many blocks as possible at once. */
                error = xfs_alloc_ag_vextent(&targs);
                if (error)
                        goto out_agflbp_relse;

                /*
                 * Stop if we run out. Won't happen if callers are obeying
                 * the restrictions correctly. Can happen for free calls
                 * on a completely full ag.
                 */
                if (targs.agbno == NULLAGBLOCK) {
                        if (flags & XFS_ALLOC_FLAG_FREEING)
                                break;
                        goto out_agflbp_relse;
                }
                /*
                 * Put each allocated block on the list.
                 */
                for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
                        error = xfs_alloc_put_freelist(tp, agbp,
                                                        agflbp, bno, 0);
                        if (error)
                                goto out_agflbp_relse;
                }
        }
        xfs_trans_brelse(tp, agflbp);
        args->agbp = agbp;
        return 0;

out_agflbp_relse:
        xfs_trans_brelse(tp, agflbp);
out_agbp_relse:
        if (agbp)
                xfs_trans_brelse(tp, agbp);
out_no_agbp:
        args->agbp = NULL;
        return error;
}

/*
 * Get a block from the freelist.
 * Returns with the buffer for the block gotten.
 */
int                             /* error */
xfs_alloc_get_freelist(
        xfs_trans_t     *tp,    /* transaction pointer */
        xfs_buf_t       *agbp,  /* buffer containing the agf structure */
        xfs_agblock_t   *bnop,  /* block address retrieved from freelist */
        int             btreeblk) /* destination is an AGF btree */
{
        xfs_agf_t       *agf;   /* a.g. freespace structure */
        xfs_buf_t       *agflbp;/* buffer for a.g. freelist structure */
        xfs_agblock_t   bno;    /* block number returned */
        __be32          *agfl_bno;
        int             error;
        int             logflags;
        xfs_mount_t     *mp = tp->t_mountp;
        xfs_perag_t     *pag;   /* per allocation group data */

        /*
         * Freelist is empty, give up.
         */
        agf = XFS_BUF_TO_AGF(agbp);
        if (!agf->agf_flcount) {
                *bnop = NULLAGBLOCK;
                return 0;
        }
        /*
         * Read the array of free blocks.
         */
        error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
                                    &agflbp);
        if (error)
                return error;

        /*
         * Get the block number and update the data structures.
         */
        agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
        bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
        be32_add_cpu(&agf->agf_flfirst, 1);
        xfs_trans_brelse(tp, agflbp);
        if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
                agf->agf_flfirst = 0;

        pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
        ASSERT(!pag->pagf_agflreset);
        be32_add_cpu(&agf->agf_flcount, -1);
        xfs_trans_agflist_delta(tp, -1);
        pag->pagf_flcount--;
        xfs_perag_put(pag);

        logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
        if (btreeblk) {
                be32_add_cpu(&agf->agf_btreeblks, 1);
                pag->pagf_btreeblks++;
                logflags |= XFS_AGF_BTREEBLKS;
        }

        xfs_alloc_log_agf(tp, agbp, logflags);
        *bnop = bno;

        return 0;
}

/*
 * Log the given fields from the agf structure.
 */
void
xfs_alloc_log_agf(
        xfs_trans_t     *tp,    /* transaction pointer */
        xfs_buf_t       *bp,    /* buffer for a.g. freelist header */
        int             fields) /* mask of fields to be logged (XFS_AGF_...) */
{
        int     first;          /* first byte offset */
        int     last;           /* last byte offset */
        static const short      offsets[] = {
                offsetof(xfs_agf_t, agf_magicnum),
                offsetof(xfs_agf_t, agf_versionnum),
                offsetof(xfs_agf_t, agf_seqno),
                offsetof(xfs_agf_t, agf_length),
                offsetof(xfs_agf_t, agf_roots[0]),
                offsetof(xfs_agf_t, agf_levels[0]),
                offsetof(xfs_agf_t, agf_flfirst),
                offsetof(xfs_agf_t, agf_fllast),
                offsetof(xfs_agf_t, agf_flcount),
                offsetof(xfs_agf_t, agf_freeblks),
                offsetof(xfs_agf_t, agf_longest),
                offsetof(xfs_agf_t, agf_btreeblks),
                offsetof(xfs_agf_t, agf_uuid),
                offsetof(xfs_agf_t, agf_rmap_blocks),
                offsetof(xfs_agf_t, agf_refcount_blocks),
                offsetof(xfs_agf_t, agf_refcount_root),
                offsetof(xfs_agf_t, agf_refcount_level),
                /* needed so that we don't log the whole rest of the structure: */
                offsetof(xfs_agf_t, agf_spare64),
                sizeof(xfs_agf_t)
        };

        trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);

        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);

        xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
        xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}

/*
 * Interface for inode allocation to force the pag data to be initialized.
 */
int                                     /* error */
xfs_alloc_pagf_init(
        xfs_mount_t             *mp,    /* file system mount structure */
        xfs_trans_t             *tp,    /* transaction pointer */
        xfs_agnumber_t          agno,   /* allocation group number */
        int                     flags)  /* XFS_ALLOC_FLAGS_... */
{
        xfs_buf_t               *bp;
        int                     error;

        if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
                return error;
        if (bp)
                xfs_trans_brelse(tp, bp);
        return 0;
}

/*
 * Put the block on the freelist for the allocation group.
 */
int                                     /* error */
xfs_alloc_put_freelist(
        xfs_trans_t             *tp,    /* transaction pointer */
        xfs_buf_t               *agbp,  /* buffer for a.g. freelist header */
        xfs_buf_t               *agflbp,/* buffer for a.g. free block array */
        xfs_agblock_t           bno,    /* block being freed */
        int                     btreeblk) /* block came from an AGF btree */
{
        xfs_agf_t               *agf;   /* a.g. freespace structure */
        __be32                  *blockp;/* pointer to array entry */
        int                     error;
        int                     logflags;
        xfs_mount_t             *mp;    /* mount structure */
        xfs_perag_t             *pag;   /* per allocation group data */
        __be32                  *agfl_bno;
        int                     startoff;

        agf = XFS_BUF_TO_AGF(agbp);
        mp = tp->t_mountp;

        if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
                        be32_to_cpu(agf->agf_seqno), &agflbp)))
                return error;
        be32_add_cpu(&agf->agf_fllast, 1);
        if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
                agf->agf_fllast = 0;

        pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
        ASSERT(!pag->pagf_agflreset);
        be32_add_cpu(&agf->agf_flcount, 1);
        xfs_trans_agflist_delta(tp, 1);
        pag->pagf_flcount++;

        logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
        if (btreeblk) {
                be32_add_cpu(&agf->agf_btreeblks, -1);
                pag->pagf_btreeblks--;
                logflags |= XFS_AGF_BTREEBLKS;
        }
        xfs_perag_put(pag);

        xfs_alloc_log_agf(tp, agbp, logflags);

        ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));

        agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
        blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
        *blockp = cpu_to_be32(bno);
        startoff = (char *)blockp - (char *)agflbp->b_addr;

        xfs_alloc_log_agf(tp, agbp, logflags);

        xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
        xfs_trans_log_buf(tp, agflbp, startoff,
                          startoff + sizeof(xfs_agblock_t) - 1);
        return 0;
}
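
/*
 * Illustrative sketch, not part of the original source: trade a freed btree
 * block for a fresh one via the AGFL by pairing xfs_alloc_put_freelist()
 * with xfs_alloc_get_freelist(). The helper name and calling context are
 * assumptions made for this example.
 */
static inline int
xfs_agfl_swap_example(
        struct xfs_trans        *tp,    /* running transaction */
        struct xfs_buf          *agbp,  /* locked AGF buffer */
        xfs_agblock_t           oldbno, /* btree block being returned */
        xfs_agblock_t           *newbno)/* replacement block on success */
{
        int                     error;

        /* Hand the old btree block back to the freelist... */
        error = xfs_alloc_put_freelist(tp, agbp, NULL, oldbno, 1);
        if (error)
                return error;

        /* ...and pull a fresh block out for btree use. */
        return xfs_alloc_get_freelist(tp, agbp, newbno, 1);
}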

static xfs_failaddr_t
xfs_agf_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_target->bt_mount;
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(bp);

        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
                        return __this_address;
                if (!xfs_log_check_lsn(mp,
                                be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
                        return __this_address;
        }

        if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
              XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
              be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
              be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
              be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
              be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
                return __this_address;

        if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
            be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
            be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
            be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
                return __this_address;

        if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
            (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
             be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
                return __this_address;

        /*
         * during growfs operations, the perag is not fully initialised,
         * so we can't use it for any useful checking. growfs ensures we can't
         * use it by using uncached buffers that don't have the perag attached
         * so we can detect and avoid this problem.
         */
        if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
                return __this_address;

        if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
            be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
                return __this_address;

        if (xfs_sb_version_hasreflink(&mp->m_sb) &&
            (be32_to_cpu(agf->agf_refcount_level) < 1 ||
             be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
                return __this_address;

        return NULL;
}

static void
xfs_agf_read_verify(
        struct xfs_buf  *bp)
{
        struct xfs_mount *mp = bp->b_target->bt_mount;
        xfs_failaddr_t  fa;

        if (xfs_sb_version_hascrc(&mp->m_sb) &&
            !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
                xfs_verifier_error(bp, -EFSBADCRC, __this_address);
        else {
                fa = xfs_agf_verify(bp);
                if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
                        xfs_verifier_error(bp, -EFSCORRUPTED, fa);
        }
}

static void
xfs_agf_write_verify(
        struct xfs_buf  *bp)
{
        struct xfs_mount        *mp = bp->b_target->bt_mount;
        struct xfs_buf_log_item *bip = bp->b_log_item;
        xfs_failaddr_t          fa;

        fa = xfs_agf_verify(bp);
        if (fa) {
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                return;
        }

        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return;

        if (bip)
                XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);

        xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
}

const struct xfs_buf_ops xfs_agf_buf_ops = {
        .name = "xfs_agf",
        .verify_read = xfs_agf_read_verify,
        .verify_write = xfs_agf_write_verify,
        .verify_struct = xfs_agf_verify,
};

/*
 * Read in the allocation group header (free/alloc section).
 */
int                                     /* error */
xfs_read_agf(
        struct xfs_mount        *mp,    /* mount point structure */
        struct xfs_trans        *tp,    /* transaction pointer */
        xfs_agnumber_t          agno,   /* allocation group number */
        int                     flags,  /* XFS_BUF_ */
        struct xfs_buf          **bpp)  /* buffer for the ag freelist header */
{
        int                     error;

        trace_xfs_read_agf(mp, agno);

        ASSERT(agno != NULLAGNUMBER);
        error = xfs_trans_read_buf(
                        mp, tp, mp->m_ddev_targp,
                        XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
                        XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
        if (error)
                return error;
        if (!*bpp)
                return 0;

        ASSERT(!(*bpp)->b_error);
        xfs_buf_set_ref(*bpp, XFS_AGF_REF);
        return 0;
}

/*
 * Read in the allocation group header (free/alloc section) and initialise
 * the per-AG data if that hasn't been done already.
 */
int                                     /* error */
xfs_alloc_read_agf(
        struct xfs_mount        *mp,    /* mount point structure */
        struct xfs_trans        *tp,    /* transaction pointer */
        xfs_agnumber_t          agno,   /* allocation group number */
        int                     flags,  /* XFS_ALLOC_FLAG_... */
        struct xfs_buf          **bpp)  /* buffer for the ag freelist header */
{
        struct xfs_agf          *agf;   /* ag freelist header */
        struct xfs_perag        *pag;   /* per allocation group data */
        int                     error;

        trace_xfs_alloc_read_agf(mp, agno);

        ASSERT(agno != NULLAGNUMBER);
        error = xfs_read_agf(mp, tp, agno,
                        (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
                        bpp);
        if (error)
                return error;
        if (!*bpp)
                return 0;
        ASSERT(!(*bpp)->b_error);

        agf = XFS_BUF_TO_AGF(*bpp);
        pag = xfs_perag_get(mp, agno);
        if (!pag->pagf_init) {
                pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
                pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
                pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
                pag->pagf_longest = be32_to_cpu(agf->agf_longest);
                pag->pagf_levels[XFS_BTNUM_BNOi] =
                        be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
                pag->pagf_levels[XFS_BTNUM_CNTi] =
                        be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
                pag->pagf_levels[XFS_BTNUM_RMAPi] =
                        be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
                pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
                spin_lock_init(&pag->pagb_lock);
                pag->pagb_count = 0;
                pag->pagb_tree = RB_ROOT;
                pag->pagf_init = 1;
                pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
        }
#ifdef DEBUG
        else if (!XFS_FORCED_SHUTDOWN(mp)) {
                ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
                ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
                ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
                ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
                ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
                       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
                ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
                       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
        }
#endif
        xfs_perag_put(pag);
        return 0;
}

/*
 * Allocate an extent (variable-size).
 * Depending on the allocation type, we either look in a single allocation
 * group or loop over the allocation groups to find the result.
 */
int                             /* error */
xfs_alloc_vextent(
        xfs_alloc_arg_t *args)  /* allocation argument structure */
{
        xfs_agblock_t   agsize; /* allocation group size */
        int             error;
        int             flags;  /* XFS_ALLOC_FLAG_... locking flags */
        xfs_mount_t     *mp;    /* mount structure pointer */
        xfs_agnumber_t  sagno;  /* starting allocation group number */
        xfs_alloctype_t type;   /* input allocation type */
        int             bump_rotor = 0;
        xfs_agnumber_t  rotorstep = xfs_rotorstep; /* inode32 agf stepper */

        mp = args->mp;
        type = args->otype = args->type;
        args->agbno = NULLAGBLOCK;
        /*
         * Just fix this up, for the case where the last a.g. is shorter
         * (or there's only one a.g.) and the caller couldn't easily figure
         * that out (xfs_bmap_alloc).
         */
        agsize = mp->m_sb.sb_agblocks;
        if (args->maxlen > agsize)
                args->maxlen = agsize;
        if (args->alignment == 0)
                args->alignment = 1;
        ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
        ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
        ASSERT(args->minlen <= args->maxlen);
        ASSERT(args->minlen <= agsize);
        ASSERT(args->mod < args->prod);
        if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
            XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
            args->minlen > args->maxlen || args->minlen > agsize ||
            args->mod >= args->prod) {
                args->fsbno = NULLFSBLOCK;
                trace_xfs_alloc_vextent_badargs(args);
                return 0;
        }

        switch (type) {
        case XFS_ALLOCTYPE_THIS_AG:
        case XFS_ALLOCTYPE_NEAR_BNO:
        case XFS_ALLOCTYPE_THIS_BNO:
                /*
                 * These three force us into a single a.g.
                 */
                args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
                args->pag = xfs_perag_get(mp, args->agno);
                error = xfs_alloc_fix_freelist(args, 0);
                if (error) {
                        trace_xfs_alloc_vextent_nofix(args);
                        goto error0;
                }
                if (!args->agbp) {
                        trace_xfs_alloc_vextent_noagbp(args);
                        break;
                }
                args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
                if ((error = xfs_alloc_ag_vextent(args)))
                        goto error0;
                break;
        case XFS_ALLOCTYPE_START_BNO:
                /*
                 * Try near allocation first, then anywhere-in-ag after
                 * the first a.g. fails.
                 */
                if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
                    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
                        args->fsbno = XFS_AGB_TO_FSB(mp,
                                        ((mp->m_agfrotor / rotorstep) %
                                        mp->m_sb.sb_agcount), 0);
                        bump_rotor = 1;
                }
                args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
                args->type = XFS_ALLOCTYPE_NEAR_BNO;
                /* FALLTHROUGH */
        case XFS_ALLOCTYPE_FIRST_AG:
                /*
                 * Rotate through the allocation groups looking for a winner.
                 */
                if (type == XFS_ALLOCTYPE_FIRST_AG) {
                        /*
                         * Start with allocation group given by bno.
                         */
                        args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
                        args->type = XFS_ALLOCTYPE_THIS_AG;
                        sagno = 0;
                        flags = 0;
                } else {
                        /*
                         * Start with the given allocation group.
                         */
                        args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
                        flags = XFS_ALLOC_FLAG_TRYLOCK;
                }
                /*
                 * Loop over allocation groups twice; first time with
                 * trylock set, second time without.
                 */
                for (;;) {
                        args->pag = xfs_perag_get(mp, args->agno);
                        error = xfs_alloc_fix_freelist(args, flags);
                        if (error) {
                                trace_xfs_alloc_vextent_nofix(args);
                                goto error0;
                        }
                        /*
                         * If we get a buffer back then the allocation will fly.
                         */
                        if (args->agbp) {
                                if ((error = xfs_alloc_ag_vextent(args)))
                                        goto error0;
                                break;
                        }

                        trace_xfs_alloc_vextent_loopfailed(args);

                        /*
                         * Didn't work, figure out the next iteration.
                         */
                        if (args->agno == sagno &&
                            type == XFS_ALLOCTYPE_START_BNO)
                                args->type = XFS_ALLOCTYPE_THIS_AG;
                        /*
                         * For the first allocation, we can try any AG to get
                         * space.  However, if we already have allocated a
                         * block, we don't want to try AGs whose number is below
                         * sagno. Otherwise, we may end up with out-of-order
                         * locking of AGF, which might cause deadlock.
                         */
                        if (++(args->agno) == mp->m_sb.sb_agcount) {
                                if (args->firstblock != NULLFSBLOCK)
                                        args->agno = sagno;
                                else
                                        args->agno = 0;
                        }
                        /*
                         * Reached the starting a.g., must either be done
                         * or switch to non-trylock mode.
                         */
                        if (args->agno == sagno) {
                                if (flags == 0) {
                                        args->agbno = NULLAGBLOCK;
                                        trace_xfs_alloc_vextent_allfailed(args);
                                        break;
                                }

                                flags = 0;
                                if (type == XFS_ALLOCTYPE_START_BNO) {
                                        args->agbno = XFS_FSB_TO_AGBNO(mp,
                                                args->fsbno);
                                        args->type = XFS_ALLOCTYPE_NEAR_BNO;
                                }
                        }
                        xfs_perag_put(args->pag);
                }
                if (bump_rotor) {
                        if (args->agno == sagno)
                                mp->m_agfrotor = (mp->m_agfrotor + 1) %
                                        (mp->m_sb.sb_agcount * rotorstep);
                        else
                                mp->m_agfrotor = (args->agno * rotorstep + 1) %
                                        (mp->m_sb.sb_agcount * rotorstep);
                }
                break;
        default:
                ASSERT(0);
                /* NOTREACHED */
        }
        if (args->agbno == NULLAGBLOCK)
                args->fsbno = NULLFSBLOCK;
        else {
                args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
                ASSERT(args->len >= args->minlen);
                ASSERT(args->len <= args->maxlen);
                ASSERT(args->agbno % args->alignment == 0);
                XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
                        args->len);
#endif

                /* Zero the extent if we were asked to do so */
                if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
                        error = xfs_zero_extent(args->ip, args->fsbno, args->len);
                        if (error)
                                goto error0;
                }
        }
        xfs_perag_put(args->pag);
        return 0;
error0:
        xfs_perag_put(args->pag);
        return error;
}
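
/*
 * Illustrative sketch, not part of the original source: a minimal caller of
 * xfs_alloc_vextent() asking for a small extent near a target block. The
 * helper name, the length values, and the rmap owner choice are assumptions
 * made for this example; real callers such as the bmap allocator fill in
 * considerably more state.
 */
static inline int
xfs_alloc_vextent_example(
        struct xfs_trans        *tp,    /* running transaction */
        xfs_fsblock_t           target) /* block to allocate near */
{
        struct xfs_alloc_arg    args;

        memset(&args, 0, sizeof(args));
        args.tp = tp;
        args.mp = tp->t_mountp;
        args.fsbno = target;                    /* allocation hint */
        args.type = XFS_ALLOCTYPE_NEAR_BNO;     /* single AG, near target */
        args.minlen = 1;                        /* accept one block... */
        args.maxlen = 16;                       /* ...but prefer up to 16 */
        args.prod = 1;
        args.alignment = 1;
        xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_UNKNOWN);

        /*
         * On return, args.fsbno and args.len describe the new extent;
         * args.fsbno == NULLFSBLOCK means nothing could be allocated.
         */
        return xfs_alloc_vextent(&args);
}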

/* Ensure that the freelist is at full capacity. */
int
xfs_free_extent_fix_freelist(
        struct xfs_trans        *tp,
        xfs_agnumber_t          agno,
        struct xfs_buf          **agbp)
{
        struct xfs_alloc_arg    args;
        int                     error;

        memset(&args, 0, sizeof(struct xfs_alloc_arg));
        args.tp = tp;
        args.mp = tp->t_mountp;
        args.agno = agno;

        /*
         * Validate that the block number is legal - this enables us to detect
         * and handle a silent filesystem corruption rather than crashing.
         */
        if (args.agno >= args.mp->m_sb.sb_agcount)
                return -EFSCORRUPTED;

        args.pag = xfs_perag_get(args.mp, args.agno);
        ASSERT(args.pag);

        error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
        if (error)
                goto out;

        *agbp = args.agbp;
out:
        xfs_perag_put(args.pag);
        return error;
}

/*
 * Just break up the extent address and hand off to xfs_free_ag_extent
 * after fixing up the freelist.
 */
int                             /* error */
__xfs_free_extent(
        struct xfs_trans        *tp,    /* transaction pointer */
        xfs_fsblock_t           bno,    /* starting block number of extent */
        xfs_extlen_t            len,    /* length of extent */
        struct xfs_owner_info   *oinfo, /* extent owner */
        enum xfs_ag_resv_type   type,   /* block reservation type */
        bool                    skip_discard)
{
        struct xfs_mount        *mp = tp->t_mountp;
        struct xfs_buf          *agbp;
        xfs_agnumber_t          agno = XFS_FSB_TO_AGNO(mp, bno);
        xfs_agblock_t           agbno = XFS_FSB_TO_AGBNO(mp, bno);
        int                     error;
        unsigned int            busy_flags = 0;

        ASSERT(len != 0);
        ASSERT(type != XFS_AG_RESV_AGFL);

        if (XFS_TEST_ERROR(false, mp,
                        XFS_ERRTAG_FREE_EXTENT))
                return -EIO;

        error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
        if (error)
                return error;

        XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);

        /* validate the extent size is legal now we have the agf locked */
        XFS_WANT_CORRUPTED_GOTO(mp,
                agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
                err);

        error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
        if (error)
                goto err;

        if (skip_discard)
                busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
        xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags);
        return 0;

err:
        xfs_trans_brelse(tp, agbp);
        return error;
}

struct xfs_alloc_query_range_info {
        xfs_alloc_query_range_fn        fn;
        void                            *priv;
};

/* Format btree record and pass to our callback. */
STATIC int
xfs_alloc_query_range_helper(
        struct xfs_btree_cur            *cur,
        union xfs_btree_rec             *rec,
        void                            *priv)
{
        struct xfs_alloc_query_range_info      *query = priv;
        struct xfs_alloc_rec_incore            irec;

        irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
        irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
        return query->fn(cur, &irec, query->priv);
}

/* Find all free space within a given range of blocks. */
int
xfs_alloc_query_range(
        struct xfs_btree_cur                    *cur,
        struct xfs_alloc_rec_incore             *low_rec,
        struct xfs_alloc_rec_incore             *high_rec,
        xfs_alloc_query_range_fn                fn,
        void                                    *priv)
{
        union xfs_btree_irec                    low_brec;
        union xfs_btree_irec                    high_brec;
        struct xfs_alloc_query_range_info       query;

        ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
        low_brec.a = *low_rec;
        high_brec.a = *high_rec;
        query.priv = priv;
        query.fn = fn;
        return xfs_btree_query_range(cur, &low_brec, &high_brec,
                        xfs_alloc_query_range_helper, &query);
}
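
/*
 * Illustrative sketch, not part of the original source: a minimal
 * xfs_alloc_query_range_fn callback that tallies the free extents it is
 * shown. The helper name and the use of @priv as a counter are assumptions
 * made for this example; pass it as the @fn argument of
 * xfs_alloc_query_range() or xfs_alloc_query_all().
 */
static inline int
xfs_alloc_count_free_example(
        struct xfs_btree_cur            *cur,
        struct xfs_alloc_rec_incore     *rec,
        void                            *priv)
{
        uint64_t                        *nr_extents = priv;

        (*nr_extents)++;        /* one more free extent in the query range */
        return 0;               /* zero means keep iterating */
}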

/* Find all free space records. */
int
xfs_alloc_query_all(
        struct xfs_btree_cur                    *cur,
        xfs_alloc_query_range_fn                fn,
        void                                    *priv)
{
        struct xfs_alloc_query_range_info       query;

        ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
        query.priv = priv;
        query.fn = fn;
        return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
}

/* Find the size of the AG, in blocks. */
xfs_agblock_t
xfs_ag_block_count(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno)
{
        ASSERT(agno < mp->m_sb.sb_agcount);

        if (agno < mp->m_sb.sb_agcount - 1)
                return mp->m_sb.sb_agblocks;
        return mp->m_sb.sb_dblocks - (agno * mp->m_sb.sb_agblocks);
}
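
/*
 * For example (geometry invented for illustration): with sb_agblocks = 256
 * and sb_dblocks = 1000, AGs 0-2 are 256 blocks each, while the last AG
 * (agno 3) holds only the remaining 1000 - 3 * 256 = 232 blocks.
 */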

/*
 * Verify that an AG block number pointer neither points outside the AG
 * nor points at static metadata.
 */
bool
xfs_verify_agbno(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_agblock_t           agbno)
{
        xfs_agblock_t           eoag;

        eoag = xfs_ag_block_count(mp, agno);
        if (agbno >= eoag)
                return false;
        if (agbno <= XFS_AGFL_BLOCK(mp))
                return false;
        return true;
}

/*
 * Verify that an FS block number pointer neither points outside the
 * filesystem nor points at static AG metadata.
 */
bool
xfs_verify_fsbno(
        struct xfs_mount        *mp,
        xfs_fsblock_t           fsbno)
{
        xfs_agnumber_t          agno = XFS_FSB_TO_AGNO(mp, fsbno);

        if (agno >= mp->m_sb.sb_agcount)
                return false;
        return xfs_verify_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
}

/* Is there a record covering a given extent? */
int
xfs_alloc_has_record(
        struct xfs_btree_cur    *cur,
        xfs_agblock_t           bno,
        xfs_extlen_t            len,
        bool                    *exists)
{
        union xfs_btree_irec    low;
        union xfs_btree_irec    high;

        memset(&low, 0, sizeof(low));
        low.a.ar_startblock = bno;
        memset(&high, 0xFF, sizeof(high));
        high.a.ar_startblock = bno + len - 1;

        return xfs_btree_has_record(cur, &low, &high, exists);
}

/*
 * Walk all the blocks in the AGFL. The @walk_fn can return any negative
 * error code or XFS_BTREE_QUERY_RANGE_ABORT.
 */
int
xfs_agfl_walk(
        struct xfs_mount        *mp,
        struct xfs_agf          *agf,
        struct xfs_buf          *agflbp,
        xfs_agfl_walk_fn        walk_fn,
        void                    *priv)
{
        __be32                  *agfl_bno;
        unsigned int            i;
        int                     error;

        agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
        i = be32_to_cpu(agf->agf_flfirst);

        /* Nothing to walk in an empty AGFL. */
        if (agf->agf_flcount == cpu_to_be32(0))
                return 0;

        /* Otherwise, walk from first to last, wrapping as needed. */
        for (;;) {
                error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
                if (error)
                        return error;
                if (i == be32_to_cpu(agf->agf_fllast))
                        break;
                if (++i == xfs_agfl_size(mp))
                        i = 0;
        }

        return 0;
}
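
/*
 * Illustrative sketch, not part of the original source: a trivial
 * xfs_agfl_walk_fn that cross-checks each AGFL entry with xfs_verify_agbno()
 * above. The helper name and the use of @priv to carry the AG number are
 * assumptions made for this example.
 */
static inline int
xfs_agfl_walk_example(
        struct xfs_mount        *mp,
        xfs_agblock_t           agbno,  /* current AGFL entry */
        void                    *priv)  /* caller-supplied xfs_agnumber_t */
{
        xfs_agnumber_t          *agno = priv;

        if (!xfs_verify_agbno(mp, *agno, agbno))
                return -EFSCORRUPTED;   /* any error stops the walk */
        return 0;
}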