2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_shared.h"
23 #include "xfs_trans_resv.h"
26 #include "xfs_mount.h"
27 #include "xfs_defer.h"
28 #include "xfs_inode.h"
29 #include "xfs_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_alloc.h"
33 #include "xfs_extent_busy.h"
34 #include "xfs_error.h"
35 #include "xfs_cksum.h"
36 #include "xfs_trace.h"
37 #include "xfs_trans.h"
38 #include "xfs_buf_item.h"
41 struct workqueue_struct *xfs_alloc_wq;
43 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
45 #define XFSA_FIXUP_BNO_OK 1
46 #define XFSA_FIXUP_CNT_OK 2
48 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
49 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
50 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
51 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
52 xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
58 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
59 return XFS_RMAP_BLOCK(mp) + 1;
60 if (xfs_sb_version_hasfinobt(&mp->m_sb))
61 return XFS_FIBT_BLOCK(mp) + 1;
62 return XFS_IBT_BLOCK(mp) + 1;
66 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
67 * AGF buffer (PV 947395), we place constraints on the relationship among
68 * actual allocations for data blocks, freelist blocks, and potential file data
69 * bmap btree blocks. However, these restrictions may result in no actual space
70 * being allocated for a delayed extent: for example, a data block in some AG is
71 * allocated, but no additional block is available for the bmap btree block
72 * needed by a split of the file's bmap btree. This can lead to an infinite
73 * loop when the file gets flushed to disk and all delayed
74 * extents need to be actually allocated. To get around this, we explicitly set
75 * aside a few blocks which will not be reserved in delayed allocation.
77 * When rmap is disabled, we need to reserve 4 fsbs _per AG_ for the freelist
78 * and 4 more to handle a potential split of the file's bmap btree.
80 * When rmap is enabled, we must also be able to handle two rmap btree inserts
81 * to record both the file data extent and a new bmbt block. The bmbt block
82 * might not be in the same AG as the file data extent. In the worst case
83 * the bmap btree splits multiple levels and all the new blocks come from
84 * different AGs, so set aside enough to handle rmap btree splits in all AGs.
92 blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
93 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
94 blocks += mp->m_sb.sb_agcount * mp->m_rmap_maxlevels;
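/*
 * Illustrative, standalone sketch of the set-aside arithmetic described
 * above, using plain integers instead of struct xfs_mount.  The function
 * and parameter names are placeholders for this example only; the real
 * code reads the values from the superblock and mount structure.
 */
static unsigned int
example_set_aside_blocks(
	unsigned int	agcount,	/* number of AGs in the filesystem */
	unsigned int	agfl_reserve,	/* per-AG freelist reserve, e.g. 4 */
	int		has_rmapbt,	/* rmap btree feature enabled? */
	unsigned int	rmap_maxlevels)	/* worst-case rmap btree height */
{
	unsigned int	blocks;

	/* 4 blocks for a potential bmbt split, plus the per-AG AGFL reserve */
	blocks = 4 + agcount * agfl_reserve;

	/* with rmap, allow for an rmap btree split in every AG */
	if (has_rmapbt)
		blocks += agcount * rmap_maxlevels;

	return blocks;
}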
99 * When deciding how much space to allocate out of an AG, we limit the
100 * maximum allocation size to the size of the AG. However, we cannot use all the
101 * blocks in the AG - some are permanently used by metadata. These
102 * blocks are generally:
103 * - the AG superblock, AGF, AGI and AGFL
104 * - the AGF (bno and cnt) and AGI btree root blocks, and optionally
105 * the AGI free inode and rmap btree root blocks.
106 * - blocks on the AGFL according to xfs_alloc_set_aside() limits
107 * - the rmapbt root block
109 * The AG headers are sector sized, so the amount of space they take up is
110 * dependent on filesystem geometry. The others are all single blocks.
113 xfs_alloc_ag_max_usable(
114 struct xfs_mount *mp)
118 blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
119 blocks += XFS_ALLOC_AGFL_RESERVE;
120 blocks += 3; /* AGF, AGI btree root blocks */
121 if (xfs_sb_version_hasfinobt(&mp->m_sb))
122 blocks++; /* finobt root block */
123 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
124 blocks++; /* rmap root block */
126 return mp->m_sb.sb_agblocks - blocks;
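/*
 * Standalone sketch of the "usable blocks per AG" calculation above.  It
 * assumes the four sector-sized AG headers fit in a single filesystem
 * block, which is the common case; the real code converts sectors to
 * blocks via XFS_BB_TO_FSB()/XFS_FSS_TO_BB().  Names are illustrative.
 */
static unsigned int
example_ag_max_usable(
	unsigned int	agblocks,	/* blocks per AG */
	unsigned int	agfl_reserve,	/* AGFL reserve blocks */
	int		has_finobt,	/* free inode btree enabled? */
	int		has_rmapbt)	/* rmap btree enabled? */
{
	unsigned int	metadata = 1;	/* AG headers (sb, AGF, AGI, AGFL) */

	metadata += agfl_reserve;	/* blocks held back for the AGFL */
	metadata += 3;			/* bnobt, cntbt and inobt root blocks */
	if (has_finobt)
		metadata++;		/* finobt root block */
	if (has_rmapbt)
		metadata++;		/* rmapbt root block */

	return agblocks - metadata;
}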
130 * Lookup the record equal to [bno, len] in the btree given by cur.
132 STATIC int /* error */
134 struct xfs_btree_cur *cur, /* btree cursor */
135 xfs_agblock_t bno, /* starting block of extent */
136 xfs_extlen_t len, /* length of extent */
137 int *stat) /* success/failure */
139 cur->bc_rec.a.ar_startblock = bno;
140 cur->bc_rec.a.ar_blockcount = len;
141 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
145 * Lookup the first record greater than or equal to [bno, len]
146 * in the btree given by cur.
150 struct xfs_btree_cur *cur, /* btree cursor */
151 xfs_agblock_t bno, /* starting block of extent */
152 xfs_extlen_t len, /* length of extent */
153 int *stat) /* success/failure */
155 cur->bc_rec.a.ar_startblock = bno;
156 cur->bc_rec.a.ar_blockcount = len;
157 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
161 * Lookup the first record less than or equal to [bno, len]
162 * in the btree given by cur.
164 static int /* error */
166 struct xfs_btree_cur *cur, /* btree cursor */
167 xfs_agblock_t bno, /* starting block of extent */
168 xfs_extlen_t len, /* length of extent */
169 int *stat) /* success/failure */
171 cur->bc_rec.a.ar_startblock = bno;
172 cur->bc_rec.a.ar_blockcount = len;
173 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
177 * Update the record referred to by cur to the value given
179 * This either works (return 0) or gets an EFSCORRUPTED error.
181 STATIC int /* error */
183 struct xfs_btree_cur *cur, /* btree cursor */
184 xfs_agblock_t bno, /* starting block of extent */
185 xfs_extlen_t len) /* length of extent */
187 union xfs_btree_rec rec;
189 rec.alloc.ar_startblock = cpu_to_be32(bno);
190 rec.alloc.ar_blockcount = cpu_to_be32(len);
191 return xfs_btree_update(cur, &rec);
195 * Get the data from the pointed-to record.
199 struct xfs_btree_cur *cur, /* btree cursor */
200 xfs_agblock_t *bno, /* output: starting block of extent */
201 xfs_extlen_t *len, /* output: length of extent */
202 int *stat) /* output: success/failure */
204 union xfs_btree_rec *rec;
207 error = xfs_btree_get_rec(cur, &rec, stat);
208 if (!error && *stat == 1) {
209 *bno = be32_to_cpu(rec->alloc.ar_startblock);
210 *len = be32_to_cpu(rec->alloc.ar_blockcount);
216 * Compute aligned version of the found extent.
217 * Takes alignment and min length into account.
220 xfs_alloc_compute_aligned(
221 xfs_alloc_arg_t *args, /* allocation argument structure */
222 xfs_agblock_t foundbno, /* starting block in found extent */
223 xfs_extlen_t foundlen, /* length in found extent */
224 xfs_agblock_t *resbno, /* result block number */
225 xfs_extlen_t *reslen) /* result length */
231 /* Trim busy sections out of found extent */
232 xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len);
235 * If we have a largish extent that happens to start before min_agbno,
236 * see if we can shift it into range...
238 if (bno < args->min_agbno && bno + len > args->min_agbno) {
239 diff = args->min_agbno - bno;
246 if (args->alignment > 1 && len >= args->minlen) {
247 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
249 diff = aligned_bno - bno;
251 *resbno = aligned_bno;
252 *reslen = diff >= len ? 0 : len - diff;
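/*
 * Minimal standalone sketch of the alignment step above: round the start
 * of a candidate extent up to the requested alignment and shrink its
 * length by the amount that was skipped.  The open-coded roundup mirrors
 * the kernel macro; busy-extent trimming and min_agbno handling are left
 * out for brevity, and the names are placeholders.
 */
static void
example_compute_aligned(
	unsigned int	bno,		/* candidate start block */
	unsigned int	len,		/* candidate length */
	unsigned int	alignment,	/* required alignment, > 0 */
	unsigned int	*resbno,	/* result: aligned start block */
	unsigned int	*reslen)	/* result: usable aligned length */
{
	unsigned int	aligned_bno;
	unsigned int	diff;

	aligned_bno = ((bno + alignment - 1) / alignment) * alignment;
	diff = aligned_bno - bno;

	*resbno = aligned_bno;
	*reslen = diff >= len ? 0 : len - diff;
}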
260 * Compute best start block and diff for "near" allocations.
261 * freelen >= wantlen already checked by caller.
263 STATIC xfs_extlen_t /* difference value (absolute) */
264 xfs_alloc_compute_diff(
265 xfs_agblock_t wantbno, /* target starting block */
266 xfs_extlen_t wantlen, /* target length */
267 xfs_extlen_t alignment, /* target alignment */
268 char userdata, /* are we allocating data? */
269 xfs_agblock_t freebno, /* freespace's starting block */
270 xfs_extlen_t freelen, /* freespace's length */
271 xfs_agblock_t *newbnop) /* result: best start block from free */
273 xfs_agblock_t freeend; /* end of freespace extent */
274 xfs_agblock_t newbno1; /* return block number */
275 xfs_agblock_t newbno2; /* other new block number */
276 xfs_extlen_t newlen1=0; /* length with newbno1 */
277 xfs_extlen_t newlen2=0; /* length with newbno2 */
278 xfs_agblock_t wantend; /* end of target extent */
280 ASSERT(freelen >= wantlen);
281 freeend = freebno + freelen;
282 wantend = wantbno + wantlen;
284 * We want to allocate from the start of a free extent if it is past
285 * the desired block or if we are allocating user data and the free
286 * extent is before the desired block. The second case is there to allow
287 * for contiguous allocation from the remaining free space if the file
288 * grows in the short term.
290 if (freebno >= wantbno || (userdata && freeend < wantend)) {
291 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
292 newbno1 = NULLAGBLOCK;
293 } else if (freeend >= wantend && alignment > 1) {
294 newbno1 = roundup(wantbno, alignment);
295 newbno2 = newbno1 - alignment;
296 if (newbno1 >= freeend)
297 newbno1 = NULLAGBLOCK;
299 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
300 if (newbno2 < freebno)
301 newbno2 = NULLAGBLOCK;
303 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
304 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
305 if (newlen1 < newlen2 ||
306 (newlen1 == newlen2 &&
307 XFS_ABSDIFF(newbno1, wantbno) >
308 XFS_ABSDIFF(newbno2, wantbno)))
310 } else if (newbno2 != NULLAGBLOCK)
312 } else if (freeend >= wantend) {
314 } else if (alignment > 1) {
315 newbno1 = roundup(freeend - wantlen, alignment);
316 if (newbno1 > freeend - wantlen &&
317 newbno1 - alignment >= freebno)
318 newbno1 -= alignment;
319 else if (newbno1 >= freeend)
320 newbno1 = NULLAGBLOCK;
322 newbno1 = freeend - wantlen;
324 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
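/*
 * Simplified standalone sketch of the candidate-selection policy above,
 * for the unaligned (alignment == 1) case only.  Given a target extent
 * [wantbno, wantlen) and a free extent [freebno, freelen), it returns
 * the chosen start block and its distance from the target.  The full
 * function additionally handles alignment and the userdata heuristic;
 * names here are placeholders.
 */
static unsigned int
example_compute_diff(
	unsigned int	wantbno,	/* target start block */
	unsigned int	wantlen,	/* target length */
	unsigned int	freebno,	/* free extent start block */
	unsigned int	freelen,	/* free extent length, >= wantlen */
	unsigned int	*newbnop)	/* result: chosen start block */
{
	unsigned int	freeend = freebno + freelen;
	unsigned int	wantend = wantbno + wantlen;
	unsigned int	newbno;

	if (freebno >= wantbno)
		newbno = freebno;		/* free space is past the target */
	else if (freeend >= wantend)
		newbno = wantbno;		/* target fits inside the free space */
	else
		newbno = freeend - wantlen;	/* take the tail of the free space */

	*newbnop = newbno;
	return newbno >= wantbno ? newbno - wantbno : wantbno - newbno;
}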
328 * Fix up the length, based on mod and prod.
329 * len should be k * prod + mod for some k.
330 * If len is too small it is returned unchanged.
331 * If len hits maxlen it is left alone.
335 xfs_alloc_arg_t *args) /* allocation argument structure */
340 ASSERT(args->mod < args->prod);
342 ASSERT(rlen >= args->minlen);
343 ASSERT(rlen <= args->maxlen);
344 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
345 (args->mod == 0 && rlen < args->prod))
347 k = rlen % args->prod;
351 rlen = rlen - (k - args->mod);
353 rlen = rlen - args->prod + (args->mod - k);
354 /* casts to (int) catch length underflows */
355 if ((int)rlen < (int)args->minlen)
357 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
358 ASSERT(rlen % args->prod == args->mod);
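/*
 * Standalone sketch of the prod/mod rounding above: trim len so that
 * len % prod == mod, never growing it, and give up (returning len
 * unchanged) if the trimmed length would drop below minlen.  This
 * mirrors the arithmetic of xfs_alloc_fix_len() on plain integers
 * instead of the args structure; the names are placeholders.
 */
static unsigned int
example_fix_len(
	unsigned int	len,		/* candidate length */
	unsigned int	minlen,		/* minimum acceptable length */
	unsigned int	maxlen,		/* maximum requested length */
	unsigned int	prod,		/* length should be k * prod + mod */
	unsigned int	mod)		/* remainder, mod < prod */
{
	unsigned int	k, rlen = len;

	if (prod <= 1 || rlen < mod || rlen == maxlen ||
	    (mod == 0 && rlen < prod))
		return len;

	k = rlen % prod;
	if (k == mod)
		return rlen;
	if (k > mod)
		rlen -= k - mod;		/* drop down to the next k * prod + mod */
	else
		rlen = rlen - prod + (mod - k);

	return rlen < minlen ? len : rlen;
}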
363 * Fix up length if there is too little space left in the a.g.
364 * Return 1 if ok, 0 if too little, should give up.
367 xfs_alloc_fix_minleft(
368 xfs_alloc_arg_t *args) /* allocation argument structure */
370 xfs_agf_t *agf; /* a.g. freelist header */
371 int diff; /* free space difference */
373 if (args->minleft == 0)
375 agf = XFS_BUF_TO_AGF(args->agbp);
376 diff = be32_to_cpu(agf->agf_freeblks)
377 - args->len - args->minleft;
380 args->len += diff; /* shrink the allocated space */
381 /* casts to (int) catch length underflows */
382 if ((int)args->len >= (int)args->minlen)
384 args->agbno = NULLAGBLOCK;
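/*
 * Standalone sketch of the minleft adjustment above: if taking len blocks
 * would leave fewer than minleft free blocks in the AG, shrink the
 * allocation, and fail (return 0) if it can no longer satisfy minlen.
 * freeblks stands in for the agf_freeblks counter read from the AGF;
 * names are placeholders.
 */
static int
example_fix_minleft(
	unsigned int	freeblks,	/* free blocks currently in the AG */
	unsigned int	minleft,	/* blocks that must remain free */
	unsigned int	minlen,		/* minimum acceptable length */
	unsigned int	*len)		/* in/out: allocation length */
{
	int		diff;

	if (minleft == 0)
		return 1;
	diff = (int)freeblks - (int)*len - (int)minleft;
	if (diff >= 0)
		return 1;
	*len += diff;			/* shrink the allocated space */
	return (int)*len >= (int)minlen;
}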
389 * Update the two btrees, logically removing from freespace the extent
390 * starting at rbno, rlen blocks. The extent is contained within the
391 * actual (current) free extent fbno for flen blocks.
392 * Flags are passed in indicating whether the cursors are set to the relevant records.
395 STATIC int /* error code */
396 xfs_alloc_fixup_trees(
397 xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */
398 xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */
399 xfs_agblock_t fbno, /* starting block of free extent */
400 xfs_extlen_t flen, /* length of free extent */
401 xfs_agblock_t rbno, /* starting block of returned extent */
402 xfs_extlen_t rlen, /* length of returned extent */
403 int flags) /* flags, XFSA_FIXUP_... */
405 int error; /* error code */
406 int i; /* operation results */
407 xfs_agblock_t nfbno1; /* first new free startblock */
408 xfs_agblock_t nfbno2; /* second new free startblock */
409 xfs_extlen_t nflen1=0; /* first new free length */
410 xfs_extlen_t nflen2=0; /* second new free length */
411 struct xfs_mount *mp;
416 * Look up the record in the by-size tree if necessary.
418 if (flags & XFSA_FIXUP_CNT_OK) {
420 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
422 XFS_WANT_CORRUPTED_RETURN(mp,
423 i == 1 && nfbno1 == fbno && nflen1 == flen);
426 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
428 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
431 * Look up the record in the by-block tree if necessary.
433 if (flags & XFSA_FIXUP_BNO_OK) {
435 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
437 XFS_WANT_CORRUPTED_RETURN(mp,
438 i == 1 && nfbno1 == fbno && nflen1 == flen);
441 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
443 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
447 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
448 struct xfs_btree_block *bnoblock;
449 struct xfs_btree_block *cntblock;
451 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
452 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
454 XFS_WANT_CORRUPTED_RETURN(mp,
455 bnoblock->bb_numrecs == cntblock->bb_numrecs);
460 * Deal with all four cases: the allocated record is contained
461 * within the freespace record, so we can have new freespace
462 * at either (or both) end, or no freespace remaining.
464 if (rbno == fbno && rlen == flen)
465 nfbno1 = nfbno2 = NULLAGBLOCK;
466 else if (rbno == fbno) {
467 nfbno1 = rbno + rlen;
468 nflen1 = flen - rlen;
469 nfbno2 = NULLAGBLOCK;
470 } else if (rbno + rlen == fbno + flen) {
472 nflen1 = flen - rlen;
473 nfbno2 = NULLAGBLOCK;
476 nflen1 = rbno - fbno;
477 nfbno2 = rbno + rlen;
478 nflen2 = (fbno + flen) - nfbno2;
481 * Delete the entry from the by-size btree.
483 if ((error = xfs_btree_delete(cnt_cur, &i)))
485 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
487 * Add new by-size btree entry(s).
489 if (nfbno1 != NULLAGBLOCK) {
490 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
492 XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
493 if ((error = xfs_btree_insert(cnt_cur, &i)))
495 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
497 if (nfbno2 != NULLAGBLOCK) {
498 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
500 XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
501 if ((error = xfs_btree_insert(cnt_cur, &i)))
503 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
506 * Fix up the by-block btree entry(s).
508 if (nfbno1 == NULLAGBLOCK) {
510 * No remaining freespace, just delete the by-block tree entry.
512 if ((error = xfs_btree_delete(bno_cur, &i)))
514 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
517 * Update the by-block entry to start later|be shorter.
519 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
522 if (nfbno2 != NULLAGBLOCK) {
524 * 2 resulting free entries, need to add one.
526 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
528 XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
529 if ((error = xfs_btree_insert(bno_cur, &i)))
531 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
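/*
 * Standalone sketch of the four cases described above: given a free
 * extent [fbno, flen) and an allocated piece [rbno, rlen) contained in
 * it, compute the zero, one or two freespace records that remain.  A
 * length of 0 means "no record on that side"; the real code marks the
 * start block with NULLAGBLOCK instead.  Names are placeholders.
 */
static void
example_remaining_freespace(
	unsigned int	fbno, unsigned int flen,	/* free extent */
	unsigned int	rbno, unsigned int rlen,	/* allocated piece */
	unsigned int	*nfbno1, unsigned int *nflen1,	/* left remainder */
	unsigned int	*nfbno2, unsigned int *nflen2)	/* right remainder */
{
	*nfbno1 = *nfbno2 = 0;
	*nflen1 = *nflen2 = 0;

	if (rbno == fbno && rlen == flen) {
		/* exact match: nothing remains */
	} else if (rbno == fbno) {
		/* allocated from the front: one remainder at the back */
		*nfbno1 = rbno + rlen;
		*nflen1 = flen - rlen;
	} else if (rbno + rlen == fbno + flen) {
		/* allocated from the back: one remainder at the front */
		*nfbno1 = fbno;
		*nflen1 = rbno - fbno;
	} else {
		/* allocated from the middle: remainders on both sides */
		*nfbno1 = fbno;
		*nflen1 = rbno - fbno;
		*nfbno2 = rbno + rlen;
		*nflen2 = (fbno + flen) - *nfbno2;
	}
}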
540 struct xfs_mount *mp = bp->b_target->bt_mount;
541 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
544 if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
546 if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
549 * during growfs operations, the perag is not fully initialised,
550 * so we can't use it for any useful checking. growfs ensures we can't
551 * use it by using uncached buffers that don't have the perag attached
552 * so we can detect and avoid this problem.
554 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
557 for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
558 if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
559 be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
563 return xfs_log_check_lsn(mp,
564 be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn));
568 xfs_agfl_read_verify(
571 struct xfs_mount *mp = bp->b_target->bt_mount;
574 * There is no verification of non-crc AGFLs because mkfs does not
575 * initialise the AGFL to zero or NULL. Hence the only valid part of the
576 * AGFL is what the AGF says is active. We can't get to the AGF, so we
577 * can't verify just those entries are valid.
579 if (!xfs_sb_version_hascrc(&mp->m_sb))
582 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
583 xfs_buf_ioerror(bp, -EFSBADCRC);
584 else if (!xfs_agfl_verify(bp))
585 xfs_buf_ioerror(bp, -EFSCORRUPTED);
588 xfs_verifier_error(bp);
592 xfs_agfl_write_verify(
595 struct xfs_mount *mp = bp->b_target->bt_mount;
596 struct xfs_buf_log_item *bip = bp->b_fspriv;
598 /* no verification of non-crc AGFLs */
599 if (!xfs_sb_version_hascrc(&mp->m_sb))
602 if (!xfs_agfl_verify(bp)) {
603 xfs_buf_ioerror(bp, -EFSCORRUPTED);
604 xfs_verifier_error(bp);
609 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
611 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
614 const struct xfs_buf_ops xfs_agfl_buf_ops = {
616 .verify_read = xfs_agfl_read_verify,
617 .verify_write = xfs_agfl_write_verify,
621 * Read in the allocation group free block array.
623 STATIC int /* error */
625 xfs_mount_t *mp, /* mount point structure */
626 xfs_trans_t *tp, /* transaction pointer */
627 xfs_agnumber_t agno, /* allocation group number */
628 xfs_buf_t **bpp) /* buffer for the ag free block array */
630 xfs_buf_t *bp; /* return value */
633 ASSERT(agno != NULLAGNUMBER);
634 error = xfs_trans_read_buf(
635 mp, tp, mp->m_ddev_targp,
636 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
637 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
640 xfs_buf_set_ref(bp, XFS_AGFL_REF);
646 xfs_alloc_update_counters(
647 struct xfs_trans *tp,
648 struct xfs_perag *pag,
649 struct xfs_buf *agbp,
652 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
654 pag->pagf_freeblks += len;
655 be32_add_cpu(&agf->agf_freeblks, len);
657 xfs_trans_agblocks_delta(tp, len);
658 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
659 be32_to_cpu(agf->agf_length)))
660 return -EFSCORRUPTED;
662 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
667 * Allocation group level functions.
671 * Allocate a variable extent in the allocation group agno.
672 * Type and bno are used to determine where in the allocation group the extent will start from.
674 * Extent's length (returned in *len) will be between minlen and maxlen,
675 * and of the form k * prod + mod unless there's nothing that large.
676 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
678 STATIC int /* error */
679 xfs_alloc_ag_vextent(
680 xfs_alloc_arg_t *args) /* argument structure for allocation */
684 ASSERT(args->minlen > 0);
685 ASSERT(args->maxlen > 0);
686 ASSERT(args->minlen <= args->maxlen);
687 ASSERT(args->mod < args->prod);
688 ASSERT(args->alignment > 0);
690 * Branch to correct routine based on the type.
693 switch (args->type) {
694 case XFS_ALLOCTYPE_THIS_AG:
695 error = xfs_alloc_ag_vextent_size(args);
697 case XFS_ALLOCTYPE_NEAR_BNO:
698 error = xfs_alloc_ag_vextent_near(args);
700 case XFS_ALLOCTYPE_THIS_BNO:
701 error = xfs_alloc_ag_vextent_exact(args);
708 if (error || args->agbno == NULLAGBLOCK)
711 ASSERT(args->len >= args->minlen);
712 ASSERT(args->len <= args->maxlen);
713 ASSERT(!args->wasfromfl || !args->isfl);
714 ASSERT(args->agbno % args->alignment == 0);
716 /* if not file data, insert new block into the reverse map btree */
717 if (args->oinfo.oi_owner != XFS_RMAP_OWN_UNKNOWN) {
718 error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
719 args->agbno, args->len, &args->oinfo);
724 if (!args->wasfromfl) {
725 error = xfs_alloc_update_counters(args->tp, args->pag,
727 -((long)(args->len)));
731 ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
732 args->agbno, args->len));
736 xfs_trans_mod_sb(args->tp, args->wasdel ?
737 XFS_TRANS_SB_RES_FDBLOCKS :
738 XFS_TRANS_SB_FDBLOCKS,
739 -((long)(args->len)));
742 XFS_STATS_INC(args->mp, xs_allocx);
743 XFS_STATS_ADD(args->mp, xs_allocb, args->len);
748 * Allocate a variable extent at exactly agno/bno.
749 * Extent's length (returned in *len) will be between minlen and maxlen,
750 * and of the form k * prod + mod unless there's nothing that large.
751 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
753 STATIC int /* error */
754 xfs_alloc_ag_vextent_exact(
755 xfs_alloc_arg_t *args) /* allocation argument structure */
757 xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
758 xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
760 xfs_agblock_t fbno; /* start block of found extent */
761 xfs_extlen_t flen; /* length of found extent */
762 xfs_agblock_t tbno; /* start block of trimmed extent */
763 xfs_extlen_t tlen; /* length of trimmed extent */
764 xfs_agblock_t tend; /* end block of trimmed extent */
765 int i; /* success/failure of operation */
767 ASSERT(args->alignment == 1);
770 * Allocate/initialize a cursor for the by-number freespace btree.
772 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
773 args->agno, XFS_BTNUM_BNO);
776 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
777 * Look for the closest free block <= bno, it must contain bno
778 * if any free block does.
780 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
787 * Grab the freespace record.
789 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
792 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
793 ASSERT(fbno <= args->agbno);
796 * Check for overlapping busy extents.
798 xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);
801 * Give up if the start of the extent is busy, or the freespace isn't
802 * long enough for the minimum request.
804 if (tbno > args->agbno)
806 if (tlen < args->minlen)
809 if (tend < args->agbno + args->minlen)
813 * End of extent will be smaller of the freespace end and the
814 * maximal requested end.
816 * Fix the length according to mod and prod if given.
818 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
820 xfs_alloc_fix_len(args);
821 if (!xfs_alloc_fix_minleft(args))
824 ASSERT(args->agbno + args->len <= tend);
827 * We are allocating agbno for args->len
828 * Allocate/initialize a cursor for the by-size btree.
830 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
831 args->agno, XFS_BTNUM_CNT);
832 ASSERT(args->agbno + args->len <=
833 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
834 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
835 args->len, XFSA_FIXUP_BNO_OK);
837 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
841 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
842 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
845 trace_xfs_alloc_exact_done(args);
849 /* Didn't find it, return null. */
850 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
851 args->agbno = NULLAGBLOCK;
852 trace_xfs_alloc_exact_notfound(args);
856 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
857 trace_xfs_alloc_exact_error(args);
862 * Search the btree in a given direction via the search cursor and compare
863 * the records found against the good extent we've already found.
866 xfs_alloc_find_best_extent(
867 struct xfs_alloc_arg *args, /* allocation argument structure */
868 struct xfs_btree_cur **gcur, /* good cursor */
869 struct xfs_btree_cur **scur, /* searching cursor */
870 xfs_agblock_t gdiff, /* difference for search comparison */
871 xfs_agblock_t *sbno, /* extent found by search */
872 xfs_extlen_t *slen, /* extent length */
873 xfs_agblock_t *sbnoa, /* aligned extent found by search */
874 xfs_extlen_t *slena, /* aligned extent length */
875 int dir) /* 0 = search right, 1 = search left */
882 /* The good extent is perfect, no need to search. */
887 * Look until we find a better one, run out of space or run off the end.
890 error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
893 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
894 xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);
897 * The good extent is closer than this one.
900 if (*sbnoa > args->max_agbno)
902 if (*sbnoa >= args->agbno + gdiff)
905 if (*sbnoa < args->min_agbno)
907 if (*sbnoa <= args->agbno - gdiff)
912 * Same distance, compare length and pick the best.
914 if (*slena >= args->minlen) {
915 args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
916 xfs_alloc_fix_len(args);
918 sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
920 args->userdata, *sbnoa,
924 * Choose closer size and invalidate other cursor.
932 error = xfs_btree_increment(*scur, 0, &i);
934 error = xfs_btree_decrement(*scur, 0, &i);
940 xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
945 xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
950 /* caller invalidates cursors */
955 * Allocate a variable extent near bno in the allocation group agno.
956 * Extent's length (returned in len) will be between minlen and maxlen,
957 * and of the form k * prod + mod unless there's nothing that large.
958 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
960 STATIC int /* error */
961 xfs_alloc_ag_vextent_near(
962 xfs_alloc_arg_t *args) /* allocation argument structure */
964 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
965 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
966 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
967 xfs_agblock_t gtbno; /* start bno of right side entry */
968 xfs_agblock_t gtbnoa; /* aligned ... */
969 xfs_extlen_t gtdiff; /* difference to right side entry */
970 xfs_extlen_t gtlen; /* length of right side entry */
971 xfs_extlen_t gtlena; /* aligned ... */
972 xfs_agblock_t gtnew; /* useful start bno of right side */
973 int error; /* error code */
974 int i; /* result code, temporary */
975 int j; /* result code, temporary */
976 xfs_agblock_t ltbno; /* start bno of left side entry */
977 xfs_agblock_t ltbnoa; /* aligned ... */
978 xfs_extlen_t ltdiff; /* difference to left side entry */
979 xfs_extlen_t ltlen; /* length of left side entry */
980 xfs_extlen_t ltlena; /* aligned ... */
981 xfs_agblock_t ltnew; /* useful start bno of left side */
982 xfs_extlen_t rlen; /* length of returned extent */
986 * Randomly don't execute the first algorithm.
988 int dofirst; /* set to do first algorithm */
990 dofirst = prandom_u32() & 1;
993 /* handle uninitialized agbno range so caller doesn't have to */
994 if (!args->min_agbno && !args->max_agbno)
995 args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
996 ASSERT(args->min_agbno <= args->max_agbno);
998 /* clamp agbno to the range if it's outside */
999 if (args->agbno < args->min_agbno)
1000 args->agbno = args->min_agbno;
1001 if (args->agbno > args->max_agbno)
1002 args->agbno = args->max_agbno;
1012 * Get a cursor for the by-size btree.
1014 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1015 args->agno, XFS_BTNUM_CNT);
1018 * See if there are any free extents as big as maxlen.
1020 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
1023 * If none, then pick up the last entry in the tree unless the
1027 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, <bno,
1030 if (i == 0 || ltlen == 0) {
1031 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1032 trace_xfs_alloc_near_noentry(args);
1037 args->wasfromfl = 0;
1041 * If the requested extent is large compared to the free space available
1042 * in this a.g., then the cursor will be pointing to a btree entry
1043 * near the right edge of the tree. If it's in the last btree leaf
1044 * block, then we just examine all the entries in that block
1045 * that are big enough, and pick the best one.
1046 * This is written as a while loop so we can break out of it,
1047 * but we never loop back to the top.
1049 while (xfs_btree_islastblock(cnt_cur, 0)) {
1052 xfs_extlen_t blen=0;
1053 xfs_agblock_t bnew=0;
1060 * Start from the entry that lookup found, sequence through
1061 * all larger free blocks. If we're actually pointing at a
1062 * record smaller than maxlen, go to the start of this block,
1063 * and skip all those smaller than minlen.
1065 if (ltlen || args->alignment > 1) {
1066 cnt_cur->bc_ptrs[0] = 1;
1068 if ((error = xfs_alloc_get_rec(cnt_cur, <bno,
1071 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1072 if (ltlen >= args->minlen)
1074 if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
1077 ASSERT(ltlen >= args->minlen);
1081 i = cnt_cur->bc_ptrs[0];
1082 for (j = 1, blen = 0, bdiff = 0;
1083 !error && j && (blen < args->maxlen || bdiff > 0);
1084 error = xfs_btree_increment(cnt_cur, 0, &j)) {
1086 * For each entry, decide if it's better than
1087 * the previous best entry.
1089 if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i)))
1091 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1092 xfs_alloc_compute_aligned(args, ltbno, ltlen,
1094 if (ltlena < args->minlen)
1096 if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
1098 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1099 xfs_alloc_fix_len(args);
1100 ASSERT(args->len >= args->minlen);
1101 if (args->len < blen)
1103 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1104 args->alignment, args->userdata, ltbnoa,
1106 if (ltnew != NULLAGBLOCK &&
1107 (args->len > blen || ltdiff < bdiff)) {
1111 besti = cnt_cur->bc_ptrs[0];
1115 * It didn't work. We COULD be in a case where
1116 * there's a good record somewhere, so try again.
1121 * Point at the best entry, and retrieve it again.
1123 cnt_cur->bc_ptrs[0] = besti;
1124 if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i)))
1126 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1127 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1129 if (!xfs_alloc_fix_minleft(args)) {
1130 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1131 trace_xfs_alloc_near_nominleft(args);
1136 * We are allocating starting at bnew for blen blocks.
1139 ASSERT(bnew >= ltbno);
1140 ASSERT(bnew + blen <= ltbno + ltlen);
1142 * Set up a cursor for the by-bno tree.
1144 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
1145 args->agbp, args->agno, XFS_BTNUM_BNO);
1147 * Fix up the btree entries.
1149 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
1150 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
1152 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1153 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1155 trace_xfs_alloc_near_first(args);
1160 * Search in the by-bno tree to the left and to the right
1161 * simultaneously, until in each case we find a space big enough,
1162 * or run into the edge of the tree. When we run into the edge,
1163 * we deallocate that cursor.
1164 * If both searches succeed, we compare the two spaces and pick the better one.
1166 * With alignment, it's possible for both to fail; the upper
1167 * level algorithm that picks allocation groups for allocations
1168 * is not supposed to do this.
1171 * Allocate and initialize the cursor for the leftward search.
1173 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1174 args->agno, XFS_BTNUM_BNO);
1176 * Lookup <= bno to find the leftward search's starting point.
1178 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
1182 * Didn't find anything; use this cursor for the rightward
1185 bno_cur_gt = bno_cur_lt;
1189 * Found something. Duplicate the cursor for the rightward search.
1191 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
1194 * Increment the cursor, so we will point at the entry just right
1195 * of the leftward entry if any, or to the leftmost entry.
1197 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1201 * It failed, there are no rightward entries.
1203 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
1207 * Loop going left with the leftward cursor, right with the
1208 * rightward cursor, until either both directions give up or
1209 * we find an entry at least as big as minlen.
1213 if ((error = xfs_alloc_get_rec(bno_cur_lt, <bno, <len, &i)))
1215 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1216 xfs_alloc_compute_aligned(args, ltbno, ltlen,
1218 if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
1220 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
1222 if (!i || ltbnoa < args->min_agbno) {
1223 xfs_btree_del_cursor(bno_cur_lt,
1229 if ((error = xfs_alloc_get_rec(bno_cur_gt, >bno, >len, &i)))
1231 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1232 xfs_alloc_compute_aligned(args, gtbno, gtlen,
1234 if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
1236 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1238 if (!i || gtbnoa > args->max_agbno) {
1239 xfs_btree_del_cursor(bno_cur_gt,
1244 } while (bno_cur_lt || bno_cur_gt);
1247 * Got both cursors still active, need to find better entry.
1249 if (bno_cur_lt && bno_cur_gt) {
1250 if (ltlena >= args->minlen) {
1252 * Left side is good, look for a right side entry.
1254 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1255 xfs_alloc_fix_len(args);
1256 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1257 args->alignment, args->userdata, ltbnoa,
1260 error = xfs_alloc_find_best_extent(args,
1261 &bno_cur_lt, &bno_cur_gt,
1262 ltdiff, >bno, >len,
1264 0 /* search right */);
1266 ASSERT(gtlena >= args->minlen);
1269 * Right side is good, look for a left side entry.
1271 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1272 xfs_alloc_fix_len(args);
1273 gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1274 args->alignment, args->userdata, gtbnoa,
1277 error = xfs_alloc_find_best_extent(args,
1278 &bno_cur_gt, &bno_cur_lt,
1279 gtdiff, <bno, <len,
1281 1 /* search left */);
1289 * If we couldn't get anything, give up.
1291 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1292 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1295 trace_xfs_alloc_near_busy(args);
1296 xfs_log_force(args->mp, XFS_LOG_SYNC);
1299 trace_xfs_alloc_size_neither(args);
1300 args->agbno = NULLAGBLOCK;
1305 * At this point we have selected a freespace entry, either to the
1306 * left or to the right. If it's on the right, copy all the
1307 * useful variables to the "left" set so we only have one
1308 * copy of this code.
1311 bno_cur_lt = bno_cur_gt;
1322 * Fix up the length and compute the useful address.
1324 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1325 xfs_alloc_fix_len(args);
1326 if (!xfs_alloc_fix_minleft(args)) {
1327 trace_xfs_alloc_near_nominleft(args);
1328 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1329 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1333 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
1334 args->userdata, ltbnoa, ltlena, <new);
1335 ASSERT(ltnew >= ltbno);
1336 ASSERT(ltnew + rlen <= ltbnoa + ltlena);
1337 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1338 ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
1339 args->agbno = ltnew;
1341 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1342 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1346 trace_xfs_alloc_near_greater(args);
1348 trace_xfs_alloc_near_lesser(args);
1350 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1351 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1355 trace_xfs_alloc_near_error(args);
1356 if (cnt_cur != NULL)
1357 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1358 if (bno_cur_lt != NULL)
1359 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1360 if (bno_cur_gt != NULL)
1361 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1366 * Allocate a variable extent anywhere in the allocation group agno.
1367 * Extent's length (returned in len) will be between minlen and maxlen,
1368 * and of the form k * prod + mod unless there's nothing that large.
1369 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1371 STATIC int /* error */
1372 xfs_alloc_ag_vextent_size(
1373 xfs_alloc_arg_t *args) /* allocation argument structure */
1375 xfs_btree_cur_t *bno_cur; /* cursor for bno btree */
1376 xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */
1377 int error; /* error result */
1378 xfs_agblock_t fbno; /* start of found freespace */
1379 xfs_extlen_t flen; /* length of found freespace */
1380 int i; /* temp status variable */
1381 xfs_agblock_t rbno; /* returned block number */
1382 xfs_extlen_t rlen; /* length of returned extent */
1387 * Allocate and initialize a cursor for the by-size btree.
1389 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1390 args->agno, XFS_BTNUM_CNT);
1394 * Look for an entry >= maxlen+alignment-1 blocks.
1396 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1397 args->maxlen + args->alignment - 1, &i)))
1401 * If none or we have busy extents that we cannot allocate from, then
1402 * we have to settle for a smaller extent. In the case that there are
1403 * no large extents, this will return the last entry in the tree unless
1404 * the tree is empty. In the case that there are only busy large
1405 * extents, this will return the largest small extent unless there
1406 * are no smaller extents available.
1408 if (!i || forced > 1) {
1409 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1413 if (i == 0 || flen == 0) {
1414 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1415 trace_xfs_alloc_size_noentry(args);
1419 xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
1422 * Search for a non-busy extent that is large enough.
1423 * If we are at low space, don't check, or if we fall off
1424 * the end of the btree, turn off the busy check and restart the search.
1428 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1431 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1433 xfs_alloc_compute_aligned(args, fbno, flen,
1436 if (rlen >= args->maxlen)
1439 error = xfs_btree_increment(cnt_cur, 0, &i);
1444 * Our only valid extents must have been busy.
1445 * Make it unbusy by forcing the log out and
1446 * retrying. If we've been here before, forcing
1447 * the log isn't making the extents available,
1448 * which means they have probably been freed in
1449 * this transaction. In that case, we have to
1450 * give up on them and we'll attempt a minlen
1451 * allocation the next time around.
1453 xfs_btree_del_cursor(cnt_cur,
1455 trace_xfs_alloc_size_busy(args);
1457 xfs_log_force(args->mp, XFS_LOG_SYNC);
1464 * In the first case above, we got the last entry in the
1465 * by-size btree. Now we check to see if the space hits maxlen
1466 * once aligned; if not, we search left for something better.
1467 * This can't happen in the second case above.
1469 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1470 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
1471 (rlen <= flen && rbno + rlen <= fbno + flen), error0);
1472 if (rlen < args->maxlen) {
1473 xfs_agblock_t bestfbno;
1474 xfs_extlen_t bestflen;
1475 xfs_agblock_t bestrbno;
1476 xfs_extlen_t bestrlen;
1483 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1487 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1490 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1491 if (flen < bestrlen)
1493 xfs_alloc_compute_aligned(args, fbno, flen,
1495 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1496 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
1497 (rlen <= flen && rbno + rlen <= fbno + flen),
1499 if (rlen > bestrlen) {
1504 if (rlen == args->maxlen)
1508 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1511 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1517 args->wasfromfl = 0;
1519 * Fix up the length.
1522 if (rlen < args->minlen) {
1524 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1525 trace_xfs_alloc_size_busy(args);
1526 xfs_log_force(args->mp, XFS_LOG_SYNC);
1531 xfs_alloc_fix_len(args);
1533 if (!xfs_alloc_fix_minleft(args))
1536 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
1538 * Allocate and initialize a cursor for the by-block tree.
1540 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1541 args->agno, XFS_BTNUM_BNO);
1542 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1543 rbno, rlen, XFSA_FIXUP_CNT_OK)))
1545 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1546 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1547 cnt_cur = bno_cur = NULL;
1550 XFS_WANT_CORRUPTED_GOTO(args->mp,
1551 args->agbno + args->len <=
1552 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1554 trace_xfs_alloc_size_done(args);
1558 trace_xfs_alloc_size_error(args);
1560 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1562 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1566 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1567 trace_xfs_alloc_size_nominleft(args);
1568 args->agbno = NULLAGBLOCK;
1573 * Deal with the case where only small freespaces remain.
1574 * Either return the contents of the last freespace record,
1575 * or allocate space from the freelist if there is nothing in the tree.
1577 STATIC int /* error */
1578 xfs_alloc_ag_vextent_small(
1579 xfs_alloc_arg_t *args, /* allocation argument structure */
1580 xfs_btree_cur_t *ccur, /* by-size cursor */
1581 xfs_agblock_t *fbnop, /* result block number */
1582 xfs_extlen_t *flenp, /* result length */
1583 int *stat) /* status: 0-freelist, 1-normal/none */
1590 if ((error = xfs_btree_decrement(ccur, 0, &i)))
1593 if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
1595 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1598 * Nothing in the btree, try the freelist. Make sure
1599 * to respect minleft even when pulling from the freelist.
1602 else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
1603 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
1605 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1608 if (fbno != NULLAGBLOCK) {
1609 xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
1612 if (args->userdata) {
1615 bp = xfs_btree_get_bufs(args->mp, args->tp,
1616 args->agno, fbno, 0);
1617 xfs_trans_binval(args->tp, bp);
1621 XFS_WANT_CORRUPTED_GOTO(args->mp,
1622 args->agbno + args->len <=
1623 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1625 args->wasfromfl = 1;
1626 trace_xfs_alloc_small_freelist(args);
1631 * Nothing in the freelist.
1637 * Can't allocate from the freelist for some reason.
1644 * Can't do the allocation, give up.
1646 if (flen < args->minlen) {
1647 args->agbno = NULLAGBLOCK;
1648 trace_xfs_alloc_small_notenough(args);
1654 trace_xfs_alloc_small_done(args);
1658 trace_xfs_alloc_small_error(args);
1663 * Free the extent starting at agno/bno for length.
1669 xfs_agnumber_t agno,
1672 struct xfs_owner_info *oinfo,
1675 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
1676 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
1677 int error; /* error return value */
1678 xfs_agblock_t gtbno; /* start of right neighbor block */
1679 xfs_extlen_t gtlen; /* length of right neighbor block */
1680 int haveleft; /* have a left neighbor block */
1681 int haveright; /* have a right neighbor block */
1682 int i; /* temp, result code */
1683 xfs_agblock_t ltbno; /* start of left neighbor block */
1684 xfs_extlen_t ltlen; /* length of left neighbor block */
1685 xfs_mount_t *mp; /* mount point struct for filesystem */
1686 xfs_agblock_t nbno; /* new starting block of freespace */
1687 xfs_extlen_t nlen; /* new length of freespace */
1688 xfs_perag_t *pag; /* per allocation group data */
1690 bno_cur = cnt_cur = NULL;
1693 if (oinfo->oi_owner != XFS_RMAP_OWN_UNKNOWN) {
1694 error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1700 * Allocate and initialize a cursor for the by-block btree.
1702 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1704 * Look for a neighboring block on the left (lower block numbers)
1705 * that is contiguous with this space.
1707 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1711 * There is a block to our left.
1713 if ((error = xfs_alloc_get_rec(bno_cur, <bno, <len, &i)))
1715 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1717 * It's not contiguous, though.
1719 if (ltbno + ltlen < bno)
1723 * If this failure happens the request to free this
1724 * space was invalid, it's (partly) already free.
1727 XFS_WANT_CORRUPTED_GOTO(mp,
1728 ltbno + ltlen <= bno, error0);
1732 * Look for a neighboring block on the right (higher block numbers)
1733 * that is contiguous with this space.
1735 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1739 * There is a block to our right.
1741 if ((error = xfs_alloc_get_rec(bno_cur, >bno, >len, &i)))
1743 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1745 * It's not contiguous, though.
1747 if (bno + len < gtbno)
1751 * If this failure happens the request to free this
1752 * space was invalid, it's (partly) already free.
1755 XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
1759 * Now allocate and initialize a cursor for the by-size tree.
1761 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1763 * Have both left and right contiguous neighbors.
1764 * Merge all three into a single free block.
1766 if (haveleft && haveright) {
1768 * Delete the old by-size entry on the left.
1770 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1772 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1773 if ((error = xfs_btree_delete(cnt_cur, &i)))
1775 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1777 * Delete the old by-size entry on the right.
1779 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1781 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1782 if ((error = xfs_btree_delete(cnt_cur, &i)))
1784 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1786 * Delete the old by-block entry for the right block.
1788 if ((error = xfs_btree_delete(bno_cur, &i)))
1790 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1792 * Move the by-block cursor back to the left neighbor.
1794 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1796 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1799 * Check that this is the right record: delete didn't
1800 * mangle the cursor.
1803 xfs_agblock_t xxbno;
1806 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1809 XFS_WANT_CORRUPTED_GOTO(mp,
1810 i == 1 && xxbno == ltbno && xxlen == ltlen,
1815 * Update remaining by-block entry to the new, joined block.
1818 nlen = len + ltlen + gtlen;
1819 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1823 * Have only a left contiguous neighbor.
1824 * Merge it together with the new freespace.
1826 else if (haveleft) {
1828 * Delete the old by-size entry on the left.
1830 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1832 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1833 if ((error = xfs_btree_delete(cnt_cur, &i)))
1835 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1837 * Back up the by-block cursor to the left neighbor, and
1838 * update its length.
1840 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1842 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1845 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1849 * Have only a right contiguous neighbor.
1850 * Merge it together with the new freespace.
1852 else if (haveright) {
1854 * Delete the old by-size entry on the right.
1856 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1858 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1859 if ((error = xfs_btree_delete(cnt_cur, &i)))
1861 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1863 * Update the starting block and length of the right
1864 * neighbor in the by-block tree.
1868 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1872 * No contiguous neighbors.
1873 * Insert the new freespace into the by-block tree.
1878 if ((error = xfs_btree_insert(bno_cur, &i)))
1880 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1882 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1885 * In all cases we need to insert the new freespace in the by-size tree.
1887 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1889 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
1890 if ((error = xfs_btree_insert(cnt_cur, &i)))
1892 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1893 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1897 * Update the freespace totals in the ag and superblock.
1899 pag = xfs_perag_get(mp, agno);
1900 error = xfs_alloc_update_counters(tp, pag, agbp, len);
1906 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
1907 XFS_STATS_INC(mp, xs_freex);
1908 XFS_STATS_ADD(mp, xs_freeb, len);
1910 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
1915 trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
1917 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1919 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1924 * Visible (exported) allocation/free functions.
1925 * Some of these are used just by xfs_alloc_btree.c and this file.
1929 * Compute and fill in value of m_ag_maxlevels.
1932 xfs_alloc_compute_maxlevels(
1933 xfs_mount_t *mp) /* file system mount structure */
1935 mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_alloc_mnr,
1936 (mp->m_sb.sb_agblocks + 1) / 2);
1940 * Find the length of the longest extent in an AG.
1943 xfs_alloc_longest_free_extent(
1944 struct xfs_mount *mp,
1945 struct xfs_perag *pag,
1948 xfs_extlen_t delta = 0;
1950 if (need > pag->pagf_flcount)
1951 delta = need - pag->pagf_flcount;
1953 if (pag->pagf_longest > delta)
1954 return pag->pagf_longest - delta;
1955 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
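/*
 * Standalone sketch of the calculation above: the longest allocatable
 * extent is the longest free extent on record, reduced by however many
 * blocks the freelist would still need to steal to reach its required
 * size.  When that reduction would consume the whole extent, fall back
 * to reporting a single block if the AG has any recorded free space at
 * all, matching the code above.  Names are placeholders.
 */
static unsigned int
example_longest_free_extent(
	unsigned int	longest,	/* pagf_longest */
	unsigned int	flcount,	/* current freelist length */
	unsigned int	need)		/* required freelist length */
{
	unsigned int	delta = 0;

	if (need > flcount)
		delta = need - flcount;

	if (longest > delta)
		return longest - delta;
	return flcount > 0 || longest > 0;
}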
1959 xfs_alloc_min_freelist(
1960 struct xfs_mount *mp,
1961 struct xfs_perag *pag)
1963 unsigned int min_free;
1965 /* space needed by-bno freespace btree */
1966 min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
1967 mp->m_ag_maxlevels);
1968 /* space needed by-size freespace btree */
1969 min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
1970 mp->m_ag_maxlevels);
1971 /* space needed reverse mapping used space btree */
1972 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1973 min_free += min_t(unsigned int,
1974 pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
1975 mp->m_rmap_maxlevels);
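/*
 * Standalone sketch of the minimum freelist length computed above: one
 * block per level of each free space btree, plus one for a potential
 * split, capped at the maximum possible btree height, and likewise for
 * the rmap btree when that feature is enabled.  Names are placeholders.
 */
static unsigned int example_min(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int
example_min_freelist(
	unsigned int	bno_levels,	/* current by-bno btree height */
	unsigned int	cnt_levels,	/* current by-size btree height */
	unsigned int	ag_maxlevels,	/* max possible allocbt height */
	int		has_rmapbt,	/* rmap btree enabled? */
	unsigned int	rmap_levels,	/* current rmap btree height */
	unsigned int	rmap_maxlevels)	/* max possible rmapbt height */
{
	unsigned int	min_free;

	min_free = example_min(bno_levels + 1, ag_maxlevels);
	min_free += example_min(cnt_levels + 1, ag_maxlevels);
	if (has_rmapbt)
		min_free += example_min(rmap_levels + 1, rmap_maxlevels);
	return min_free;
}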
1981 * Check if the operation we are fixing up the freelist for should go ahead or
1982 * not. If we are freeing blocks, we always allow it, otherwise the allocation
1983 * is dependent on whether the size and shape of free space available will
1984 * permit the requested allocation to take place.
1987 xfs_alloc_space_available(
1988 struct xfs_alloc_arg *args,
1989 xfs_extlen_t min_free,
1992 struct xfs_perag *pag = args->pag;
1993 xfs_extlen_t longest;
1996 if (flags & XFS_ALLOC_FLAG_FREEING)
1999 /* do we have enough contiguous free space for the allocation? */
2000 longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free);
2001 if ((args->minlen + args->alignment + args->minalignslop - 1) > longest)
2004 /* do we have enough free space remaining for the allocation? */
2005 available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
2006 min_free - args->total);
2007 if (available < (int)args->minleft)
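/*
 * Standalone sketch of the two checks above: the AG must have a single
 * free extent long enough for the worst-case aligned request, and enough
 * blocks overall to cover the allocation, the freelist top-up and the
 * caller's minleft reservation.  All counters are passed in as plain
 * integers here; the real code reads them from the perag and the args
 * structure, and the names are placeholders.
 */
static int
example_space_available(
	unsigned int	longest,	/* longest allocatable extent */
	unsigned int	freeblks,	/* free blocks in the AG */
	unsigned int	flcount,	/* blocks already on the freelist */
	unsigned int	min_free,	/* required freelist length */
	unsigned int	minlen,		/* minimum extent length */
	unsigned int	alignment,	/* required alignment */
	unsigned int	minalignslop,	/* worst-case alignment slop */
	unsigned int	total,		/* total blocks the caller needs */
	unsigned int	minleft)	/* blocks to leave for later */
{
	int		available;

	if (minlen + alignment + minalignslop - 1 > longest)
		return 0;

	available = (int)(freeblks + flcount - min_free - total);
	return available >= (int)minleft;
}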
2014 * Decide whether to use this allocation group for this allocation.
2015 * If so, fix up the btree freelist's size.
2018 xfs_alloc_fix_freelist(
2019 struct xfs_alloc_arg *args, /* allocation argument structure */
2020 int flags) /* XFS_ALLOC_FLAG_... */
2022 struct xfs_mount *mp = args->mp;
2023 struct xfs_perag *pag = args->pag;
2024 struct xfs_trans *tp = args->tp;
2025 struct xfs_buf *agbp = NULL;
2026 struct xfs_buf *agflbp = NULL;
2027 struct xfs_alloc_arg targs; /* local allocation arguments */
2028 xfs_agblock_t bno; /* freelist block */
2029 xfs_extlen_t need; /* total blocks needed in freelist */
2032 if (!pag->pagf_init) {
2033 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2036 if (!pag->pagf_init) {
2037 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2038 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2039 goto out_agbp_relse;
2044 * If this is a metadata preferred pag and we are user data then try
2045 * somewhere else if we are not being asked to try harder at this point.
2048 if (pag->pagf_metadata && args->userdata &&
2049 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2050 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2051 goto out_agbp_relse;
2054 need = xfs_alloc_min_freelist(mp, pag);
2055 if (!xfs_alloc_space_available(args, need, flags))
2056 goto out_agbp_relse;
2059 * Get the a.g. freespace buffer.
2060 * Can fail if we're not blocking on locks, and it's held.
2063 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2067 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2068 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2073 /* If there isn't enough total space or a large enough single extent, reject it. */
2074 need = xfs_alloc_min_freelist(mp, pag);
2075 if (!xfs_alloc_space_available(args, need, flags))
2076 goto out_agbp_relse;
2079 * Make the freelist shorter if it's too long.
2081 * Note that from this point onwards, we will always release the agf and
2082 * agfl buffers on error. This handles the case where we error out and
2083 * the buffers are clean or may not have been joined to the transaction
2084 * and hence need to be released manually. If they have been joined to
2085 * the transaction, then xfs_trans_brelse() will handle them
2086 * appropriately based on the recursion count and dirty state of the buffer.
2089 * XXX (dgc): When we have lots of free space, does this buy us
2090 * anything other than extra overhead when we need to put more blocks
2091 * back on the free list? Maybe we should only do this when space is
2092 * getting low or the AGFL is more than half full?
2094 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2095 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2096 * updating the rmapbt. Both flags are used in xfs_repair while we're
2097 * rebuilding the rmapbt, and neither are used by the kernel. They're
2098 * both required to ensure that rmaps are correctly recorded for the
2099 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2100 * repair/rmap.c in xfsprogs for details.
2102 memset(&targs, 0, sizeof(targs));
2103 if (flags & XFS_ALLOC_FLAG_NORMAP)
2104 xfs_rmap_skip_owner_update(&targs.oinfo);
2106 xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
2107 while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
2110 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2112 goto out_agbp_relse;
2113 error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1,
2116 goto out_agbp_relse;
2117 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
2118 xfs_trans_binval(tp, bp);
2124 targs.agno = args->agno;
2125 targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
2126 targs.type = XFS_ALLOCTYPE_THIS_AG;
2128 error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2130 goto out_agbp_relse;
2132 /* Make the freelist longer if it's too short. */
2133 while (pag->pagf_flcount < need) {
2135 targs.maxlen = need - pag->pagf_flcount;
2137 /* Allocate as many blocks as possible at once. */
2138 error = xfs_alloc_ag_vextent(&targs);
2140 goto out_agflbp_relse;
2143 * Stop if we run out. Won't happen if callers are obeying
2144 * the restrictions correctly. Can happen for free calls
2145 * on a completely full ag.
2147 if (targs.agbno == NULLAGBLOCK) {
2148 if (flags & XFS_ALLOC_FLAG_FREEING)
2150 goto out_agflbp_relse;
2153 * Put each allocated block on the list.
2155 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2156 error = xfs_alloc_put_freelist(tp, agbp,
2159 goto out_agflbp_relse;
2162 xfs_trans_brelse(tp, agflbp);
2167 xfs_trans_brelse(tp, agflbp);
2170 xfs_trans_brelse(tp, agbp);
2177 * Get a block from the freelist.
2178 * Returns with the buffer for the block gotten.
2181 xfs_alloc_get_freelist(
2182 xfs_trans_t *tp, /* transaction pointer */
2183 xfs_buf_t *agbp, /* buffer containing the agf structure */
2184 xfs_agblock_t *bnop, /* block address retrieved from freelist */
2185 int btreeblk) /* destination is an AGF btree */
2187 xfs_agf_t *agf; /* a.g. freespace structure */
2188 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
2189 xfs_agblock_t bno; /* block number returned */
2193 xfs_mount_t *mp = tp->t_mountp;
2194 xfs_perag_t *pag; /* per allocation group data */
2197 * Freelist is empty, give up.
2199 agf = XFS_BUF_TO_AGF(agbp);
2200 if (!agf->agf_flcount) {
2201 *bnop = NULLAGBLOCK;
2205 * Read the array of free blocks.
2207 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2214 * Get the block number and update the data structures.
2216 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2217 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2218 be32_add_cpu(&agf->agf_flfirst, 1);
2219 xfs_trans_brelse(tp, agflbp);
2220 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
2221 agf->agf_flfirst = 0;
2223 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2224 be32_add_cpu(&agf->agf_flcount, -1);
2225 xfs_trans_agflist_delta(tp, -1);
2226 pag->pagf_flcount--;
2229 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2230 if (btreeblk) {
2231 be32_add_cpu(&agf->agf_btreeblks, 1);
2232 pag->pagf_btreeblks++;
2233 logflags |= XFS_AGF_BTREEBLKS;
2234 }
2236 xfs_alloc_log_agf(tp, agbp, logflags);
2237 *bnop = bno;
2239 return 0;
2240 }
2243 * Log the given fields from the agf structure.
2245 void
2246 xfs_alloc_log_agf(
2247 xfs_trans_t *tp, /* transaction pointer */
2248 xfs_buf_t *bp, /* buffer for a.g. freelist header */
2249 int fields) /* mask of fields to be logged (XFS_AGF_...) */
2251 int first; /* first byte offset */
2252 int last; /* last byte offset */
2253 static const short offsets[] = {
2254 offsetof(xfs_agf_t, agf_magicnum),
2255 offsetof(xfs_agf_t, agf_versionnum),
2256 offsetof(xfs_agf_t, agf_seqno),
2257 offsetof(xfs_agf_t, agf_length),
2258 offsetof(xfs_agf_t, agf_roots[0]),
2259 offsetof(xfs_agf_t, agf_levels[0]),
2260 offsetof(xfs_agf_t, agf_flfirst),
2261 offsetof(xfs_agf_t, agf_fllast),
2262 offsetof(xfs_agf_t, agf_flcount),
2263 offsetof(xfs_agf_t, agf_freeblks),
2264 offsetof(xfs_agf_t, agf_longest),
2265 offsetof(xfs_agf_t, agf_btreeblks),
2266 offsetof(xfs_agf_t, agf_uuid),
2267 sizeof(xfs_agf_t)
2268 };
2270 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2272 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
2274 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2275 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2276 }
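/*
 * Illustrative sketch, not from the original source: callers of
 * xfs_alloc_log_agf() pass a mask of XFS_AGF_* bits and the offsets[] table
 * above translates that mask into the byte range handed to
 * xfs_trans_log_buf().  The hypothetical caller below logs only the fields
 * it changed, the freelist head and count.
 */
STATIC void
xfs_alloc_log_agf_flcount_example(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp)	/* locked AGF buffer */
{
	/* Log just agf_flfirst and agf_flcount, not the whole AGF. */
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
}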
2279 * Interface for inode allocation to force the pag data to be initialized.
2282 xfs_alloc_pagf_init(
2283 xfs_mount_t *mp, /* file system mount structure */
2284 xfs_trans_t *tp, /* transaction pointer */
2285 xfs_agnumber_t agno, /* allocation group number */
2286 int flags) /* XFS_ALLOC_FLAGS_... */
2288 xfs_buf_t *bp;
2289 int error;
2291 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2292 return error;
2293 if (bp)
2294 xfs_trans_brelse(tp, bp);
2295 return 0;
2296 }
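/*
 * Illustrative sketch, not part of the original file: the inode allocator
 * uses the helper above so the in-core per-AG counters are valid before it
 * inspects them.  The function name below is hypothetical; with TRYLOCK the
 * pag may legitimately remain uninitialised, hence the pagf_init check.
 */
STATIC int
xfs_alloc_pagf_freeblks_example(
	xfs_mount_t	*mp,	/* file system mount structure */
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_agnumber_t	agno,	/* allocation group number */
	xfs_extlen_t	*freeblks) /* out: free blocks in the AG, or 0 */
{
	xfs_perag_t	*pag;
	int		error;

	error = xfs_alloc_pagf_init(mp, tp, agno, XFS_ALLOC_FLAG_TRYLOCK);
	if (error)
		return error;
	pag = xfs_perag_get(mp, agno);
	*freeblks = pag->pagf_init ? pag->pagf_freeblks : 0;
	xfs_perag_put(pag);
	return 0;
}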
2299 * Put the block on the freelist for the allocation group.
2302 xfs_alloc_put_freelist(
2303 xfs_trans_t *tp, /* transaction pointer */
2304 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2305 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
2306 xfs_agblock_t bno, /* block being freed */
2307 int btreeblk) /* block came from an AGF btree */
2309 xfs_agf_t *agf; /* a.g. freespace structure */
2310 __be32 *blockp;/* pointer to array entry */
2311 int error;
2312 int logflags;
2313 xfs_mount_t *mp; /* mount structure */
2314 xfs_perag_t *pag; /* per allocation group data */
2315 __be32 *agfl_bno;
2316 int startoff;
2318 agf = XFS_BUF_TO_AGF(agbp);
2319 mp = tp->t_mountp;
2321 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2322 be32_to_cpu(agf->agf_seqno), &agflbp)))
2323 return error;
2324 be32_add_cpu(&agf->agf_fllast, 1);
2325 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
2326 agf->agf_fllast = 0;
2328 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2329 be32_add_cpu(&agf->agf_flcount, 1);
2330 xfs_trans_agflist_delta(tp, 1);
2331 pag->pagf_flcount++;
2333 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2334 if (btreeblk) {
2335 be32_add_cpu(&agf->agf_btreeblks, -1);
2336 pag->pagf_btreeblks--;
2337 logflags |= XFS_AGF_BTREEBLKS;
2338 }
2339 xfs_perag_put(pag);
2341 xfs_alloc_log_agf(tp, agbp, logflags);
2343 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
2345 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2346 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
2347 *blockp = cpu_to_be32(bno);
2348 startoff = (char *)blockp - (char *)agflbp->b_addr;
2350 xfs_alloc_log_agf(tp, agbp, logflags);
2352 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2353 xfs_trans_log_buf(tp, agflbp, startoff,
2354 startoff + sizeof(xfs_agblock_t) - 1);
2355 return 0;
2356 }
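/*
 * Illustrative sketch, not from the original source: recycling a block
 * through the AGFL with the two helpers above.  Passing btreeblk == 1 makes
 * both calls adjust agf_btreeblks as shown in their bodies.  The function
 * name is hypothetical and the caller is assumed to hold the AGF buffer from
 * xfs_alloc_read_agf() in the same transaction.
 */
STATIC int
xfs_alloc_agfl_roundtrip_example(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp)	/* locked AGF buffer */
{
	xfs_agblock_t	bno;	/* block pulled off the freelist */
	int		error;

	/* Pull one block off the freelist for use as a btree block. */
	error = xfs_alloc_get_freelist(tp, agbp, &bno, 1);
	if (error || bno == NULLAGBLOCK)
		return error;
	/* ... the block would be used as a btree block here ... */
	/* Give it back; put_freelist reads the AGFL itself when agflbp is NULL. */
	return xfs_alloc_put_freelist(tp, agbp, NULL, bno, 1);
}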
2358 static bool
2359 xfs_agf_verify(
2360 struct xfs_mount *mp,
2361 struct xfs_buf *bp)
2363 struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
2365 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2366 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
2367 return false;
2368 if (!xfs_log_check_lsn(mp,
2369 be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
2370 return false;
2371 }
2373 if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2374 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2375 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2376 be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
2377 be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
2378 be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
2379 return false;
2381 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
2382 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
2383 return false;
2385 if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2386 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)
2387 return false;
2390 * during growfs operations, the perag is not fully initialised,
2391 * so we can't use it for any useful checking. growfs ensures we can't
2392 * use it by using uncached buffers that don't have the perag attached
2393 * so we can detect and avoid this problem.
2395 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2396 return false;
2398 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2399 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2400 return false;
2402 return true;
2404 }
2406 static void
2407 xfs_agf_read_verify(
2408 struct xfs_buf *bp)
2410 struct xfs_mount *mp = bp->b_target->bt_mount;
2412 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2413 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
2414 xfs_buf_ioerror(bp, -EFSBADCRC);
2415 else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp,
2416 XFS_ERRTAG_ALLOC_READ_AGF,
2417 XFS_RANDOM_ALLOC_READ_AGF))
2418 xfs_buf_ioerror(bp, -EFSCORRUPTED);
2420 if (bp->b_error)
2421 xfs_verifier_error(bp);
2422 }
2424 static void
2425 xfs_agf_write_verify(
2426 struct xfs_buf *bp)
2428 struct xfs_mount *mp = bp->b_target->bt_mount;
2429 struct xfs_buf_log_item *bip = bp->b_fspriv;
2431 if (!xfs_agf_verify(mp, bp)) {
2432 xfs_buf_ioerror(bp, -EFSCORRUPTED);
2433 xfs_verifier_error(bp);
2434 return;
2435 }
2437 if (!xfs_sb_version_hascrc(&mp->m_sb))
2438 return;
2440 if (bip)
2441 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2443 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
2446 const struct xfs_buf_ops xfs_agf_buf_ops = {
2448 .verify_read = xfs_agf_read_verify,
2449 .verify_write = xfs_agf_write_verify,
2453 * Read in the allocation group header (free/alloc section).
2455 int /* error */
2456 xfs_read_agf(
2457 struct xfs_mount *mp, /* mount point structure */
2458 struct xfs_trans *tp, /* transaction pointer */
2459 xfs_agnumber_t agno, /* allocation group number */
2460 int flags, /* XFS_BUF_ */
2461 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2463 int error;
2465 trace_xfs_read_agf(mp, agno);
2467 ASSERT(agno != NULLAGNUMBER);
2468 error = xfs_trans_read_buf(
2469 mp, tp, mp->m_ddev_targp,
2470 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2471 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
2472 if (error)
2473 return error;
2474 if (!*bpp)
2475 return 0;
2477 ASSERT(!(*bpp)->b_error);
2478 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
2479 return 0;
2480 }
2483 * Read in the allocation group header (free/alloc section).
2485 int /* error */
2486 xfs_alloc_read_agf(
2487 struct xfs_mount *mp, /* mount point structure */
2488 struct xfs_trans *tp, /* transaction pointer */
2489 xfs_agnumber_t agno, /* allocation group number */
2490 int flags, /* XFS_ALLOC_FLAG_... */
2491 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2493 struct xfs_agf *agf; /* ag freelist header */
2494 struct xfs_perag *pag; /* per allocation group data */
2495 int error;
2497 trace_xfs_alloc_read_agf(mp, agno);
2499 ASSERT(agno != NULLAGNUMBER);
2500 error = xfs_read_agf(mp, tp, agno,
2501 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2502 bpp);
2503 if (error)
2504 return error;
2505 if (!*bpp)
2506 return 0;
2507 ASSERT(!(*bpp)->b_error);
2509 agf = XFS_BUF_TO_AGF(*bpp);
2510 pag = xfs_perag_get(mp, agno);
2511 if (!pag->pagf_init) {
2512 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2513 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2514 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2515 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2516 pag->pagf_levels[XFS_BTNUM_BNOi] =
2517 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2518 pag->pagf_levels[XFS_BTNUM_CNTi] =
2519 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2520 pag->pagf_levels[XFS_BTNUM_RMAPi] =
2521 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
2522 spin_lock_init(&pag->pagb_lock);
2523 pag->pagb_count = 0;
2524 pag->pagb_tree = RB_ROOT;
2525 pag->pagf_init = 1;
2526 }
2528 else if (!XFS_FORCED_SHUTDOWN(mp)) {
2529 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
2530 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2531 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2532 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2533 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2534 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2535 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2536 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2537 }
2539 xfs_perag_put(pag);
2540 return 0;
2541 }
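/*
 * Illustrative sketch, not from the original source: a caller that wants the
 * longest free extent in an AG reads the AGF through the function above,
 * which fills in the in-core per-AG fields on first use, and then consults
 * the cached copy.  The function name is hypothetical.
 */
STATIC int
xfs_alloc_longest_example(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	xfs_extlen_t		*longest) /* out: longest free extent */
{
	struct xfs_buf		*agbp;
	struct xfs_perag	*pag;
	int			error;

	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
	if (error)
		return error;
	pag = xfs_perag_get(mp, agno);
	*longest = pag->pagf_longest;
	xfs_perag_put(pag);
	xfs_trans_brelse(tp, agbp);
	return 0;
}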
2544 * Allocate an extent (variable-size).
2545 * Depending on the allocation type, we either look in a single allocation
2546 * group or loop over the allocation groups to find the result.
2548 int /* error */
2549 xfs_alloc_vextent(
2550 xfs_alloc_arg_t *args) /* allocation argument structure */
2552 xfs_agblock_t agsize; /* allocation group size */
2553 int error;
2554 int flags; /* XFS_ALLOC_FLAG_... locking flags */
2555 xfs_extlen_t minleft;/* minimum left value, temp copy */
2556 xfs_mount_t *mp; /* mount structure pointer */
2557 xfs_agnumber_t sagno; /* starting allocation group number */
2558 xfs_alloctype_t type; /* input allocation type */
2559 int bump_rotor = 0;
2560 int no_min = 0;
2561 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2563 mp = args->mp;
2564 type = args->otype = args->type;
2565 args->agbno = NULLAGBLOCK;
2567 * Just fix this up, for the case where the last a.g. is shorter
2568 * (or there's only one a.g.) and the caller couldn't easily figure
2569 * that out (xfs_bmap_alloc).
2571 agsize = mp->m_sb.sb_agblocks;
2572 if (args->maxlen > agsize)
2573 args->maxlen = agsize;
2574 if (args->alignment == 0)
2575 args->alignment = 1;
2576 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2577 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2578 ASSERT(args->minlen <= args->maxlen);
2579 ASSERT(args->minlen <= agsize);
2580 ASSERT(args->mod < args->prod);
2581 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2582 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2583 args->minlen > args->maxlen || args->minlen > agsize ||
2584 args->mod >= args->prod) {
2585 args->fsbno = NULLFSBLOCK;
2586 trace_xfs_alloc_vextent_badargs(args);
2587 return 0;
2588 }
2589 minleft = args->minleft;
2591 switch (type) {
2592 case XFS_ALLOCTYPE_THIS_AG:
2593 case XFS_ALLOCTYPE_NEAR_BNO:
2594 case XFS_ALLOCTYPE_THIS_BNO:
2596 * These three force us into a single a.g.
2598 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2599 args->pag = xfs_perag_get(mp, args->agno);
2601 error = xfs_alloc_fix_freelist(args, 0);
2602 args->minleft = minleft;
2603 if (error) {
2604 trace_xfs_alloc_vextent_nofix(args);
2605 goto error0;
2606 }
2607 if (!args->agbp) {
2608 trace_xfs_alloc_vextent_noagbp(args);
2609 break;
2610 }
2611 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2612 if ((error = xfs_alloc_ag_vextent(args)))
2613 goto error0;
2614 break;
2615 case XFS_ALLOCTYPE_START_BNO:
2617 * Try near allocation first, then anywhere-in-ag after
2618 * the first a.g. fails.
2620 if ((args->userdata & XFS_ALLOC_INITIAL_USER_DATA) &&
2621 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2622 args->fsbno = XFS_AGB_TO_FSB(mp,
2623 ((mp->m_agfrotor / rotorstep) %
2624 mp->m_sb.sb_agcount), 0);
2625 bump_rotor = 1;
2626 }
2627 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2628 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2630 case XFS_ALLOCTYPE_ANY_AG:
2631 case XFS_ALLOCTYPE_START_AG:
2632 case XFS_ALLOCTYPE_FIRST_AG:
2634 * Rotate through the allocation groups looking for a winner.
2636 if (type == XFS_ALLOCTYPE_ANY_AG) {
2638 * Start with the last place we left off.
2640 args->agno = sagno = (mp->m_agfrotor / rotorstep) %
2641 mp->m_sb.sb_agcount;
2642 args->type = XFS_ALLOCTYPE_THIS_AG;
2643 flags = XFS_ALLOC_FLAG_TRYLOCK;
2644 } else if (type == XFS_ALLOCTYPE_FIRST_AG) {
2646 * Start with allocation group given by bno.
2648 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2649 args->type = XFS_ALLOCTYPE_THIS_AG;
2650 sagno = 0;
2651 flags = 0;
2652 } else {
2653 if (type == XFS_ALLOCTYPE_START_AG)
2654 args->type = XFS_ALLOCTYPE_THIS_AG;
2656 * Start with the given allocation group.
2658 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2659 flags = XFS_ALLOC_FLAG_TRYLOCK;
2662 * Loop over allocation groups twice; first time with
2663 * trylock set, second time without.
2665 for (;;) {
2666 args->pag = xfs_perag_get(mp, args->agno);
2667 if (no_min) args->minleft = 0;
2668 error = xfs_alloc_fix_freelist(args, flags);
2669 args->minleft = minleft;
2670 if (error) {
2671 trace_xfs_alloc_vextent_nofix(args);
2672 goto error0;
2673 }
2675 * If we get a buffer back then the allocation will fly.
2677 if (args->agbp) {
2678 if ((error = xfs_alloc_ag_vextent(args)))
2679 goto error0;
2680 break;
2681 }
2683 trace_xfs_alloc_vextent_loopfailed(args);
2686 * Didn't work, figure out the next iteration.
2688 if (args->agno == sagno &&
2689 type == XFS_ALLOCTYPE_START_BNO)
2690 args->type = XFS_ALLOCTYPE_THIS_AG;
2692 * For the first allocation, we can try any AG to get
2693 * space. However, if we already have allocated a
2694 * block, we don't want to try AGs whose number is below
2695 * sagno. Otherwise, we may end up with out-of-order
2696 * locking of AGF, which might cause deadlock.
2698 if (++(args->agno) == mp->m_sb.sb_agcount) {
2699 if (args->firstblock != NULLFSBLOCK)
2700 break;
2701 args->agno = 0;
2702 }
2705 * Reached the starting a.g., must either be done
2706 * or switch to non-trylock mode.
2708 if (args->agno == sagno) {
2710 args->agbno = NULLAGBLOCK;
2711 trace_xfs_alloc_vextent_allfailed(args);
2718 if (type == XFS_ALLOCTYPE_START_BNO) {
2719 args->agbno = XFS_FSB_TO_AGBNO(mp,
2720 args->fsbno);
2721 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2725 xfs_perag_put(args->pag);
2727 if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
2728 if (args->agno == sagno)
2729 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2730 (mp->m_sb.sb_agcount * rotorstep);
2731 else
2732 mp->m_agfrotor = (args->agno * rotorstep + 1) %
2733 (mp->m_sb.sb_agcount * rotorstep);
2740 if (args->agbno == NULLAGBLOCK)
2741 args->fsbno = NULLFSBLOCK;
2742 else {
2743 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2745 ASSERT(args->len >= args->minlen);
2746 ASSERT(args->len <= args->maxlen);
2747 ASSERT(args->agbno % args->alignment == 0);
2748 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2749 args->len);
2752 /* Zero the extent if we were asked to do so */
2753 if (args->userdata & XFS_ALLOC_USERDATA_ZERO) {
2754 error = xfs_zero_extent(args->ip, args->fsbno, args->len);
2755 if (error)
2756 goto error0;
2757 }
2759 }
2760 xfs_perag_put(args->pag);
2761 return 0;
2762 error0:
2763 xfs_perag_put(args->pag);
2764 return error;
2765 }
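/*
 * Illustrative sketch, not part of the original source: a minimal caller of
 * xfs_alloc_vextent().  The target block, lengths and owner are made-up
 * values; real callers such as the bmap code also set fields like minleft,
 * total and firstblock according to their transaction reservation, and
 * record the true owner of the extent in args.oinfo.
 */
STATIC int
xfs_alloc_vextent_example(
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_fsblock_t		target,	/* preferred starting block */
	xfs_fsblock_t		*new_fsbno) /* out: allocated extent start */
{
	xfs_alloc_arg_t		args;
	int			error;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.fsbno = target;			/* allocate near this block */
	args.type = XFS_ALLOCTYPE_START_BNO;	/* try near, then other AGs */
	args.minlen = 1;			/* accept a single block */
	args.maxlen = 16;			/* ask for up to 16 blocks */
	args.prod = 1;
	args.mod = 0;
	args.alignment = 1;
	/* A real caller records the actual extent owner here. */
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_AG);

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;
	*new_fsbno = args.fsbno;	/* NULLFSBLOCK if nothing was found */
	return 0;
}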
2767 /* Ensure that the freelist is at full capacity. */
2769 xfs_free_extent_fix_freelist(
2770 struct xfs_trans *tp,
2771 xfs_agnumber_t agno,
2772 struct xfs_buf **agbp)
2774 struct xfs_alloc_arg args;
2775 int error;
2777 memset(&args, 0, sizeof(struct xfs_alloc_arg));
2778 args.tp = tp;
2779 args.mp = tp->t_mountp;
2780 args.agno = agno;
2783 * validate that the block number is legal - this enables us to detect
2784 * and handle a silent filesystem corruption rather than crashing.
2786 if (args.agno >= args.mp->m_sb.sb_agcount)
2787 return -EFSCORRUPTED;
2789 args.pag = xfs_perag_get(args.mp, args.agno);
2792 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2793 if (error)
2794 goto out;
2796 *agbp = args.agbp;
2797 out:
2798 xfs_perag_put(args.pag);
2799 return error;
2800 }
2804 * Just break up the extent address and hand off to xfs_free_ag_extent
2805 * after fixing up the freelist.
2807 int /* error */
2808 xfs_free_extent(
2809 struct xfs_trans *tp, /* transaction pointer */
2810 xfs_fsblock_t bno, /* starting block number of extent */
2811 xfs_extlen_t len, /* length of extent */
2812 struct xfs_owner_info *oinfo) /* extent owner */
2814 struct xfs_mount *mp = tp->t_mountp;
2815 struct xfs_buf *agbp;
2816 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
2817 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
2818 int error;
2820 ASSERT(len != 0);
2822 if (XFS_TEST_ERROR(false, mp,
2823 XFS_ERRTAG_FREE_EXTENT,
2824 XFS_RANDOM_FREE_EXTENT))
2825 return -EIO;
2827 error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
2828 if (error)
2829 return error;
2831 XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
2833 /* validate the extent size is legal now we have the agf locked */
2834 XFS_WANT_CORRUPTED_GOTO(mp,
2835 agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
2836 err);
2838 error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, 0);
2839 if (error)
2840 goto err;
2842 xfs_extent_busy_insert(tp, agno, agbno, len, 0);
2843 return 0;
2845 err:
2846 xfs_trans_brelse(tp, agbp);
2847 return error;
2848 }
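/*
 * Illustrative sketch, not from the original source: freeing an extent with
 * the function above from inside a dirty transaction.  The owner setup
 * mirrors the AGFL code earlier in this file; a real caller would record the
 * actual owner of the blocks being freed.  The function name is hypothetical.
 */
STATIC int
xfs_free_extent_example(
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_fsblock_t		fsbno,	/* first block to free */
	xfs_extlen_t		len)	/* number of blocks to free */
{
	struct xfs_owner_info	oinfo;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
	return xfs_free_extent(tp, fsbno, len, &oinfo);
}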