// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"
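
/*
 * Busy extent tracking:
 *
 * Extents freed within a transaction remain "busy" until the free is
 * committed to the on-disk log; until then they cannot be handed back out
 * to user data allocations.  Busy extents live in a per-AG rbtree
 * (pag->pagb_tree) keyed by start block, and on the busy list of the
 * transaction or CIL context that freed them.
 */

/*
 * Insert a busy extent covering [bno, bno + len) in AG agno into the per-AG
 * rbtree and onto the transaction's busy list.  Freed extents should never
 * overlap an already busy extent, which the ASSERTs below check.
 */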
void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
	new->agno = agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  xfs_extent_busy_search() takes and releases the per-AG busy
 * extent tree lock internally.  This function returns 0 for no overlapping
 * busy extent, -1 for an overlapping but not exact busy extent, and 1 for an
 * exact match.  This is done so that a non-zero return indicates an overlap
 * that will require a synchronous transaction, but it can still be used to
 * distinguish between a partial and an exact match.
 */
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	return match;
}
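
/*
 * Example (illustrative only, not an existing caller): debug code that wants
 * to verify a range it is about to free is not already marked busy could do
 *
 *	ASSERT(xfs_extent_busy_search(mp, agno, bno, len) == 0);
 *
 * while a caller that cares whether a busy extent matches the range exactly
 * would check for a return value of 1.
 */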

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * Case 1: the free extent lies entirely inside the busy
		 * extent.
		 *
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 *
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * Cases 2-5: the free extent covers the whole busy extent.
		 *
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +-----------------+
		 *    fbno           fend
		 *
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +-----------------------------------+
		 *    fbno                             fend
		 *
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * Cases 6-7: the free extent overlaps only the start of the
		 * busy extent:
		 *
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 * Move the start of the busy extent up to fend so the
		 * overlapping range becomes unbusy.
		 */
		busyp->bno = fend;
		busyp->length = bend - fend;
	} else if (bbno < fbno) {
		/*
		 * Cases 8-9: the free extent overlaps only the end of the
		 * busy extent:
		 *
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 *
		 * Trim the busy extent so that it ends at fbno.
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}

/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
						  userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
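
/*
 * Note: callers of xfs_extent_busy_reuse() are about to reuse blocks they
 * already own (for example blocks taken back off the AGFL) rather than
 * blocks returned by a free space lookup, so any overlapping busy record has
 * to be trimmed or invalidated here, or flushed out with a log force, before
 * the blocks may be used.
 */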

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *len is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 *
 * Return the current busy generation for the AG if the extent is busy. This
 * value can be used to wait for at least one of the currently busy extents
 * to be cleared. Note that the busy list is not guaranteed to be empty after
 * the gen is woken. The state of a specific extent must always be confirmed
 * with another call to xfs_extent_busy_trim() before it can be used.
 */
bool
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	unsigned		*busy_gen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;
	bool			ret = false;

	ASSERT(*len > 0);

	spin_lock(&args->pag->pagb_lock);
restart:
	fbno = *bno;
	flen = *len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		/*
		 * If this is a metadata allocation, try to reuse the busy
		 * extent instead of trimming the allocation.
		 */
		if (!xfs_alloc_is_userdata(args->datatype) &&
		    !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
			if (!xfs_extent_busy_update_extent(args->mp, args->pag,
							  busyp, fbno, flen,
							  false))
				goto restart;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * Cases 1-4: the busy extent covers the start of the
			 * free extent and reaches at least as far as fend:
			 *
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-----------------+
			 *    fbno           fend
			 *
			 * No unbusy region in extent, return failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Cases 5-6: the busy extent covers only the start of
			 * the free extent:
			 *
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * Needs to be trimmed to:
			 *                       +-------+
			 *                       fbno fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * Cases 7-8: the busy extent covers the end of the
			 * free extent:
			 *
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Needs to be trimmed to:
			 *    +-------+
			 *    fbno fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * Case 9: the busy extent sits in the middle of the
			 * free extent:
			 *
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno fend                   fbno fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, therefore we always want to
			 * choose the option that produces forward allocation
			 * patterns.
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation; if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation
			 * taking the segment at "fbno", which would be a
			 * backward allocation.  We only use the segment at
			 * "fbno" if it is much larger than the current
			 * requested size, because in that case there's a
			 * good chance subsequent allocations will be
			 * contiguous.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfectly */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
out:

	if (fbno != *bno || flen != *len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
					  fbno, flen);
		*bno = fbno;
		*len = flen;
		*busy_gen = args->pag->pagb_gen;
		ret = true;
	}
	spin_unlock(&args->pag->pagb_lock);
	return ret;
fail:
	/*
	 * Return a zero extent length as failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	flen = 0;
	goto out;
}
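
/*
 * Drop a single busy extent: remove it from the per-AG rbtree if it is still
 * linked there, delete it from its transaction/CIL busy list and free it.
 */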
STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
						busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}
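
/*
 * Drop the pagb_lock and the perag reference held by xfs_extent_busy_clear(),
 * bumping the busy generation and waking any waiters if extents were
 * actually cleared from this AG.
 */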
static void
xfs_extent_busy_put_pag(
	struct xfs_perag	*pag,
	bool			wakeup)
		__releases(pag->pagb_lock)
{
	if (wakeup) {
		pag->pagb_gen++;
		wake_up_all(&pag->pagb_wait);
	}

	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;
	bool			wakeup = false;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag)
				xfs_extent_busy_put_pag(pag, wakeup);
			agno = busyp->agno;
			pag = xfs_perag_get(mp, agno);
			spin_lock(&pag->pagb_lock);
			wakeup = false;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		} else {
			xfs_extent_busy_clear_one(mp, pag, busyp);
			wakeup = true;
		}
	}

	if (pag)
		xfs_extent_busy_put_pag(pag, wakeup);
}
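
/*
 * Note: this is typically called with the busy list of a committed
 * transaction or CIL context once the corresponding log I/O has completed;
 * do_discard is set when the filesystem runs with online discard enabled, so
 * that freed extents are trimmed before they are made available again.
 */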

/*
 * Flush out all busy extents for this AG.
 */
void
xfs_extent_busy_flush(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	unsigned		busy_gen)
{
	DEFINE_WAIT		(wait);
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return;

	do {
		prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
		if (busy_gen != READ_ONCE(pag->pagb_gen))
			break;
		schedule();
	} while (1);

	finish_wait(&pag->pagb_wait, &wait);
}
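
/*
 * Wait until there are no busy extents left in any AG.  Used, for example,
 * when the filesystem is being quiesced and all pending frees must have
 * reached the log.
 */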
void
xfs_extent_busy_wait_all(
	struct xfs_mount	*mp)
{
	DEFINE_WAIT		(wait);
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		struct xfs_perag *pag = xfs_perag_get(mp, agno);

		do {
			prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
			if (RB_EMPTY_ROOT(&pag->pagb_tree))
				break;
			schedule();
		} while (1);
		finish_wait(&pag->pagb_wait, &wait);

		xfs_perag_put(pag);
	}
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	struct list_head	*l1,
	struct list_head	*l2)
{
	struct xfs_extent_busy	*b1 =
		container_of(l1, struct xfs_extent_busy, list);
	struct xfs_extent_busy	*b2 =
		container_of(l2, struct xfs_extent_busy, list);
	s32 diff;

	diff = b1->agno - b2->agno;
	if (!diff)
		diff = b1->bno - b2->bno;
	return diff;
}
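
/*
 * A caller that wants to walk a busy list one AG at a time (for example to
 * issue discards) would sort it first, along the lines of:
 *
 *	list_sort(NULL, &busy_list, xfs_extent_busy_ag_cmp);
 *
 * (illustrative only; "busy_list" stands in for whatever list the caller
 * owns, such as a transaction's t_busy list or a CIL context's busy list).
 */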