xfs_alloctype_t type; /* input allocation type */
int bump_rotor = 0;
xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
+ xfs_agnumber_t minimum_agno = 0;
mp = args->mp;
type = args->otype = args->type;
args->agbno = NULLAGBLOCK;
+ if (args->tp->t_firstblock != NULLFSBLOCK)
+ minimum_agno = XFS_FSB_TO_AGNO(mp, args->tp->t_firstblock);
/*
* Just fix this up, for the case where the last a.g. is shorter
* (or there's only one a.g.) and the caller couldn't easily figure
*/
args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
args->pag = xfs_perag_get(mp, args->agno);
+
+ if (minimum_agno > args->agno) {
+ trace_xfs_alloc_vextent_skip_deadlock(args);
+ error = 0;
+ break;
+ }
+
error = xfs_alloc_fix_freelist(args, 0);
if (error) {
trace_xfs_alloc_vextent_nofix(args);
case XFS_ALLOCTYPE_FIRST_AG:
/*
* Rotate through the allocation groups looking for a winner.
+ * If we are blocking, we must obey minimum_agno constraints for
+ * avoiding ABBA deadlocks on AGF locking.
*/
if (type == XFS_ALLOCTYPE_FIRST_AG) {
/*
*/
args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
args->type = XFS_ALLOCTYPE_THIS_AG;
- sagno = 0;
+ sagno = minimum_agno;
flags = 0;
} else {
/*
args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
flags = XFS_ALLOC_FLAG_TRYLOCK;
}
+
/*
* Loop over allocation groups twice; first time with
* trylock set, second time without.
if (args->agno == sagno &&
type == XFS_ALLOCTYPE_START_BNO)
args->type = XFS_ALLOCTYPE_THIS_AG;
+
/*
- * For the first allocation, we can try any AG to get
- * space. However, if we already have allocated a
- * block, we don't want to try AGs whose number is below
- * sagno. Otherwise, we may end up with out-of-order
- * locking of AGF, which might cause deadlock.
- */
+ * If we are try-locking, we can't deadlock on AGF
+ * locks, so we can wrap all the way back to the first
+ * AG. Otherwise, wrap back to the start AG so we can't
+ * deadlock, and let the end of scan handler decide what
+ * to do next.
+ */
if (++(args->agno) == mp->m_sb.sb_agcount) {
- if (args->tp->t_firstblock != NULLFSBLOCK)
- args->agno = sagno;
- else
+ if (flags & XFS_ALLOC_FLAG_TRYLOCK)
args->agno = 0;
+ else
+ args->agno = sagno;
}
+
/*
* Reached the starting a.g., must either be done
* or switch to non-trylock mode.
break;
}
+ /*
+ * Blocking pass next, so we must obey minimum
+ * agno constraints to avoid ABBA AGF deadlocks.
+ */
flags = 0;
+ if (minimum_agno > sagno)
+ sagno = minimum_agno;
+
if (type == XFS_ALLOCTYPE_START_BNO) {
args->agbno = XFS_FSB_TO_AGBNO(mp,
args->fsbno);
ASSERT(0);
/* NOTREACHED */
}
- if (args->agbno == NULLAGBLOCK)
+ if (args->agbno == NULLAGBLOCK) {
args->fsbno = NULLFSBLOCK;
- else {
+ } else {
args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
ASSERT(args->len >= args->minlen);
#endif
}
+
+ /*
+ * We end up here with a locked AGF. If we failed, the caller is likely
+ * going to try to allocate again with different parameters, and that
+ * can widen the AGs that are searched for free space. If we have to do
+ * BMBT block allocation, we have to do a new allocation.
+ *
+ * Hence leaving this function with the AGF locked opens up potential
+ * ABBA AGF deadlocks because a future allocation attempt in this
+ * transaction may attempt to lock a lower number AGF.
+ *
+ * We can't release the AGF until the transaction is committed, so at
+ * this point we must update the "firstblock" tracker to point at this
+ * AG if the tracker is empty or points to a lower AG. This allows the
+ * next allocation attempt to be modified appropriately to avoid
+ * deadlocks.
+ */
+ if (args->agbp &&
+ (args->tp->t_firstblock == NULLFSBLOCK ||
+ args->pag->pag_agno > minimum_agno)) {
+ args->tp->t_firstblock = XFS_AGB_TO_FSB(mp,
+ args->pag->pag_agno, 0);
+ }
xfs_perag_put(args->pag);
return 0;
error0: