// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"

struct kmem_cache	*xfs_trans_cache;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	xfs_trans_clear_context(tp);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_cache, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_firstblock = NULLFSBLOCK;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;

	xfs_trans_switch_context(tp, ntp);

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(mp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(mp, resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(mp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}
	return error;
}

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			want_retry = true;
	int			error;

	/*
	 * Allocate the handle before we do our freeze accounting and setting up
	 * GFP_NOFS allocation context so that we avoid lockdep false positives
	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
	 */
retry:
	tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);
	xfs_trans_set_context(tp);

	/*
	 * Zero-reservation ("empty") transactions can't modify anything, so
	 * they're allowed to run while we're frozen.
	 */
	WARN_ON(resp->tr_logres > 0 &&
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
	       xfs_has_lazysbcount(mp));

	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_firstblock = NULLFSBLOCK;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error == -ENOSPC && want_retry) {
		xfs_trans_cancel(tp);

		/*
		 * We weren't able to reserve enough space for the transaction.
		 * Flush the other speculative space allocations to free space.
		 * Do not perform a synchronous scan because callers can hold
		 * other locks.
		 */
		xfs_blockgc_flush_all(mp);
		want_retry = false;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}

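/*
 * The typical caller pattern looks like the sketch below (illustrative only;
 * the local names and the tr_ichange reservation choice are hypothetical and
 * error handling is abbreviated): reserve blocks and log space up front, join
 * and dirty the objects, then commit.
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	return xfs_trans_commit(tp);
 */
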
/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount		*mp,
	struct xfs_trans		**tpp)
{
	struct xfs_trans_res		resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}

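/*
 * Read-only metadata walkers use the empty transaction like this (sketch
 * only; the surrounding function and variable names are hypothetical):
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	... grab and inspect buffers/inodes under tp ...
 *	xfs_trans_cancel(tp);	(MUST cancel; nothing may be dirtied)
 */
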
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter. This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}

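/*
 * For example, a block allocation that consumed @len blocks from the free
 * space counters would be accounted back to the superblock like this
 * (sketch; @tp and @len come from the hypothetical calling context):
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -((int64_t)len));
 */
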
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	struct xfs_dsb	*sbp;
	struct xfs_buf	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp);
	sbp = bp->b_addr;

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_has_lazysbcount((tp->t_mountp))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	/*
	 * Updating frextents requires careful handling because it does not
	 * behave like the lazysb counters, because we cannot rely on log
	 * recovery in older kernels to recompute the value from the rtbitmap.
	 * This means that the ondisk frextents must be consistent with the
	 * rtbitmap.
	 *
	 * Therefore, log the frextents change to the ondisk superblock and
	 * update the incore superblock so that future calls to xfs_log_sb
	 * write the correct value ondisk.
	 *
	 * Don't touch m_frextents because it includes incore reservations,
	 * and those are handled by the unreserve function.
	 */
	if (tp->t_frextents_delta || tp->t_res_frextents_delta) {
		struct xfs_mount	*mp = tp->t_mountp;
		int64_t			rtxdelta;

		rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;

		spin_lock(&mp->m_sb_lock);
		be64_add_cpu(&sbp->sb_frextents, rtxdelta);
		mp->m_sb.sb_frextents += rtxdelta;
		spin_unlock(&mp->m_sb_lock);
	}

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
				  offsetof(struct xfs_dsb, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
 * apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that it has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128

void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_has_lazysbcount(mp) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_has_lazysbcount(mp) ||
	    (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		ASSERT(!error);
	}

	if (idelta)
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);

	if (ifreedelta)
		percpu_counter_add(&mp->m_ifree, ifreedelta);

	if (rtxdelta) {
		error = xfs_mod_frextents(mp, rtxdelta);
		ASSERT(!error);
	}

	if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
	mp->m_sb.sb_icount += idelta;
	mp->m_sb.sb_ifree += ifreedelta;
	/*
	 * Do not touch sb_frextents here because we are dealing with incore
	 * reservation.  sb_frextents is not part of the lazy sb counters so it
	 * must be consistent with the ondisk rtbitmap and must never include
	 * incore reservations.
	 */
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
}

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_log == tp->t_mountp->m_log);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction. The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int			i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		if (lip->li_ops->iop_unpin)
			lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct list_head	*lv_chain,
	xfs_lsn_t		commit_lsn,
	bool			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	list_for_each_entry(lv, lv_chain, lv_list) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);

		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
			lip->li_ops->iop_release(lip);
			continue;
		}

		if (lip->li_ops->iop_committed)
			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
		else
			item_lsn = commit_lsn;

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(xlog_is_shutdown(ailp->ail_log));
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}

/*
 * Sort transaction items prior to running precommit operations. This will
 * attempt to order the items such that they will always be locked in the same
 * order. Items that have no sort function are moved to the end of the list
 * and so are locked last.
 *
 * This may need refinement as different types of objects add sort functions.
 *
 * Function is more complex than it needs to be because we are comparing 64 bit
 * values and the function only returns 32 bit values.
 */
static int
xfs_trans_precommit_sort(
	void			*unused_arg,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_item	*lia = container_of(a,
					struct xfs_log_item, li_trans);
	struct xfs_log_item	*lib = container_of(b,
					struct xfs_log_item, li_trans);
	int64_t			diff;

	/*
	 * If both items are non-sortable, leave them alone. If only one is
	 * sortable, move the non-sortable item towards the end of the list.
	 */
	if (!lia->li_ops->iop_sort && !lib->li_ops->iop_sort)
		return 0;
	if (!lia->li_ops->iop_sort)
		return 1;
	if (!lib->li_ops->iop_sort)
		return -1;

	diff = lia->li_ops->iop_sort(lia) - lib->li_ops->iop_sort(lib);
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

/*
 * Run transaction precommit functions.
 *
 * If there is an error in any of the callouts, then stop immediately and
 * trigger a shutdown to abort the transaction. There is no recovery possible
 * from errors at this point as the transaction is dirty....
 */
static int
xfs_trans_run_precommits(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_log_item	*lip, *n;
	int			error = 0;

	/*
	 * Sort the item list to avoid ABBA deadlocks with other transactions
	 * running precommit operations that lock multiple shared items such as
	 * inode cluster buffers.
	 */
	list_sort(NULL, &tp->t_items, xfs_trans_precommit_sort);

	/*
	 * Precommit operations can remove the log item from the transaction
	 * if the log item exists purely to delay modifications until they
	 * can be ordered against other operations. Hence we have to use
	 * list_for_each_entry_safe() here.
	 */
	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;
		if (lip->li_ops->iop_precommit) {
			error = lip->li_ops->iop_precommit(tp, lip);
			if (error)
				break;
		}
	}
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return error;
}

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	xfs_csn_t		commit_seq = 0;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	error = xfs_trans_run_precommits(tp);
	if (error) {
		if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
			xfs_defer_cancel(tp);
		goto out_unreserve;
	}

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	/*
	 * We must check against log shutdown here because we cannot abort log
	 * items and leave them dirty, inconsistent and unpinned in memory while
	 * the log is active. This leaves them open to being written back to
	 * disk, and that will lead to on-disk corruption.
	 */
	if (xlog_is_shutdown(log)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xlog_cil_commit(log, tp, &commit_seq, regrant);

	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		if (regrant && !xlog_is_shutdown(log))
			xfs_log_ticket_regrant(log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}

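/*
 * Callers that require durability before returning set XFS_TRANS_SYNC on the
 * transaction before committing, which makes __xfs_trans_commit() force the
 * log and wait for it (sketch; tp comes from the hypothetical caller):
 *
 *	xfs_trans_set_sync(tp);
 *	error = xfs_trans_commit(tp);
 */
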
/*
 * Unlock all of the transaction's items and free the transaction.  If the
 * transaction is dirty, we must shut down the filesystem because there is no
 * way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release it as
 * well.
 *
 * This is a high level function (equivalent to xfs_trans_commit()) and so can
 * be called after the transaction has effectively been aborted due to the mount
 * being shut down. However, if the mount has not been shut down and the
 * transaction is dirty we will shut the mount down and, in doing so, that
 * guarantees that the log is shut down, too. Hence we don't need to be as
 * careful with shutdown state and dirty items here as we need to be in
 * xfs_trans_commit().
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	/*
	 * It's never valid to cancel a transaction with deferred ops attached,
	 * because the transaction is effectively dirty.  Complain about this
	 * loudly before freeing the in-memory defer items.
	 */
	if (!list_empty(&tp->t_dfops)) {
		ASSERT(xfs_is_shutdown(mp) || list_empty(&tp->t_dfops));
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		dirty = true;
		xfs_defer_cancel(tp);
	}

	/*
	 * See if the caller is relying on us to shut down the filesystem. We
	 * only want an error report if there isn't already a shutdown in
	 * progress, so we only need to check against the mount shutdown state
	 * here.
	 */
	if (dirty && !xfs_is_shutdown(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	/* Log items need to be consistent until the log is shut down. */
	if (!dirty && !xlog_is_shutdown(log)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!xlog_item_is_intent_done(lip));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want as soon
 * as possible to let chunks of it go to the log. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use. This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}

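/*
 * A long-running operation rolls like this (sketch only; the loop shape and
 * names are hypothetical). The inode must be re-joined after each roll
 * because log items are not inherited by the duplicate transaction:
 *
 *	xfs_trans_ijoin(tp, ip, 0);
 *	while (more_work) {
 *		... dirty ip and other joined items ...
 *		error = xfs_trans_roll(&tp);
 *		if (error)
 *			break;
 *		xfs_trans_ijoin(tp, ip, 0);
 *	}
 */
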
/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
	struct xfs_inode	*ip,
	struct xfs_trans_res	*resv,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks,
			rblocks / mp->m_sb.sb_rextsize,
			force ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_blockgc_free_quota(ip, 0);
		retried = true;
		goto retry;
	}
	if (error)
		goto out_cancel;

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

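/*
 * For example, a write path needing @dblocks of data blocks reserved against
 * ip's quota might do (sketch; names and the tr_write reservation choice are
 * hypothetical, error handling abbreviated):
 *
 *	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
 *			0, false, &tp);
 *	if (error)
 *		return error;
 *	... allocate blocks, log the inode ...
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */
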
/*
 * Allocate a transaction in preparation for inode creation by reserving quota
 * against the given dquots.  Callers are not required to hold any inode locks.
 */
int
xfs_trans_alloc_icreate(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resv,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		dblocks,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
	if (error)
		return error;

	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
		retried = true;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}

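/*
 * Inode creation callers pass the quota candidates they looked up earlier
 * (sketch; names and the tr_create reservation choice are hypothetical):
 *
 *	error = xfs_trans_alloc_icreate(mp, &M_RES(mp)->tr_create, udqp,
 *			gdqp, pdqp, resblks, &tp);
 */
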
/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCK will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/*
	 * For each quota type, skip quota reservations if the inode's dquots
	 * now match the ones that came from the caller, or the caller didn't
	 * pass one in.  The inode's dquots can change if we drop the ILOCK to
	 * perform a blockgc scan, so we must preserve the caller's arguments.
	 */
	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
	if (udqp || gdqp || pdqp) {
		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;

		if (force)
			qflags |= XFS_QMOPT_FORCE_RES;

		/*
		 * Reserve enough quota to handle blocks on disk and reserved
		 * for a delayed allocation.  We'll actually transfer the
		 * delalloc reservation between dquots at chown time, even
		 * though that part is only semi-transactional.
		 */
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, ip->i_nblocks + ip->i_delayed_blks,
				1, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;
	}

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * Allocate a transaction, lock and join the directory and child inodes to it,
 * and reserve quota for a directory update.  If there isn't sufficient space,
 * @dblocks will be set to zero for a reservationless directory update and
 * @nospace_error will be set to a negative errno describing the space
 * constraint we hit.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCKs will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_dir(
	struct xfs_inode	*dp,
	struct xfs_trans_res	*resv,
	struct xfs_inode	*ip,
	unsigned int		*dblocks,
	struct xfs_trans	**tpp,
	int			*nospace_error)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		resblks;
	bool			retried = false;
	int			error;

retry:
	*nospace_error = 0;
	resblks = *dblocks;
	error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		*nospace_error = error;
		resblks = 0;
		error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	}
	if (error)
		return error;

	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqattach_locked(dp, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	if (resblks == 0)
		goto done;

	error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
	if (error == -EDQUOT || error == -ENOSPC) {
		if (!retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_quota(dp, 0);
			retried = true;
			goto retry;
		}

		*nospace_error = error;
		resblks = 0;
		error = 0;
	}
	if (error)
		goto out_cancel;

done:
	*tpp = tp;
	*dblocks = resblks;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}