// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_log_priv.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"

kmem_zone_t             *xfs_trans_zone;

#if defined(CONFIG_TRACEPOINTS)
STATIC void
xfs_trans_trace_reservations(
        struct xfs_mount        *mp)
{
        struct xfs_trans_res    resv;
        struct xfs_trans_res    *res;
        struct xfs_trans_res    *end_res;
        int                     i;

        res = (struct xfs_trans_res *)M_RES(mp);
        end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
        for (i = 0; res < end_res; i++, res++)
                trace_xfs_trans_resv_calc(mp, i, res);
        xfs_log_get_max_trans_res(mp, &resv);
        trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
        struct xfs_mount        *mp)
{
        xfs_trans_resv_calc(mp, M_RES(mp));
        xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
        struct xfs_trans        *tp)
{
        xfs_extent_busy_sort(&tp->t_busy);
        xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

        trace_xfs_trans_free(tp, _RET_IP_);
        xfs_trans_clear_context(tp);
        if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
                sb_end_intwrite(tp->t_mountp->m_super);
        xfs_trans_free_dqinfo(tp);
        kmem_cache_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
        struct xfs_trans        *tp)
{
        struct xfs_trans        *ntp;

        trace_xfs_trans_dup(tp, _RET_IP_);

        ntp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);

        /*
         * Initialize the new transaction structure.
         */
        ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
        ntp->t_mountp = tp->t_mountp;
        INIT_LIST_HEAD(&ntp->t_items);
        INIT_LIST_HEAD(&ntp->t_busy);
        INIT_LIST_HEAD(&ntp->t_dfops);
        ntp->t_firstblock = NULLFSBLOCK;

        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
        ASSERT(tp->t_ticket != NULL);

        ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
                       (tp->t_flags & XFS_TRANS_RESERVE) |
                       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
                       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
        /* We gave our writer reference to the new transaction */
        tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
        ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

        ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
        ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
        tp->t_blk_res = tp->t_blk_res_used;

        ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
        tp->t_rtx_res = tp->t_rtx_res_used;

        xfs_trans_switch_context(tp, ntp);

        /* move deferred ops over to the new tp */
        xfs_defer_move(ntp, tp);

        xfs_trans_dup_dqinfo(tp, ntp);

        return ntp;
}

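/*
 * Worked example (illustrative, not from the original source): if the old
 * transaction holds a reservation of 10 blocks (t_blk_res == 10) of which
 * 4 are used (t_blk_res_used == 4), xfs_trans_dup() leaves the old
 * transaction with exactly its 4 used blocks and hands the unused 6 to the
 * new transaction, so the total reservation is conserved across a roll.
 */
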
/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
        struct xfs_trans        *tp,
        struct xfs_trans_res    *resp,
        uint                    blocks,
        uint                    rtextents)
{
        struct xfs_mount        *mp = tp->t_mountp;
        int                     error = 0;
        bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

        /*
         * Attempt to reserve the needed disk blocks by decrementing
         * the number needed from the number available.  This will
         * fail if the count would go below zero.
         */
        if (blocks > 0) {
                error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
                if (error != 0)
                        return -ENOSPC;
                tp->t_blk_res += blocks;
        }

        /*
         * Reserve the log space needed for this transaction.
         */
        if (resp->tr_logres > 0) {
                bool    permanent = false;

                ASSERT(tp->t_log_res == 0 ||
                       tp->t_log_res == resp->tr_logres);
                ASSERT(tp->t_log_count == 0 ||
                       tp->t_log_count == resp->tr_logcount);

                if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
                        tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
                        permanent = true;
                } else {
                        ASSERT(tp->t_ticket == NULL);
                        ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
                }

                if (tp->t_ticket != NULL) {
                        ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
                        error = xfs_log_regrant(mp, tp->t_ticket);
                } else {
                        error = xfs_log_reserve(mp, resp->tr_logres,
                                                resp->tr_logcount,
                                                &tp->t_ticket, XFS_TRANSACTION,
                                                permanent);
                }

                if (error)
                        goto undo_blocks;

                tp->t_log_res = resp->tr_logres;
                tp->t_log_count = resp->tr_logcount;
        }

        /*
         * Attempt to reserve the needed realtime extents by decrementing
         * the number needed from the number available.  This will
         * fail if the count would go below zero.
         */
        if (rtextents > 0) {
                error = xfs_mod_frextents(mp, -((int64_t)rtextents));
                if (error) {
                        error = -ENOSPC;
                        goto undo_log;
                }
                tp->t_rtx_res += rtextents;
        }

        return 0;

        /*
         * Error cases jump to one of these labels to undo any
         * reservations which have already been performed.
         */
undo_log:
        if (resp->tr_logres > 0) {
                xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
                tp->t_ticket = NULL;
                tp->t_log_res = 0;
                tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
        }

undo_blocks:
        if (blocks > 0) {
                xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
                tp->t_blk_res = 0;
        }
        return error;
}

int
xfs_trans_alloc(
        struct xfs_mount        *mp,
        struct xfs_trans_res    *resp,
        uint                    blocks,
        uint                    rtextents,
        uint                    flags,
        struct xfs_trans        **tpp)
{
        struct xfs_trans        *tp;
        bool                    want_retry = true;
        int                     error;

        /*
         * Allocate the handle before we do our freeze accounting and setting up
         * GFP_NOFS allocation context so that we avoid lockdep false positives
         * by doing GFP_KERNEL allocations inside sb_start_intwrite().
         */
retry:
        tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);
        if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                sb_start_intwrite(mp->m_super);
        xfs_trans_set_context(tp);

        /*
         * Zero-reservation ("empty") transactions can't modify anything, so
         * they're allowed to run while we're frozen.
         */
        WARN_ON(resp->tr_logres > 0 &&
                mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
        ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
               xfs_sb_version_haslazysbcount(&mp->m_sb));

        tp->t_magic = XFS_TRANS_HEADER_MAGIC;
        tp->t_flags = flags;
        tp->t_mountp = mp;
        INIT_LIST_HEAD(&tp->t_items);
        INIT_LIST_HEAD(&tp->t_busy);
        INIT_LIST_HEAD(&tp->t_dfops);
        tp->t_firstblock = NULLFSBLOCK;

        error = xfs_trans_reserve(tp, resp, blocks, rtextents);
        if (error == -ENOSPC && want_retry) {
                xfs_trans_cancel(tp);

                /*
                 * We weren't able to reserve enough space for the transaction.
                 * Flush the other speculative space allocations to free space.
                 * Do not perform a synchronous scan because callers can hold
                 * other locks.
                 */
                error = xfs_blockgc_free_space(mp, NULL);
                if (error)
                        return error;

                want_retry = false;
                goto retry;
        }
        if (error) {
                xfs_trans_cancel(tp);
                return error;
        }

        trace_xfs_trans_alloc(tp, _RET_IP_);

        *tpp = tp;
        return 0;
}

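/*
 * Usage sketch (illustrative, not from the original source): a typical
 * modification path allocates against a precomputed reservation, joins its
 * locked objects, and then commits:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, blocks, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	(lock and join objects, make and log the modifications)
 *	error = xfs_trans_commit(tp);
 */
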
/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
        struct xfs_mount        *mp,
        struct xfs_trans        **tpp)
{
        struct xfs_trans_res    resv = {0};

        return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}

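/*
 * Usage sketch (illustrative, not from the original source): metadata
 * scanners pair an empty transaction with xfs_trans_cancel(), since nothing
 * may be dirtied:
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	(read-only metadata queries via tp)
 *	xfs_trans_cancel(tp);
 */
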
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
        xfs_trans_t     *tp,
        uint            field,
        int64_t         delta)
{
        uint32_t        flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
        xfs_mount_t     *mp = tp->t_mountp;

        switch (field) {
        case XFS_TRANS_SB_ICOUNT:
                tp->t_icount_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_IFREE:
                tp->t_ifree_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_FDBLOCKS:
                /*
                 * Track the number of blocks allocated in the transaction.
                 * Make sure it does not exceed the number reserved. If so,
                 * shutdown as this can lead to accounting inconsistency.
                 */
                if (delta < 0) {
                        tp->t_blk_res_used += (uint)-delta;
                        if (tp->t_blk_res_used > tp->t_blk_res)
                                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                } else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
                        int64_t blkres_delta;

                        /*
                         * Return freed blocks directly to the reservation
                         * instead of the global pool, being careful not to
                         * overflow the trans counter. This is used to preserve
                         * reservation across chains of transaction rolls that
                         * repeatedly free and allocate blocks.
                         */
                        blkres_delta = min_t(int64_t, delta,
                                             UINT_MAX - tp->t_blk_res);
                        tp->t_blk_res += blkres_delta;
                        delta -= blkres_delta;
                }
                tp->t_fdblocks_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_RES_FDBLOCKS:
                /*
                 * The allocation has already been applied to the
                 * in-core superblock's counter.  This should only
                 * be applied to the on-disk superblock.
                 */
                tp->t_res_fdblocks_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_FREXTENTS:
                /*
                 * Track the number of blocks allocated in the
                 * transaction.  Make sure it does not exceed the
                 * number reserved.
                 */
                if (delta < 0) {
                        tp->t_rtx_res_used += (uint)-delta;
                        ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
                }
                tp->t_frextents_delta += delta;
                break;
        case XFS_TRANS_SB_RES_FREXTENTS:
                /*
                 * The allocation has already been applied to the
                 * in-core superblock's counter.  This should only
                 * be applied to the on-disk superblock.
                 */
                ASSERT(delta < 0);
                tp->t_res_frextents_delta += delta;
                break;
        case XFS_TRANS_SB_DBLOCKS:
                ASSERT(delta > 0);
                tp->t_dblocks_delta += delta;
                break;
        case XFS_TRANS_SB_AGCOUNT:
                ASSERT(delta > 0);
                tp->t_agcount_delta += delta;
                break;
        case XFS_TRANS_SB_IMAXPCT:
                tp->t_imaxpct_delta += delta;
                break;
        case XFS_TRANS_SB_REXTSIZE:
                tp->t_rextsize_delta += delta;
                break;
        case XFS_TRANS_SB_RBMBLOCKS:
                tp->t_rbmblocks_delta += delta;
                break;
        case XFS_TRANS_SB_RBLOCKS:
                tp->t_rblocks_delta += delta;
                break;
        case XFS_TRANS_SB_REXTENTS:
                tp->t_rextents_delta += delta;
                break;
        case XFS_TRANS_SB_REXTSLOG:
                tp->t_rextslog_delta += delta;
                break;
        default:
                ASSERT(0);
                return;
        }

        tp->t_flags |= flags;
}

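/*
 * Usage sketch (illustrative, not from the original source): freeing four
 * blocks back to the filesystem within a transaction is accounted as
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, 4);
 *
 * whereas an allocation passes a negative delta, which is charged against
 * the transaction's block reservation (t_blk_res_used) above.
 */
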
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
        xfs_trans_t     *tp)
{
        xfs_dsb_t       *sbp;
        struct xfs_buf  *bp;
        int             whole = 0;

        bp = xfs_trans_getsb(tp);
        sbp = bp->b_addr;

        /*
         * Check that superblock mods match the mods made to AGF counters.
         */
        ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
               (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
                tp->t_ag_btree_delta));

        /*
         * Only update the superblock counters if we are logging them
         */
        if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
                if (tp->t_icount_delta)
                        be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
                if (tp->t_ifree_delta)
                        be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
                if (tp->t_fdblocks_delta)
                        be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
                if (tp->t_res_fdblocks_delta)
                        be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
        }

        if (tp->t_frextents_delta)
                be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
        if (tp->t_res_frextents_delta)
                be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

        if (tp->t_dblocks_delta) {
                be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
                whole = 1;
        }
        if (tp->t_agcount_delta) {
                be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
                whole = 1;
        }
        if (tp->t_imaxpct_delta) {
                sbp->sb_imax_pct += tp->t_imaxpct_delta;
                whole = 1;
        }
        if (tp->t_rextsize_delta) {
                be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
                whole = 1;
        }
        if (tp->t_rbmblocks_delta) {
                be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
                whole = 1;
        }
        if (tp->t_rblocks_delta) {
                be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
                whole = 1;
        }
        if (tp->t_rextents_delta) {
                be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
                whole = 1;
        }
        if (tp->t_rextslog_delta) {
                sbp->sb_rextslog += tp->t_rextslog_delta;
                whole = 1;
        }

        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
        if (whole)
                /*
                 * Log the whole thing, the fields are noncontiguous.
                 */
                xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
        else
                /*
                 * Since all the modifiable fields are contiguous, we
                 * can get away with this.
                 */
                xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
                                  offsetof(xfs_dsb_t, sb_frextents) +
                                  sizeof(sbp->sb_frextents) - 1);
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock because those deltas have already been
 * applied.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH        128

void
xfs_trans_unreserve_and_mod_sb(
        struct xfs_trans        *tp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
        int64_t                 blkdelta = 0;
        int64_t                 rtxdelta = 0;
        int64_t                 idelta = 0;
        int64_t                 ifreedelta = 0;
        int                     error;

        /* calculate deltas */
        if (tp->t_blk_res > 0)
                blkdelta = tp->t_blk_res;
        if ((tp->t_fdblocks_delta != 0) &&
            (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
             (tp->t_flags & XFS_TRANS_SB_DIRTY)))
                blkdelta += tp->t_fdblocks_delta;

        if (tp->t_rtx_res > 0)
                rtxdelta = tp->t_rtx_res;
        if ((tp->t_frextents_delta != 0) &&
            (tp->t_flags & XFS_TRANS_SB_DIRTY))
                rtxdelta += tp->t_frextents_delta;

        if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
            (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
                idelta = tp->t_icount_delta;
                ifreedelta = tp->t_ifree_delta;
        }

        /* apply the per-cpu counters */
        if (blkdelta) {
                error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
                ASSERT(!error);
        }

        if (idelta) {
                percpu_counter_add_batch(&mp->m_icount, idelta,
                                         XFS_ICOUNT_BATCH);
                if (idelta < 0)
                        ASSERT(__percpu_counter_compare(&mp->m_icount, 0,
                                                        XFS_ICOUNT_BATCH) >= 0);
        }

        if (ifreedelta) {
                percpu_counter_add(&mp->m_ifree, ifreedelta);
                if (ifreedelta < 0)
                        ASSERT(percpu_counter_compare(&mp->m_ifree, 0) >= 0);
        }

        if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
                return;

        /* apply remaining deltas */
        spin_lock(&mp->m_sb_lock);
        mp->m_sb.sb_frextents += rtxdelta;
        mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
        mp->m_sb.sb_agcount += tp->t_agcount_delta;
        mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
        mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
        mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
        mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
        mp->m_sb.sb_rextents += tp->t_rextents_delta;
        mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
        spin_unlock(&mp->m_sb_lock);

        /*
         * Debug checks outside of the spinlock so they don't lock up the
         * machine if they fail.
         */
        ASSERT(mp->m_sb.sb_imax_pct >= 0);
        ASSERT(mp->m_sb.sb_rextslog >= 0);
}

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
        struct xfs_trans        *tp,
        struct xfs_log_item     *lip)
{
        ASSERT(lip->li_mountp == tp->t_mountp);
        ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
        ASSERT(list_empty(&lip->li_trans));
        ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

        list_add_tail(&lip->li_trans, &tp->t_items);
        trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
        struct xfs_log_item     *lip)
{
        clear_bit(XFS_LI_DIRTY, &lip->li_flags);
        list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
        struct xfs_trans        *tp,
        bool                    abort)
{
        struct xfs_log_item     *lip, *next;

        trace_xfs_trans_free_items(tp, _RET_IP_);

        list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
                xfs_trans_del_item(lip);
                if (abort)
                        set_bit(XFS_LI_ABORTED, &lip->li_flags);
                if (lip->li_ops->iop_release)
                        lip->li_ops->iop_release(lip);
        }
}

static inline void
xfs_log_item_batch_insert(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               commit_lsn)
{
        int     i;

        spin_lock(&ailp->ail_lock);
        /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
        xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];

                if (lip->li_ops->iop_unpin)
                        lip->li_ops->iop_unpin(lip, 0);
        }
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
 * find the insertion point on every xfs_log_item_batch_insert() call. This
 * saves a lot of needless list walking and is a net win, even though it
 * slightly increases the amount of AIL lock traffic to set it up and tear it
 * down.
 */
void
xfs_trans_committed_bulk(
        struct xfs_ail          *ailp,
        struct xfs_log_vec      *log_vector,
        xfs_lsn_t               commit_lsn,
        bool                    aborted)
{
#define LOG_ITEM_BATCH_SIZE     32
        struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
        struct xfs_log_vec      *lv;
        struct xfs_ail_cursor   cur;
        int                     i = 0;

        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
        spin_unlock(&ailp->ail_lock);

        /* unpin all the log items */
        for (lv = log_vector; lv; lv = lv->lv_next) {
                struct xfs_log_item     *lip = lv->lv_item;
                xfs_lsn_t               item_lsn;

                if (aborted)
                        set_bit(XFS_LI_ABORTED, &lip->li_flags);

                if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
                        lip->li_ops->iop_release(lip);
                        continue;
                }

                if (lip->li_ops->iop_committed)
                        item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
                else
                        item_lsn = commit_lsn;

                /* item_lsn of -1 means the item needs no further processing */
                if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
                        continue;

                /*
                 * if we are aborting the operation, no point in inserting the
                 * object into the AIL as we are in a shutdown situation.
                 */
                if (aborted) {
                        ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
                        if (lip->li_ops->iop_unpin)
                                lip->li_ops->iop_unpin(lip, 1);
                        continue;
                }

                if (item_lsn != commit_lsn) {

                        /*
                         * Not a bulk update option due to unusual item_lsn.
                         * Push into AIL immediately, rechecking the lsn once
                         * we have the ail lock. Then unpin the item. This does
                         * not affect the AIL cursor the bulk insert path is
                         * using.
                         */
                        spin_lock(&ailp->ail_lock);
                        if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
                                xfs_trans_ail_update(ailp, lip, item_lsn);
                        else
                                spin_unlock(&ailp->ail_lock);
                        if (lip->li_ops->iop_unpin)
                                lip->li_ops->iop_unpin(lip, 0);
                        continue;
                }

                /* Item is a candidate for bulk AIL insert.  */
                log_items[i++] = lv->lv_item;
                if (i >= LOG_ITEM_BATCH_SIZE) {
                        xfs_log_item_batch_insert(ailp, &cur, log_items,
                                        LOG_ITEM_BATCH_SIZE, commit_lsn);
                        i = 0;
                }
        }

        /* make sure we insert the remainder! */
        if (i)
                xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->ail_lock);
}

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
        struct xfs_trans        *tp,
        bool                    regrant)
{
        struct xfs_mount        *mp = tp->t_mountp;
        xfs_lsn_t               commit_lsn = -1;
        int                     error = 0;
        int                     sync = tp->t_flags & XFS_TRANS_SYNC;

        trace_xfs_trans_commit(tp, _RET_IP_);

        /*
         * Finish deferred items on final commit. Only permanent transactions
         * should ever have deferred ops.
         */
        WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
                     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
        if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
                error = xfs_defer_finish_noroll(&tp);
                if (error)
                        goto out_unreserve;
        }

        /*
         * If there is nothing to be logged by the transaction,
         * then unlock all of the items associated with the
         * transaction and free the transaction structure.
         * Also make sure to return any reserved blocks to
         * the free pool.
         */
        if (!(tp->t_flags & XFS_TRANS_DIRTY))
                goto out_unreserve;

        if (XFS_FORCED_SHUTDOWN(mp)) {
                error = -EIO;
                goto out_unreserve;
        }

        ASSERT(tp->t_ticket != NULL);

        /*
         * If we need to update the superblock, then do it now.
         */
        if (tp->t_flags & XFS_TRANS_SB_DIRTY)
                xfs_trans_apply_sb_deltas(tp);
        xfs_trans_apply_dquot_deltas(tp);

        xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

        xfs_trans_free(tp);

        /*
         * If the transaction needs to be synchronous, then force the
         * log out now and wait for it.
         */
        if (sync) {
                error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
                XFS_STATS_INC(mp, xs_trans_sync);
        } else {
                XFS_STATS_INC(mp, xs_trans_async);
        }

        return error;

out_unreserve:
        xfs_trans_unreserve_and_mod_sb(tp);

        /*
         * It is indeed possible for the transaction to be not dirty but
         * the dqinfo portion to be.  All that means is that we have some
         * (non-persistent) quota reservations that need to be unreserved.
         */
        xfs_trans_unreserve_and_mod_dquots(tp);
        if (tp->t_ticket) {
                if (regrant && !XLOG_FORCED_SHUTDOWN(mp->m_log))
                        xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
                else
                        xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
                tp->t_ticket = NULL;
        }
        xfs_trans_free_items(tp, !!error);
        xfs_trans_free(tp);

        XFS_STATS_INC(mp, xs_trans_empty);
        return error;
}

int
xfs_trans_commit(
        struct xfs_trans        *tp)
{
        return __xfs_trans_commit(tp, false);
}

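/*
 * Caller pattern (illustrative, not from the original source): even when
 * the commit fails, the transaction has been freed and its joined items
 * unlocked as if it had succeeded, so the caller only propagates the error
 * and must not cancel or otherwise touch tp:
 *
 *	error = xfs_trans_commit(tp);
 *	if (error)
 *		return error;
 */
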
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
        struct xfs_trans        *tp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        bool                    dirty = (tp->t_flags & XFS_TRANS_DIRTY);

        trace_xfs_trans_cancel(tp, _RET_IP_);

        if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
                xfs_defer_cancel(tp);

        /*
         * See if the caller is relying on us to shut down the
         * filesystem.  This happens in paths where we detect
         * corruption and decide to give up.
         */
        if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
                XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        }
#ifdef DEBUG
        if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
                struct xfs_log_item *lip;

                list_for_each_entry(lip, &tp->t_items, li_trans)
                        ASSERT(!xlog_item_is_intent_done(lip));
        }
#endif

        xfs_trans_unreserve_and_mod_sb(tp);
        xfs_trans_unreserve_and_mod_dquots(tp);

        if (tp->t_ticket) {
                xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
                tp->t_ticket = NULL;
        }

        xfs_trans_free_items(tp, dirty);
        xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want as soon
 * as possible to let chunks of it go to the log. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
        struct xfs_trans        **tpp)
{
        struct xfs_trans        *trans = *tpp;
        struct xfs_trans_res    tres;
        int                     error;

        trace_xfs_trans_roll(trans, _RET_IP_);

        /*
         * Copy the critical parameters from one trans to the next.
         */
        tres.tr_logres = trans->t_log_res;
        tres.tr_logcount = trans->t_log_count;

        *tpp = xfs_trans_dup(trans);

        /*
         * Commit the current transaction.
         * If this commit failed, then it'd just unlock those items that
         * are not marked ihold. That also means that a filesystem shutdown
         * is in progress. The caller takes the responsibility to cancel
         * the duplicate transaction that gets returned.
         */
        error = __xfs_trans_commit(trans, true);
        if (error)
                return error;

        /*
         * Reserve space in the log for the next transaction.
         * This also pushes items in the "AIL", the list of logged items,
         * out to disk if they are taking up space at the tail of the log
         * that we want to use. This requires that either nothing be locked
         * across this call, or that anything that is locked be logged in
         * the prior and the next transactions.
         */
        tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
        return xfs_trans_reserve(*tpp, &tres, 0, 0);
}

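/*
 * Usage sketch (illustrative, not from the original source): a caller that
 * must keep an inode locked across the roll logs it in every chunk and
 * re-joins it to the duplicate transaction afterwards:
 *
 *	error = xfs_trans_roll(&tp);
 *	if (error)
 *		return error;
 *	xfs_trans_ijoin(tp, ip, 0);
 */
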
/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
        struct xfs_inode        *ip,
        struct xfs_trans_res    *resv,
        unsigned int            dblocks,
        unsigned int            rblocks,
        bool                    force,
        struct xfs_trans        **tpp)
{
        struct xfs_trans        *tp;
        struct xfs_mount        *mp = ip->i_mount;
        bool                    retried = false;
        int                     error;

retry:
        error = xfs_trans_alloc(mp, resv, dblocks,
                        rblocks / mp->m_sb.sb_rextsize,
                        force ? XFS_TRANS_RESERVE : 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        error = xfs_qm_dqattach_locked(ip, false);
        if (error) {
                /* Caller should have allocated the dquots! */
                ASSERT(error != -ENOENT);
                goto out_cancel;
        }

        error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
        if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
                xfs_trans_cancel(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                xfs_blockgc_free_quota(ip, 0);
                retried = true;
                goto retry;
        }
        if (error)
                goto out_cancel;

        *tpp = tp;
        return 0;

out_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

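/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, 0,
 *			false, &tp);
 *	if (error)
 *		return error;
 *	(modify the inode; ip is joined and ILOCK_EXCL is held)
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */
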
/*
 * Allocate a transaction in preparation for inode creation by reserving quota
 * against the given dquots.  Callers are not required to hold any inode locks.
 */
int
xfs_trans_alloc_icreate(
        struct xfs_mount        *mp,
        struct xfs_trans_res    *resv,
        struct xfs_dquot        *udqp,
        struct xfs_dquot        *gdqp,
        struct xfs_dquot        *pdqp,
        unsigned int            dblocks,
        struct xfs_trans        **tpp)
{
        struct xfs_trans        *tp;
        bool                    retried = false;
        int                     error;

retry:
        error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
        if (error)
                return error;

        error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
        if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
                xfs_trans_cancel(tp);
                xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
                retried = true;
                goto retry;
        }
        if (error) {
                xfs_trans_cancel(tp);
                return error;
        }

        *tpp = tp;
        return 0;
}

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCK will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
        struct xfs_inode        *ip,
        struct xfs_dquot        *new_udqp,
        struct xfs_dquot        *new_gdqp,
        struct xfs_dquot        *new_pdqp,
        bool                    force,
        struct xfs_trans        **tpp)
{
        struct xfs_trans        *tp;
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_dquot        *udqp;
        struct xfs_dquot        *gdqp;
        struct xfs_dquot        *pdqp;
        bool                    retried = false;
        int                     error;

retry:
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

        error = xfs_qm_dqattach_locked(ip, false);
        if (error) {
                /* Caller should have allocated the dquots! */
                ASSERT(error != -ENOENT);
                goto out_cancel;
        }

        /*
         * For each quota type, skip quota reservations if the inode's dquots
         * now match the ones that came from the caller, or the caller didn't
         * pass one in.  The inode's dquots can change if we drop the ILOCK to
         * perform a blockgc scan, so we must preserve the caller's arguments.
         */
        udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
        gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
        pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
        if (udqp || gdqp || pdqp) {
                unsigned int    qflags = XFS_QMOPT_RES_REGBLKS;

                if (force)
                        qflags |= XFS_QMOPT_FORCE_RES;

                /*
                 * Reserve enough quota to handle blocks on disk and reserved
                 * for a delayed allocation.  We'll actually transfer the
                 * delalloc reservation between dquots at chown time, even
                 * though that part is only semi-transactional.
                 */
                error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
                                pdqp, ip->i_d.di_nblocks + ip->i_delayed_blks,
                                1, qflags);
                if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
                        xfs_trans_cancel(tp);
                        xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
                        retried = true;
                        goto retry;
                }
                if (error)
                        goto out_cancel;
        }

        *tpp = tp;
        return 0;

out_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}