}
trace_xfs_defer_init(mp, dop, _RET_IP_);
}
+
+/*
+ * Move state from one xfs_defer_ops to another and reset the source to initial
+ * state. This is primarily used to carry state forward across transaction rolls
+ * with internal dfops.
+ */
+void
+xfs_defer_move(
+ struct xfs_defer_ops *dst,
+ struct xfs_defer_ops *src)
+{
+ ASSERT(dst != src);
+
+ list_splice_init(&src->dop_intake, &dst->dop_intake);
+ list_splice_init(&src->dop_pending, &dst->dop_pending);
+
+ /*
+ * Transfer the joined inodes/buffers so dst relogs them on each roll.
+ * Note this overwrites (rather than merges into) dst's join arrays —
+ * assumes dst was freshly initialized; TODO confirm against callers.
+ */
+ memcpy(dst->dop_inodes, src->dop_inodes, sizeof(dst->dop_inodes));
+ memcpy(dst->dop_bufs, src->dop_bufs, sizeof(dst->dop_bufs));
+ dst->dop_low = src->dop_low;
+
+ xfs_defer_reset(src);
+}
#define __XFS_DEFER_H__
struct xfs_defer_op_type;
+struct xfs_defer_ops;
/*
* Save a log intent item and a list of extents, so that we can replay
XFS_DEFER_OPS_TYPE_MAX,
};
-#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
-#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */
-
-struct xfs_defer_ops {
- struct list_head dop_intake; /* unlogged pending work */
- struct list_head dop_pending; /* logged pending work */
-
- /* relog these with each roll */
- struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES];
- struct xfs_buf *dop_bufs[XFS_DEFER_OPS_NR_BUFS];
-
- bool dop_low; /* alloc in low mode */
-};
-
void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
struct list_head *h);
int xfs_defer_finish(struct xfs_trans **tp, struct xfs_defer_ops *dop);
bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
+void xfs_defer_move(struct xfs_defer_ops *dst, struct xfs_defer_ops *src);
/* Description of a deferred type. */
struct xfs_defer_op_type {
ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
tp->t_rtx_res = tp->t_rtx_res_used;
ntp->t_pflags = tp->t_pflags;
- ntp->t_dfops = tp->t_dfops;
+
+ /* copy the dfops pointer if it's external, otherwise move it */
+ xfs_defer_init(ntp, &ntp->t_dfops_internal);
+ if (tp->t_dfops != &tp->t_dfops_internal)
+ ntp->t_dfops = tp->t_dfops;
+ else
+ xfs_defer_move(ntp->t_dfops, tp->t_dfops);
xfs_trans_dup_dqinfo(tp, ntp);
INIT_LIST_HEAD(&tp->t_items);
INIT_LIST_HEAD(&tp->t_busy);
tp->t_firstblock = NULLFSBLOCK;
+ /*
+ * Only transactions with a permanent log reservation are rolled and
+ * thus use dfops. Leave ->t_dfops NULL for non-permanent reservations
+ * so the commit and cancel paths skip finishing or cancelling an
+ * empty dfops.
+ */
+ if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES)
+ xfs_defer_init(tp, &tp->t_dfops_internal);
error = xfs_trans_reserve(tp, resp, blocks, rtextents);
if (error) {
int error = 0;
int sync = tp->t_flags & XFS_TRANS_SYNC;
- ASSERT(!tp->t_dfops ||
- !xfs_defer_has_unfinished_work(tp->t_dfops) || regrant);
-
trace_xfs_trans_commit(tp, _RET_IP_);
+ /* finish deferred items on final commit */
+ if (!regrant && tp->t_dfops) {
+ error = xfs_defer_finish(&tp, tp->t_dfops);
+ if (error) {
+ xfs_defer_cancel(tp->t_dfops);
+ goto out_unreserve;
+ }
+ }
+
/*
* If there is nothing to be logged by the transaction,
* then unlock all of the items associated with the
trace_xfs_trans_cancel(tp, _RET_IP_);
+ if (tp->t_dfops)
+ xfs_defer_cancel(tp->t_dfops);
+
/*
* See if the caller is relying on us to shut down the
* filesystem. This happens in paths where we detect
struct xfs_btree_cur;
struct xfs_cui_log_item;
struct xfs_cud_log_item;
-struct xfs_defer_ops;
struct xfs_bui_log_item;
struct xfs_bud_log_item;
#define XFS_ITEM_LOCKED 2
#define XFS_ITEM_FLUSHING 3
+/*
+ * Deferred operations tracking structure.
+ *
+ * Defined here (rather than in xfs_defer.h) so that struct xfs_trans can
+ * embed a dfops instance directly (t_dfops_internal).
+ */
+#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
+#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */
+struct xfs_defer_ops {
+ struct list_head dop_intake; /* unlogged pending work */
+ struct list_head dop_pending; /* logged pending work */
+
+ /* relog these with each roll */
+ struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES];
+ struct xfs_buf *dop_bufs[XFS_DEFER_OPS_NR_BUFS];
+
+ bool dop_low; /* alloc in low mode */
+};
/*
* This is the structure maintained for every active transaction.
struct list_head t_items; /* log item descriptors */
struct list_head t_busy; /* list of busy extents */
unsigned long t_pflags; /* saved process flags state */
+ struct xfs_defer_ops t_dfops_internal;
} xfs_trans_t;
/*