// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_ag.h"
#include "xfs_trace.h"
/*
 * Use a static key here to reduce the overhead of xfs_drain_rele.  If the
 * compiler supports jump labels, the static branch will be replaced by a nop
 * sled when there are no xfs_drain_wait callers.  Online fsck is currently
 * the only caller, so this is a reasonable tradeoff.
 *
 * Note: Patching the kernel code requires taking the cpu hotplug lock.  Other
 * parts of the kernel allocate memory with that lock held, which means that
 * XFS callers cannot hold any locks that might be used by memory reclaim or
 * writeback when calling the static_branch_{inc,dec} functions.
 */
/* Jump-label gate: true only while at least one thread is draining intents. */
static DEFINE_STATIC_KEY_FALSE(xfs_drain_waiter_gate);
29 xfs_drain_wait_disable(void)
31 static_branch_dec(&xfs_drain_waiter_gate);
35 xfs_drain_wait_enable(void)
37 static_branch_inc(&xfs_drain_waiter_gate);
42 struct xfs_defer_drain *dr)
44 atomic_set(&dr->dr_count, 0);
45 init_waitqueue_head(&dr->dr_waiters);
49 xfs_defer_drain_free(struct xfs_defer_drain *dr)
51 ASSERT(atomic_read(&dr->dr_count) == 0);
54 /* Increase the pending intent count. */
55 static inline void xfs_defer_drain_grab(struct xfs_defer_drain *dr)
57 atomic_inc(&dr->dr_count);
60 static inline bool has_waiters(struct wait_queue_head *wq_head)
63 * This memory barrier is paired with the one in set_current_state on
66 smp_mb__after_atomic();
67 return waitqueue_active(wq_head);
70 /* Decrease the pending intent count, and wake any waiters, if appropriate. */
71 static inline void xfs_defer_drain_rele(struct xfs_defer_drain *dr)
73 if (atomic_dec_and_test(&dr->dr_count) &&
74 static_branch_unlikely(&xfs_drain_waiter_gate) &&
75 has_waiters(&dr->dr_waiters))
76 wake_up(&dr->dr_waiters);
79 /* Are there intents pending? */
80 static inline bool xfs_defer_drain_busy(struct xfs_defer_drain *dr)
82 return atomic_read(&dr->dr_count) > 0;
86 * Wait for the pending intent count for a drain to hit zero.
88 * Callers must not hold any locks that would prevent intents from being
91 static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr)
93 return wait_event_killable(dr->dr_waiters, !xfs_defer_drain_busy(dr));
97 * Get a passive reference to an AG and declare an intent to update its
101 xfs_perag_intent_get(
102 struct xfs_mount *mp,
105 struct xfs_perag *pag;
107 pag = xfs_perag_get(mp, agno);
111 xfs_perag_intent_hold(pag);
/*
 * Release our intent to update this AG's metadata, and then release our
 * passive ref to the AG.
 */
void
xfs_perag_intent_put(
	struct xfs_perag	*pag)
{
	xfs_perag_intent_rele(pag);
	xfs_perag_put(pag);	/* drop the passive ref taken by _intent_get */
}
128 * Declare an intent to update AG metadata. Other threads that need exclusive
129 * access can decide to back off if they see declared intentions.
132 xfs_perag_intent_hold(
133 struct xfs_perag *pag)
135 trace_xfs_perag_intent_hold(pag, __return_address);
136 xfs_defer_drain_grab(&pag->pag_intents_drain);
139 /* Release our intent to update this AG's metadata. */
141 xfs_perag_intent_rele(
142 struct xfs_perag *pag)
144 trace_xfs_perag_intent_rele(pag, __return_address);
145 xfs_defer_drain_rele(&pag->pag_intents_drain);
149 * Wait for the intent update count for this AG to hit zero.
150 * Callers must not hold any AG header buffers.
153 xfs_perag_intent_drain(
154 struct xfs_perag *pag)
156 trace_xfs_perag_wait_intents(pag, __return_address);
157 return xfs_defer_drain_wait(&pag->pag_intents_drain);
160 /* Has anyone declared an intent to update this AG? */
162 xfs_perag_intent_busy(
163 struct xfs_perag *pag)
165 return xfs_defer_drain_busy(&pag->pag_intents_drain);