/* SPDX-License-Identifier: GPL-2.0-only */

static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
        struct mutex_waiter *w;

        w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
                return NULL;

        return w;
}

static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
        w = list_next_entry(w, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
                return NULL;

        return w;
}

static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
        w = list_prev_entry(w, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
                return NULL;

        return w;
}

static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{
        struct mutex_waiter *w;

        w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
                return NULL;

        return w;
}
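
/*
 * Illustrative sketch, not part of the original file: each helper above
 * returns NULL once it reaches the list head, so a full scan of the
 * wait-list, oldest (smallest stamp) to youngest, is simply:
 *
 *	struct mutex_waiter *cur;
 *
 *	for (cur = __ww_waiter_first(lock); cur;
 *	     cur = __ww_waiter_next(lock, cur))
 *		...;
 *
 * __ww_mutex_check_waiters() below uses exactly this shape.
 */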

/*
 * Wait-Die:
 *   A newer transaction is killed when it makes a request for a lock
 *   being held by an older transaction.
 *
 * Wound-Wait:
 *   A newer transaction is wounded when an older transaction makes a
 *   request for a lock being held by the newer transaction.
 */
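
/*
 * Worked example (illustrative, with hypothetical transactions): let T1
 * have stamp 1 (older) and T2 have stamp 2 (younger), and let T2 hold a
 * lock that T1 requests.
 *
 * Wait-Die:   T1 waits for T2; had T2 instead requested a lock held by
 *             T1, T2 would have been killed (-EDEADLK) and forced to retry.
 * Wound-Wait: T1 wounds T2; T2 backs off (-EDEADLK) at its next lock
 *             attempt, releasing its locks for T1.
 */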

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        /*
         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
         * but released with a normal mutex_unlock in this call.
         *
         * This should never happen, always use ww_mutex_unlock.
         */
        DEBUG_LOCKS_WARN_ON(ww->ctx);

        /* Not quite done after calling ww_acquire_done()? */
        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

        if (ww_ctx->contending_lock) {
                /*
                 * After -EDEADLK you tried to
                 * acquire a different ww_mutex? Bad!
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

                /*
                 * You called ww_mutex_lock after receiving -EDEADLK,
                 * but 'forgot' to unlock everything else first?
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
                ww_ctx->contending_lock = NULL;
        }

        /* Naughty, using a different class will lead to undefined behavior! */
        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
        ww_ctx->acquired++;
        ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
        return (signed long)(a->stamp - b->stamp) > 0;
}
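
/*
 * Illustrative note, not part of the original file: the unsigned subtraction
 * followed by the signed compare keeps the ordering correct across stamp
 * wraparound. E.g. with 8-bit stamps, a->stamp == 2 and b->stamp == 254
 * gives (s8)(2 - 254) == 4 > 0, so @a is still correctly seen as the
 * younger (later-stamped) context even though its raw value is smaller.
 */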

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
               struct ww_acquire_ctx *ww_ctx)
{
        if (!ww_ctx->is_wait_die)
                return false;

        if (waiter->ww_ctx->acquired > 0 &&
            __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
                debug_mutex_wake_waiter(lock, waiter);
                wake_up_process(waiter->task);
        }

        return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
                             struct ww_acquire_ctx *ww_ctx,
                             struct ww_acquire_ctx *hold_ctx)
{
        struct task_struct *owner = __mutex_owner(lock);

        lockdep_assert_held(&lock->wait_lock);

        /*
         * Possible through __ww_mutex_add_waiter() when we race with
         * ww_mutex_set_context_fastpath(). In that case we'll get here again
         * through __ww_mutex_check_waiters().
         */
        if (!hold_ctx)
                return false;

        /*
         * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
         * it cannot go away because we'll have FLAG_WAITERS set and hold
         * wait_lock.
         */
        if (!owner)
                return false;

        if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
                hold_ctx->wounded = 1;

                /*
                 * wake_up_process() paired with set_current_state()
                 * inserts sufficient barriers to make sure @owner either sees
                 * it's wounded in __ww_mutex_check_kill() or has a
                 * wakeup pending to re-read the wounded state.
                 */
                if (owner != current)
                        wake_up_process(owner);

                return true;
        }

        return false;
}
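
/*
 * Clarifying note, not in the original file: wounding is lazy. The wounded
 * owner is not preempted here; it only acts on hold_ctx->wounded the next
 * time it blocks on a ww_mutex and runs __ww_mutex_check_kill(), at which
 * point it backs off with -EDEADLK.
 */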

/*
 * We just acquired @lock under @ww_ctx; if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list,
 * which is ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
        struct mutex_waiter *cur;

        lockdep_assert_held(&lock->wait_lock);

        for (cur = __ww_waiter_first(lock); cur;
             cur = __ww_waiter_next(lock, cur)) {
                if (!cur->ww_ctx)
                        continue;

                if (__ww_mutex_die(lock, cur, ww_ctx) ||
                    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
                        break;
        }
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        ww_mutex_lock_acquired(lock, ctx);

        /*
         * The lock->ctx update should be visible on all cores before
         * the WAITERS check is done, otherwise contended waiters might be
         * missed. The contended waiters will either see ww_ctx == NULL
         * and keep spinning, or they will acquire wait_lock, add themselves
         * to the waiter list and sleep.
         */
        smp_mb(); /* See comments above and below. */

        /*
         * [W] ww->ctx = ctx        [W] MUTEX_FLAG_WAITERS
         *     MB                       MB
         * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
         *
         * The memory barrier above pairs with the memory barrier in
         * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
         * and/or !empty list.
         */
        if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
                return;

        /*
         * Uh oh, we raced in fastpath, check if any of the waiters need to
         * die or wound us.
         */
        raw_spin_lock(&lock->base.wait_lock);
        __ww_mutex_check_waiters(&lock->base, ctx);
        raw_spin_unlock(&lock->base.wait_lock);
}

static __always_inline int
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
        if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
                struct ww_mutex *ww;

                ww = container_of(lock, struct ww_mutex, base);
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
                ww_ctx->contending_lock = ww;
#endif
                return -EDEADLK;
        }

        return 0;
}
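
/*
 * Illustrative caller-side sketch, not part of the original file: a context
 * that gets -EDEADLK from the kill above must drop every lock it holds in
 * this context, sleep-wait on the contending lock with ww_mutex_lock_slow(),
 * and only then retry the whole sequence (see
 * Documentation/locking/ww-mutex-design.rst):
 *
 *	ret = ww_mutex_lock(&obj->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		...unlock all locks held under &ctx...
 *		ww_mutex_lock_slow(&obj->lock, &ctx);
 *		...retry, with obj->lock now held as the first lock...
 *	}
 */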

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
                      struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
        struct mutex_waiter *cur;

        if (ctx->acquired == 0)
                return 0;

        if (!ctx->is_wait_die) {
                if (ctx->wounded)
                        return __ww_mutex_kill(lock, ctx);

                return 0;
        }

        if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
                return __ww_mutex_kill(lock, ctx);

        /*
         * If there is a waiter in front of us that has a context, then its
         * stamp is earlier than ours and we must kill ourselves.
         */
        for (cur = __ww_waiter_prev(lock, waiter); cur;
             cur = __ww_waiter_prev(lock, cur)) {
                if (!cur->ww_ctx)
                        continue;

                return __ww_mutex_kill(lock, ctx);
        }

        return 0;
}

/*
 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
 * smallest first, such that older contexts are preferred to acquire the
 * lock over younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting, and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
                      struct mutex *lock,
                      struct ww_acquire_ctx *ww_ctx)
{
        struct mutex_waiter *cur;
        struct list_head *pos;
        bool is_wait_die;

        if (!ww_ctx) {
                __mutex_add_waiter(lock, waiter, &lock->wait_list);
                return 0;
        }

        is_wait_die = ww_ctx->is_wait_die;

        /*
         * Add the waiter before the first waiter with a higher stamp.
         * Waiters without a context are skipped to avoid starving
         * them. Wait-Die waiters may die here. Wound-Wait waiters
         * never die here, but they are sorted in stamp order and
         * may wound the lock holder.
         */
        pos = &lock->wait_list;
        for (cur = __ww_waiter_last(lock); cur;
             cur = __ww_waiter_prev(lock, cur)) {
                if (!cur->ww_ctx)
                        continue;

                if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
                        /*
                         * Wait-Die: if we find an older context waiting, there
                         * is no point in queueing behind it, as we'd have to
                         * die the moment it would acquire the lock.
                         */
                        if (is_wait_die) {
                                int ret = __ww_mutex_kill(lock, ww_ctx);

                                if (ret)
                                        return ret;
                        }

                        break;
                }

                pos = &cur->list;

                /* Wait-Die: ensure younger waiters die. */
                __ww_mutex_die(lock, cur, ww_ctx);
        }

        __mutex_add_waiter(lock, waiter, pos);

        /*
         * Wound-Wait: if we're blocking on a mutex owned by a younger context,
         * wound it so that we might proceed.
         */
        if (!is_wait_die) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

                /*
                 * See ww_mutex_set_context_fastpath(). Orders setting
                 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
                 * such that either we or the fastpath will wound @ww->ctx.
                 */
                smp_mb();
                __ww_mutex_wound(lock, ww_ctx, ww->ctx);
        }

        return 0;
}

static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
        if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
                if (lock->ctx->acquired > 0)
                        lock->ctx->acquired--;
                lock->ctx = NULL;
        }
}
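
/*
 * End-to-end usage sketch, not part of the original file; demo_ww_class and
 * the locking order are hypothetical, while the calls are the public ww_mutex
 * interface that the helpers above implement:
 *
 *	static DEFINE_WW_CLASS(demo_ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &demo_ww_class);
 *	ww_mutex_lock(&a->lock, &ctx);		// may return -EDEADLK
 *	ww_mutex_lock(&b->lock, &ctx);		// may return -EDEADLK
 *	ww_acquire_done(&ctx);			// no more locks will be taken
 *
 *	...critical section...
 *
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);
 */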