/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef WW_RT

#define MUTEX		mutex
#define MUTEX_WAITER	mutex_waiter
static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}
static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_next_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}
static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_prev_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}
static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}
static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
{
	struct list_head *p = &lock->wait_list;

	if (pos)
		p = &pos->list;

	__mutex_add_waiter(lock, waiter, p);
}
static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
	return __mutex_owner(lock);
}
static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{
	return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
}
static inline void lock_wait_lock(struct mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);
}
static inline void unlock_wait_lock(struct mutex *lock)
{
	raw_spin_unlock(&lock->wait_lock);
}
static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
{
	lockdep_assert_held(&lock->wait_lock);
}

#else /* WW_RT */

#define MUTEX		rt_mutex
#define MUTEX_WAITER	rt_mutex_waiter
static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex *lock)
{
	struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);

	if (!n)
		return NULL;

	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}
static inline struct rt_mutex_waiter *
__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
	struct rb_node *n = rb_next(&w->tree.entry);

	if (!n)
		return NULL;

	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}
static inline struct rt_mutex_waiter *
__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
	struct rb_node *n = rb_prev(&w->tree.entry);

	if (!n)
		return NULL;

	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}
static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex *lock)
{
	struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);

	if (!n)
		return NULL;

	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}
static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
{
	/* RT unconditionally adds the waiter first and then removes it on error */
}
static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
	return rt_mutex_owner(&lock->rtmutex);
}
static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
{
	return rt_mutex_has_waiters(&lock->rtmutex);
}
static inline void lock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->rtmutex.wait_lock);
}
static inline void unlock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_unlock(&lock->rtmutex.wait_lock);
}
static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
	lockdep_assert_held(&lock->rtmutex.wait_lock);
}

#endif /* WW_RT */
/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
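/*
 * A minimal worked example of the two policies above (illustrative only;
 * T1/T2 are hypothetical transactions, T1 holding the older/smaller stamp):
 *
 *   Wait-Die:
 *     T2 requests a lock held by T1 -> T2 is killed (-EDEADLK); it drops
 *     its locks and retries with the same, now relatively older, stamp.
 *     T1 requests a lock held by T2 -> T1 simply waits.
 *
 *   Wound-Wait:
 *     T1 requests a lock held by T2 -> T2 is wounded and backs off
 *     (-EDEADLK) the next time it attempts another lock acquisition.
 *     T2 requests a lock held by T1 -> T2 simply waits.
 *
 * Either way the older transaction keeps making forward progress and only
 * the younger one retries, which avoids both deadlock and starvation.
 */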
/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef DEBUG_WW_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}
/*
 * Determine if @a is 'less' than @b. IOW, either @a is a lower priority task
 * or, when of equal priority, a younger transaction than @b.
 *
 * Depending on the algorithm, @a will either need to wait for @b, or die.
 */
static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
/*
 * Can only do the RT prio for WW_RT, because task->prio isn't stable due to PI,
 * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and
 * isn't affected by this.
 */
#ifdef WW_RT
	/* kernel prio; less is more */
	int a_prio = a->task->prio;
	int b_prio = b->task->prio;

	if (rt_prio(a_prio) || rt_prio(b_prio)) {
		if (a_prio > b_prio)
			return true;

		if (a_prio < b_prio)
			return false;

		/* equal static prio */
		if (dl_prio(a_prio)) {
			if (dl_time_before(b->task->dl.deadline,
					   a->task->dl.deadline))
				return true;

			if (dl_time_before(a->task->dl.deadline,
					   b->task->dl.deadline))
				return false;
		}

		/* equal prio */
	}
#endif

	/* FIFO order tie break -- bigger is younger */
	return (signed long)(a->stamp - b->stamp) > 0;
}
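/*
 * Illustration of the tie break above (example values, not taken from the
 * code): stamps come from a monotonically increasing per-class counter, so
 * the signed subtraction stays correct even across counter wrap-around as
 * long as two live stamps are less than LONG_MAX apart:
 *
 *   a->stamp == 7, b->stamp == 3          -> (signed long)4 > 0, @a younger
 *   a->stamp == 1, b->stamp == ULONG_MAX  -> (signed long)2 > 0, @a younger
 */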
/*
 * Wait-Die; wake a lesser waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
		debug_mutex_wake_waiter(lock, waiter);
#endif
		wake_up_process(waiter->task);
	}

	return true;
}
/*
 * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with more important transactions
 * than the lock holder's. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __ww_mutex_owner(lock);

	lockdep_assert_wait_lock_held(lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}
/*
 * We just acquired @lock under @ww_ctx; if there are more important contexts
 * waiting behind us on the wait-list, check whether they need to die, or
 * whether they need to wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list,
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur;

	lockdep_assert_wait_lock_held(lock);

	for (cur = __ww_waiter_first(lock); cur;
	     cur = __ww_waiter_next(lock, cur)) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}
/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!__ww_mutex_has_waiters(&lock->base)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	lock_wait_lock(&lock->base);
	__ww_mutex_check_waiters(&lock->base, ctx);
	unlock_wait_lock(&lock->base);
}
static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef DEBUG_WW_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct MUTEX_WAITER *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourselves.
	 */
	for (cur = __ww_waiter_prev(lock, waiter); cur;
	     cur = __ww_waiter_prev(lock, cur)) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}
/*
 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
 * smallest first, such that older contexts are preferred to acquire the lock
 * over younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting and, for
 * Wound-Wait, ensure we wound the owning context when it is younger.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
		      struct MUTEX *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur, *pos = NULL;
	bool is_wait_die;

	if (!ww_ctx) {
		__ww_waiter_add(lock, waiter, NULL);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	for (cur = __ww_waiter_last(lock); cur;
	     cur = __ww_waiter_prev(lock, cur)) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = cur;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__ww_waiter_add(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound it so that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}
static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
	if (lock->ctx) {
#ifdef DEBUG_WW_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}
}
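/*
 * Illustrative caller-side sketch of the API these helpers back (obj_a,
 * obj_b and my_ww_class are hypothetical, not defined here): every lock of
 * one class is taken under a single acquire context, and -EDEADLK means
 * "you are the younger transaction, back off and retry":
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 * retry:
 *	ret = ww_mutex_lock(&obj_a->lock, &ctx);
 *	if (ret)
 *		goto err;
 *	ret = ww_mutex_lock(&obj_b->lock, &ctx);
 *	if (ret) {
 *		ww_mutex_unlock(&obj_a->lock);
 *		goto err;
 *	}
 *	ww_acquire_done(&ctx);
 *
 *	... use obj_a and obj_b ...
 *
 *	ww_mutex_unlock(&obj_b->lock);
 *	ww_mutex_unlock(&obj_a->lock);
 *	ww_acquire_fini(&ctx);
 *	return 0;
 * err:
 *	if (ret == -EDEADLK)
 *		goto retry;	// same ctx, so we only ever grow older
 *	ww_acquire_fini(&ctx);
 *	return ret;
 *
 * After -EDEADLK the contended lock may also be taken first with
 * ww_mutex_lock_slow() before reacquiring the rest, which avoids repeated
 * backoffs.
 */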