From 653a5b0bd9b405db999d5f4bfe08d34691e2c55a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 15 Aug 2021 23:28:52 +0200
Subject: [PATCH] locking/ww_mutex: Abstract out internal lock accesses

Accessing the internal wait_lock of mutex and rtmutex is slightly
different. Provide helper functions for that.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211304.734635961@linutronix.de
---
 include/linux/ww_mutex.h  | 13 +++++++++----
 kernel/locking/ww_mutex.h | 23 +++++++++++++++++++----
 2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 590aaa2..3438e30 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -19,6 +19,11 @@
 
 #include <linux/mutex.h>
 
+#define WW_MUTEX_BASE			mutex
+#define ww_mutex_base_init(l,n,k)	__mutex_init(l,n,k)
+#define ww_mutex_base_trylock(l)	mutex_trylock(l)
+#define ww_mutex_base_is_locked(b)	mutex_is_locked((b))
+
 struct ww_class {
 	atomic_long_t stamp;
 	struct lock_class_key acquire_key;
@@ -29,7 +34,7 @@ struct ww_class {
 };
 
 struct ww_mutex {
-	struct mutex base;
+	struct WW_MUTEX_BASE base;
 	struct ww_acquire_ctx *ctx;
 #ifdef CONFIG_DEBUG_MUTEXES
 	struct ww_class *ww_class;
@@ -82,7 +87,7 @@ struct ww_acquire_ctx {
 static inline void ww_mutex_init(struct ww_mutex *lock,
 				 struct ww_class *ww_class)
 {
-	__mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+	ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
 	lock->ctx = NULL;
 #ifdef CONFIG_DEBUG_MUTEXES
 	lock->ww_class = ww_class;
@@ -330,7 +335,7 @@ extern void ww_mutex_unlock(struct ww_mutex *lock);
  */
 static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
 {
-	return mutex_trylock(&lock->base);
+	return ww_mutex_base_trylock(&lock->base);
 }
 
 /***
@@ -354,7 +359,7 @@ static inline void ww_mutex_destroy(struct ww_mutex *lock)
  */
 static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
 {
-	return mutex_is_locked(&lock->base);
+	return ww_mutex_base_is_locked(&lock->base);
}
 
 #endif
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 31b075f..309f3e4 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -68,6 +68,21 @@ __ww_mutex_has_waiters(struct mutex *lock)
 	return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
 }
 
+static inline void lock_wait_lock(struct mutex *lock)
+{
+	raw_spin_lock(&lock->wait_lock);
+}
+
+static inline void unlock_wait_lock(struct mutex *lock)
+{
+	raw_spin_unlock(&lock->wait_lock);
+}
+
+static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
+{
+	lockdep_assert_held(&lock->wait_lock);
+}
+
 /*
  * Wait-Die:
  *   The newer transactions are killed when:
@@ -174,7 +189,7 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
 {
 	struct task_struct *owner = __ww_mutex_owner(lock);
 
-	lockdep_assert_held(&lock->wait_lock);
+	lockdep_assert_wait_lock_held(lock);
 
 	/*
 	 * Possible through __ww_mutex_add_waiter() when we race with
@@ -227,7 +242,7 @@ __ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
 {
 	struct MUTEX_WAITER *cur;
 
-	lockdep_assert_held(&lock->wait_lock);
+	lockdep_assert_wait_lock_held(lock);
 
 	for (cur = __ww_waiter_first(lock); cur;
 	     cur = __ww_waiter_next(lock, cur)) {
@@ -275,9 +290,9 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	 * Uh oh, we raced in fastpath, check if any of the waiters need to
 	 * die or wound us.
 	 */
-	raw_spin_lock(&lock->base.wait_lock);
+	lock_wait_lock(&lock->base);
 	__ww_mutex_check_waiters(&lock->base, ctx);
-	raw_spin_unlock(&lock->base.wait_lock);
+	unlock_wait_lock(&lock->base);
 }
 
 static __always_inline int
-- 
2.7.4
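
The reason every wait_lock access is funneled through lock_wait_lock(),
unlock_wait_lock() and lockdep_assert_wait_lock_held() is that an
rtmutex-based ww_mutex backend keeps its wait_lock inside the embedded
rt_mutex_base, so only the accessors need to differ while the wait/wound
logic stays common. Below is a minimal sketch of what the rtmutex-side
counterparts could look like; the WW_RT switch, the rt_mutex layout (an
embedded rt_mutex_base member named 'rtmutex') and the base accessor
names are assumptions of this sketch, not something this patch defines:

/*
 * Sketch only: possible rtmutex-side counterparts to the mutex
 * helpers above. Assumes struct rt_mutex embeds a struct
 * rt_mutex_base member named 'rtmutex' which owns the raw
 * wait_lock; names are illustrative, not defined by this patch.
 */
#ifdef WW_RT
/* Base-lock accessors, mirroring the mutex defines in the header. */
#define WW_MUTEX_BASE			rt_mutex
#define ww_mutex_base_init(l,n,k)	__rt_mutex_init(l,n,k)
#define ww_mutex_base_trylock(l)	rt_mutex_trylock(l)

/* wait_lock helpers: same names, different route to the lock. */
static inline void lock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->rtmutex.wait_lock);
}

static inline void unlock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_unlock(&lock->rtmutex.wait_lock);
}

static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
	lockdep_assert_held(&lock->rtmutex.wait_lock);
}
#endif /* WW_RT */

With both backends providing the same helper names, code such as
ww_mutex_set_context_fastpath() above compiles unchanged against either
base lock type.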