locking/ww_mutex: Abstract out mutex accessors
author: Peter Zijlstra <peterz@infradead.org>
Sun, 15 Aug 2021 21:28:49 +0000 (23:28 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Tue, 17 Aug 2021 17:04:57 +0000 (19:04 +0200)
Move the mutex related access from various ww_mutex functions into helper
functions so they can be substituted for rtmutex based ww_mutex later.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211304.622477030@linutronix.de
kernel/locking/ww_mutex.h

index f5aaf2f..842dbed 100644 (file)
@@ -53,6 +53,18 @@ __ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_wa
        __mutex_add_waiter(lock, waiter, p);
 }
 
+static inline struct task_struct *
+__ww_mutex_owner(struct mutex *lock)
+{
+       return __mutex_owner(lock);
+}
+
+static inline bool
+__ww_mutex_has_waiters(struct mutex *lock)
+{
+       return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
+}
+
 /*
  * Wait-Die:
  *   The newer transactions are killed when:
@@ -157,7 +169,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
                             struct ww_acquire_ctx *ww_ctx,
                             struct ww_acquire_ctx *hold_ctx)
 {
-       struct task_struct *owner = __mutex_owner(lock);
+       struct task_struct *owner = __ww_mutex_owner(lock);
 
        lockdep_assert_held(&lock->wait_lock);
 
@@ -253,7 +265,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
         * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
         * and/or !empty list.
         */
-       if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
+       if (likely(!__ww_mutex_has_waiters(&lock->base)))
                return;
 
        /*