rtmutex: Wake up the waiters lockless while dropping the read lock.
authorThomas Gleixner <tglx@linutronix.de>
Tue, 28 Sep 2021 15:00:06 +0000 (17:00 +0200)
committerPeter Zijlstra <peterz@infradead.org>
Fri, 1 Oct 2021 11:57:52 +0000 (13:57 +0200)
The rw_semaphore and rwlock_t implementations both wake the waiter while
holding the rt_mutex_base::wait_lock.
This can be optimized by waking the waiter lockless outside of the
locked section to avoid needless contention on the
rt_mutex_base::wait_lock.

Extend rt_mutex_wake_q_add() to also accept task and state and use it in
__rwbase_read_unlock().

Suggested-by: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210928150006.597310-3-bigeasy@linutronix.de
kernel/locking/rtmutex.c
kernel/locking/rwbase_rt.c

index cafc259..0c6a48d 100644 (file)
@@ -446,19 +446,26 @@ static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
 }
 
 /* RT mutex specific wake_q wrappers */
-static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
-                                               struct rt_mutex_waiter *w)
+static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh,
+                                                    struct task_struct *task,
+                                                    unsigned int wake_state)
 {
-       if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state == TASK_RTLOCK_WAIT) {
+       if (IS_ENABLED(CONFIG_PREEMPT_RT) && wake_state == TASK_RTLOCK_WAIT) {
                if (IS_ENABLED(CONFIG_PROVE_LOCKING))
                        WARN_ON_ONCE(wqh->rtlock_task);
-               get_task_struct(w->task);
-               wqh->rtlock_task = w->task;
+               get_task_struct(task);
+               wqh->rtlock_task = task;
        } else {
-               wake_q_add(&wqh->head, w->task);
+               wake_q_add(&wqh->head, task);
        }
 }
 
+static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
+                                               struct rt_mutex_waiter *w)
+{
+       rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state);
+}
+
 static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
 {
        if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
index 4ba1508..6b143fb 100644 (file)
@@ -141,6 +141,7 @@ static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
 {
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        struct task_struct *owner;
+       DEFINE_RT_WAKE_Q(wqh);
 
        raw_spin_lock_irq(&rtm->wait_lock);
        /*
@@ -151,9 +152,12 @@ static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
         */
        owner = rt_mutex_owner(rtm);
        if (owner)
-               wake_up_state(owner, state);
+               rt_mutex_wake_q_add_task(&wqh, owner, state);
 
+       /* Pairs with the preempt_enable in rt_mutex_wake_up_q() */
+       preempt_disable();
        raw_spin_unlock_irq(&rtm->wait_lock);
+       rt_mutex_wake_up_q(&wqh);
 }
 
 static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,