locking: Move owner_on_cpu() into <linux/sched.h>
authorKefeng Wang <wangkefeng.wang@huawei.com>
Fri, 3 Dec 2021 07:59:34 +0000 (15:59 +0800)
committerPeter Zijlstra <peterz@infradead.org>
Sat, 4 Dec 2021 09:56:25 +0000 (10:56 +0100)
Move owner_on_cpu() from kernel/locking/rwsem.c into
include/linux/sched.h, guarded by CONFIG_SMP, then use it
in mutex/rwsem/rtmutex to simplify the code.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211203075935.136808-2-wangkefeng.wang@huawei.com
include/linux/sched.h
kernel/locking/mutex.c
kernel/locking/rtmutex.c
kernel/locking/rwsem.c

index 78c351e..ff609d9 100644 (file)
@@ -2171,6 +2171,15 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_SMP
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+       /*
+        * As lock holder preemption issue, we both skip spinning if
+        * task is not on cpu or its cpu is preempted
+        */
+       return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+}
+
 /* Returns effective CPU energy utilization, as seen by the scheduler */
 unsigned long sched_cpu_util(int cpu, unsigned long max);
 #endif /* CONFIG_SMP */
index db19136..5e35859 100644 (file)
@@ -367,8 +367,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
                /*
                 * Use vcpu_is_preempted to detect lock holder preemption issue.
                 */
-               if (!owner->on_cpu || need_resched() ||
-                               vcpu_is_preempted(task_cpu(owner))) {
+               if (!owner_on_cpu(owner) || need_resched()) {
                        ret = false;
                        break;
                }
@@ -403,14 +402,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
         * structure won't go away during the spinning period.
         */
        owner = __mutex_owner(lock);
-
-       /*
-        * As lock holder preemption issue, we both skip spinning if task is not
-        * on cpu or its cpu is preempted
-        */
-
        if (owner)
-               retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+               retval = owner_on_cpu(owner);
 
        /*
         * If lock->owner is not set, the mutex has been released. Return true
index f896208..0c1f2e3 100644 (file)
@@ -1382,9 +1382,8 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
                 *    for CONFIG_PREEMPT_RCU=y)
                 *  - the VCPU on which owner runs is preempted
                 */
-               if (!owner->on_cpu || need_resched() ||
-                   rt_mutex_waiter_is_top_waiter(lock, waiter) ||
-                   vcpu_is_preempted(task_cpu(owner))) {
+               if (!owner_on_cpu(owner) || need_resched() ||
+                   rt_mutex_waiter_is_top_waiter(lock, waiter)) {
                        res = false;
                        break;
                }
index c51387a..b92d0a8 100644 (file)
@@ -613,15 +613,6 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
        return false;
 }
 
-static inline bool owner_on_cpu(struct task_struct *owner)
-{
-       /*
-        * As lock holder preemption issue, we both skip spinning if
-        * task is not on cpu or its cpu is preempted
-        */
-       return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-}
-
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
        struct task_struct *owner;