sched/core: Use READ_ONCE()/WRITE_ONCE() in move_queued_task()/task_rq_lock()
Author:     Andrea Parri <andrea.parri@amarulasolutions.com>
AuthorDate: Mon, 21 Jan 2019 15:52:40 +0000 (16:52 +0100)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Fri, 5 Apr 2019 20:33:12 +0000 (22:33 +0200)
[ Upstream commit c546951d9c9300065bad253ecdf1ac59ce9d06c8 ]

move_queued_task() synchronizes with task_rq_lock() as follows:

	move_queued_task()		task_rq_lock()

	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
	[S] ->cpu = new_cpu		[L] ->on_rq

where "[L] rq = task_rq()" is ordered before "ACQUIRE (rq->lock)" by an
address dependency and, in turn, "ACQUIRE (rq->lock)" is ordered before
"[L] ->on_rq" by the ACQUIRE itself.

Use READ_ONCE() to load ->cpu in task_rq() (cf. task_cpu()) to honor
this address dependency.  Also, mark the accesses to ->cpu and ->on_rq
with READ_ONCE()/WRITE_ONCE() to comply with the LKMM.
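
The pattern can be checked against the LKMM with a message-passing
litmus test.  The following is a sketch modeled on the tests in
tools/memory-model/litmus-tests (the test name and variables are
illustrative, not part of this commit); herd7 should report the
"exists" state as never reached:

	C MP+fencewmbonce+onceaddronce

	(*
	 * Sketch: x stands in for ->on_rq, p for ->cpu (read as the
	 * address of the rq to lock).  smp_wmb() models the WMB in
	 * __set_task_cpu(); the address dependency from r0 to r1
	 * models '[L] rq = task_rq()' followed by the load of ->on_rq.
	 *)

	{
		p=u;
		u=0;
		x=0;
	}

	P0(int *x, int **p)
	{
		WRITE_ONCE(*x, 1);
		smp_wmb();
		WRITE_ONCE(*p, x);
	}

	P1(int **p)
	{
		int *r0;
		int r1;

		r0 = READ_ONCE(*p);
		r1 = READ_ONCE(*r0);
	}

	exists (1:r0=x /\ 1:r1=0)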

Signed-off-by: Andrea Parri <andrea.parri@amarulasolutions.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: https://lkml.kernel.org/r/20190121155240.27173-1-andrea.parri@amarulasolutions.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/linux/sched.h | 4 ++--
 kernel/sched/core.c   | 9 +++++----
 kernel/sched/sched.h  | 6 +++---
 3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4abb5bd..5dc024e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1737,9 +1737,9 @@ static __always_inline bool need_resched(void)
 static inline unsigned int task_cpu(const struct task_struct *p)
 {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
-       return p->cpu;
+       return READ_ONCE(p->cpu);
 #else
-       return task_thread_info(p)->cpu;
+       return READ_ONCE(task_thread_info(p)->cpu);
 #endif
 }
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 152a0b0..9a4f57d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -107,11 +107,12 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
                 *                                      [L] ->on_rq
                 *      RELEASE (rq->lock)
                 *
-                * If we observe the old CPU in task_rq_lock, the acquire of
+                * If we observe the old CPU in task_rq_lock(), the acquire of
                 * the old rq->lock will fully serialize against the stores.
                 *
-                * If we observe the new CPU in task_rq_lock, the acquire will
-                * pair with the WMB to ensure we must then also see migrating.
+                * If we observe the new CPU in task_rq_lock(), the address
+                * dependency headed by '[L] rq = task_rq()' and the acquire
+                * will pair with the WMB to ensure we then also see migrating.
                 */
                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
                        rq_pin_lock(rq, rf);
@@ -910,7 +911,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
 {
        lockdep_assert_held(&rq->lock);
 
-       p->on_rq = TASK_ON_RQ_MIGRATING;
+       WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
        dequeue_task(rq, p, DEQUEUE_NOCLOCK);
        set_task_cpu(p, new_cpu);
        rq_unlock(rq, rf);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b631722..4c7a837 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1331,9 +1331,9 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
         */
        smp_wmb();
 #ifdef CONFIG_THREAD_INFO_IN_TASK
-       p->cpu = cpu;
+       WRITE_ONCE(p->cpu, cpu);
 #else
-       task_thread_info(p)->cpu = cpu;
+       WRITE_ONCE(task_thread_info(p)->cpu, cpu);
 #endif
        p->wake_cpu = cpu;
 #endif
@@ -1434,7 +1434,7 @@ static inline int task_on_rq_queued(struct task_struct *p)
 
 static inline int task_on_rq_migrating(struct task_struct *p)
 {
-       return p->on_rq == TASK_ON_RQ_MIGRATING;
+       return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
 }
 
 /*