sched: Add task_struct pointer to sched_class::set_curr_task
author: Peter Zijlstra <peterz@infradead.org>
Wed, 29 May 2019 20:36:41 +0000 (20:36 +0000)
committer: Peter Zijlstra <peterz@infradead.org>
Thu, 8 Aug 2019 07:09:31 +0000 (09:09 +0200)
In preparation of further separating pick_next_task() and
set_curr_task() we have to pass the actual task into it, while there,
rename the thing to better pair with put_prev_task().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
Link: https://lkml.kernel.org/r/a96d1bcdd716db4a4c5da2fece647a1456c0ed78.1559129225.git.vpillai@digitalocean.com
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stop_task.c

index 364b6d7..0c42207 100644 (file)
@@ -1494,7 +1494,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
        if (queued)
                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
        if (running)
-               set_curr_task(rq, p);
+               set_next_task(rq, p);
 }
 
 /*
@@ -4325,7 +4325,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
        if (queued)
                enqueue_task(rq, p, queue_flag);
        if (running)
-               set_curr_task(rq, p);
+               set_next_task(rq, p);
 
        check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
@@ -4392,7 +4392,7 @@ void set_user_nice(struct task_struct *p, long nice)
                        resched_curr(rq);
        }
        if (running)
-               set_curr_task(rq, p);
+               set_next_task(rq, p);
 out_unlock:
        task_rq_unlock(rq, p, &rf);
 }
@@ -4840,7 +4840,7 @@ change:
                enqueue_task(rq, p, queue_flags);
        }
        if (running)
-               set_curr_task(rq, p);
+               set_next_task(rq, p);
 
        check_class_changed(rq, p, prev_class, oldprio);
 
@@ -6042,7 +6042,7 @@ void sched_setnuma(struct task_struct *p, int nid)
        if (queued)
                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
        if (running)
-               set_curr_task(rq, p);
+               set_next_task(rq, p);
        task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -6919,7 +6919,7 @@ void sched_move_task(struct task_struct *tsk)
        if (queued)
                enqueue_task(rq, tsk, queue_flags);
        if (running)
-               set_curr_task(rq, tsk);
+               set_next_task(rq, tsk);
 
        task_rq_unlock(rq, tsk, &rf);
 }
index 2dc2784..6eae793 100644 (file)
@@ -1844,11 +1844,6 @@ static void task_fork_dl(struct task_struct *p)
         */
 }
 
-static void set_curr_task_dl(struct rq *rq)
-{
-       set_next_task_dl(rq, rq->curr);
-}
-
 #ifdef CONFIG_SMP
 
 /* Only try algorithms three times */
@@ -2466,6 +2461,7 @@ const struct sched_class dl_sched_class = {
 
        .pick_next_task         = pick_next_task_dl,
        .put_prev_task          = put_prev_task_dl,
+       .set_next_task          = set_next_task_dl,
 
 #ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_dl,
@@ -2476,7 +2472,6 @@ const struct sched_class dl_sched_class = {
        .task_woken             = task_woken_dl,
 #endif
 
-       .set_curr_task          = set_curr_task_dl,
        .task_tick              = task_tick_dl,
        .task_fork              = task_fork_dl,
 
index 7d8043f..8ce1b88 100644 (file)
@@ -10150,9 +10150,19 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
  * This routine is mostly called to set cfs_rq->curr field when a task
  * migrates between groups/classes.
  */
-static void set_curr_task_fair(struct rq *rq)
+static void set_next_task_fair(struct rq *rq, struct task_struct *p)
 {
-       struct sched_entity *se = &rq->curr->se;
+       struct sched_entity *se = &p->se;
+
+#ifdef CONFIG_SMP
+       if (task_on_rq_queued(p)) {
+               /*
+                * Move the next running task to the front of the list, so our
+                * cfs_tasks list becomes MRU one.
+                */
+               list_move(&se->group_node, &rq->cfs_tasks);
+       }
+#endif
 
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -10423,7 +10433,9 @@ const struct sched_class fair_sched_class = {
        .check_preempt_curr     = check_preempt_wakeup,
 
        .pick_next_task         = pick_next_task_fair,
+
        .put_prev_task          = put_prev_task_fair,
+       .set_next_task          = set_next_task_fair,
 
 #ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_fair,
@@ -10436,7 +10448,6 @@ const struct sched_class fair_sched_class = {
        .set_cpus_allowed       = set_cpus_allowed_common,
 #endif
 
-       .set_curr_task          = set_curr_task_fair,
        .task_tick              = task_tick_fair,
        .task_fork              = task_fork_fair,
 
index 8094093..54194d4 100644 (file)
@@ -374,14 +374,25 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
        resched_curr(rq);
 }
 
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static void set_next_task_idle(struct rq *rq, struct task_struct *next)
+{
+       update_idle_core(rq);
+       schedstat_inc(rq->sched_goidle);
+}
+
 static struct task_struct *
 pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
+       struct task_struct *next = rq->idle;
+
        put_prev_task(rq, prev);
-       update_idle_core(rq);
-       schedstat_inc(rq->sched_goidle);
+       set_next_task_idle(rq, next);
 
-       return rq->idle;
+       return next;
 }
 
 /*
@@ -397,10 +408,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
        raw_spin_lock_irq(&rq->lock);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
-{
-}
-
 /*
  * scheduler tick hitting a task of our scheduling class.
  *
@@ -413,10 +420,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
 
-static void set_curr_task_idle(struct rq *rq)
-{
-}
-
 static void switched_to_idle(struct rq *rq, struct task_struct *p)
 {
        BUG();
@@ -451,13 +454,13 @@ const struct sched_class idle_sched_class = {
 
        .pick_next_task         = pick_next_task_idle,
        .put_prev_task          = put_prev_task_idle,
+       .set_next_task          = set_next_task_idle,
 
 #ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_idle,
        .set_cpus_allowed       = set_cpus_allowed_common,
 #endif
 
-       .set_curr_task          = set_curr_task_idle,
        .task_tick              = task_tick_idle,
 
        .get_rr_interval        = get_rr_interval_idle,
index 40bb710..f71bcbe 100644 (file)
@@ -2354,11 +2354,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        }
 }
 
-static void set_curr_task_rt(struct rq *rq)
-{
-       set_next_task_rt(rq, rq->curr);
-}
-
 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
        /*
@@ -2380,6 +2375,7 @@ const struct sched_class rt_sched_class = {
 
        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,
+       .set_next_task          = set_next_task_rt,
 
 #ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,
@@ -2391,7 +2387,6 @@ const struct sched_class rt_sched_class = {
        .switched_from          = switched_from_rt,
 #endif
 
-       .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
 
        .get_rr_interval        = get_rr_interval_rt,
index b3449d0..f3c5044 100644 (file)
@@ -1707,6 +1707,7 @@ struct sched_class {
                                               struct task_struct *prev,
                                               struct rq_flags *rf);
        void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+       void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
        int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
@@ -1721,7 +1722,6 @@ struct sched_class {
        void (*rq_offline)(struct rq *rq);
 #endif
 
-       void (*set_curr_task)(struct rq *rq);
        void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
        void (*task_fork)(struct task_struct *p);
        void (*task_dead)(struct task_struct *p);
@@ -1755,9 +1755,10 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
        prev->sched_class->put_prev_task(rq, prev);
 }
 
-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
 {
-       curr->sched_class->set_curr_task(rq);
+       WARN_ON_ONCE(rq->curr != next);
+       next->sched_class->set_next_task(rq, next);
 }
 
 #ifdef CONFIG_SMP
index c183b79..47a3d2a 100644 (file)
@@ -23,6 +23,11 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
        /* we're never preempted */
 }
 
+static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
+{
+       stop->se.exec_start = rq_clock_task(rq);
+}
+
 static struct task_struct *
 pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -32,8 +37,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
                return NULL;
 
        put_prev_task(rq, prev);
-
-       stop->se.exec_start = rq_clock_task(rq);
+       set_next_task_stop(rq, stop);
 
        return stop;
 }
@@ -86,13 +90,6 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
 
-static void set_curr_task_stop(struct rq *rq)
-{
-       struct task_struct *stop = rq->stop;
-
-       stop->se.exec_start = rq_clock_task(rq);
-}
-
 static void switched_to_stop(struct rq *rq, struct task_struct *p)
 {
        BUG(); /* its impossible to change to this class */
@@ -128,13 +125,13 @@ const struct sched_class stop_sched_class = {
 
        .pick_next_task         = pick_next_task_stop,
        .put_prev_task          = put_prev_task_stop,
+       .set_next_task          = set_next_task_stop,
 
 #ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_stop,
        .set_cpus_allowed       = set_cpus_allowed_common,
 #endif
 
-       .set_curr_task          = set_curr_task_stop,
        .task_tick              = task_tick_stop,
 
        .get_rr_interval        = get_rr_interval_stop,