Merge branch 'sched/warnings' into sched/core, to pick up WARN_ON_ONCE() conversion...
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 813687a..7d289d8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -481,8 +481,7 @@ sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
  *                             p->se.load, p->rt_priority,
  *                             p->dl.dl_{runtime, deadline, period, flags, bw, density}
  *  - sched_setnuma():         p->numa_preferred_nid
- *  - sched_move_task()/
- *    cpu_cgroup_fork():       p->sched_task_group
+ *  - sched_move_task():       p->sched_task_group
  *  - uclamp_update_active()   p->uclamp*
  *
  * p->state <- TASK_*:
@@ -8861,7 +8860,7 @@ void sched_show_task(struct task_struct *p)
        if (pid_alive(p))
                ppid = task_pid_nr(rcu_dereference(p->real_parent));
        rcu_read_unlock();
-       pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
+       pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
                free, task_pid_nr(p), ppid,
                read_task_thread_flags(p));
 
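The hunk above only changes printf-style alignment: a plain width such as %5d right-justifies the value (padding on the left), while %-5d left-justifies it (padding on the right). A minimal user-space sketch of the difference, using ordinary printf rather than the kernel's pr_cont (the conversion specifiers behave the same way):

    #include <stdio.h>

    int main(void)
    {
            /* Old format: right-justified, padded on the left to the field width. */
            printf(" stack:%5lu pid:%5d ppid:%6d\n", 123UL, 42, 7);
            /* New format: left-justified, padded on the right instead. */
            printf(" stack:%-5lu pid:%-5d ppid:%-6d\n", 123UL, 42, 7);
            return 0;
    }

The first line prints "pid:   42", the second "pid:42   "; the field widths are unchanged, only the side of the padding moves.
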
@@ -9601,9 +9600,6 @@ LIST_HEAD(task_groups);
 static struct kmem_cache *task_group_cache __read_mostly;
 #endif
 
-DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
-DECLARE_PER_CPU(cpumask_var_t, select_rq_mask);
-
 void __init sched_init(void)
 {
        unsigned long ptr = 0;
@@ -9647,14 +9643,6 @@ void __init sched_init(void)
 
 #endif /* CONFIG_RT_GROUP_SCHED */
        }
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       for_each_possible_cpu(i) {
-               per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
-                       cpumask_size(), GFP_KERNEL, cpu_to_node(i));
-               per_cpu(select_rq_mask, i) = (cpumask_var_t)kzalloc_node(
-                       cpumask_size(), GFP_KERNEL, cpu_to_node(i));
-       }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
 
        init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
 
@@ -10163,7 +10151,7 @@ void sched_release_group(struct task_group *tg)
        spin_unlock_irqrestore(&task_group_lock, flags);
 }
 
-static void sched_change_group(struct task_struct *tsk, int type)
+static void sched_change_group(struct task_struct *tsk)
 {
        struct task_group *tg;
 
@@ -10179,7 +10167,7 @@ static void sched_change_group(struct task_struct *tsk, int type)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_change_group)
-               tsk->sched_class->task_change_group(tsk, type);
+               tsk->sched_class->task_change_group(tsk);
        else
 #endif
                set_task_rq(tsk, task_cpu(tsk));
@@ -10210,7 +10198,7 @@ void sched_move_task(struct task_struct *tsk)
        if (running)
                put_prev_task(rq, tsk);
 
-       sched_change_group(tsk, TASK_MOVE_GROUP);
+       sched_change_group(tsk);
 
        if (queued)
                enqueue_task(rq, tsk, queue_flags);
@@ -10288,53 +10276,19 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
        sched_unregister_group(tg);
 }
 
-/*
- * This is called before wake_up_new_task(), therefore we really only
- * have to set its group bits, all the other stuff does not apply.
- */
-static void cpu_cgroup_fork(struct task_struct *task)
-{
-       struct rq_flags rf;
-       struct rq *rq;
-
-       rq = task_rq_lock(task, &rf);
-
-       update_rq_clock(rq);
-       sched_change_group(task, TASK_SET_GROUP);
-
-       task_rq_unlock(rq, task, &rf);
-}
-
+#ifdef CONFIG_RT_GROUP_SCHED
 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
        struct task_struct *task;
        struct cgroup_subsys_state *css;
-       int ret = 0;
 
        cgroup_taskset_for_each(task, css, tset) {
-#ifdef CONFIG_RT_GROUP_SCHED
                if (!sched_rt_can_attach(css_tg(css), task))
                        return -EINVAL;
-#endif
-               /*
-                * Serialize against wake_up_new_task() such that if it's
-                * running, we're sure to observe its full state.
-                */
-               raw_spin_lock_irq(&task->pi_lock);
-               /*
-                * Avoid calling sched_move_task() before wake_up_new_task()
-                * has happened. This would lead to problems with PELT, due to
-                * move wanting to detach+attach while we're not attached yet.
-                */
-               if (READ_ONCE(task->__state) == TASK_NEW)
-                       ret = -EINVAL;
-               raw_spin_unlock_irq(&task->pi_lock);
-
-               if (ret)
-                       break;
        }
-       return ret;
+       return 0;
 }
+#endif
 
 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 {
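With the TASK_NEW guard removed, the only check left in cpu_cgroup_can_attach() is the RT-group one, which is why the whole callback can now be compiled out when CONFIG_RT_GROUP_SCHED is not set. Below is a rough user-space model of that remaining policy, with hypothetical mock_* names (in kernel/sched/rt.c, sched_rt_can_attach() refuses to let a realtime task move into a task group whose RT runtime is zero, since the task could never run there):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for struct task_group / task_struct. */
    struct mock_task_group { long long rt_runtime; };  /* 0 means no RT bandwidth   */
    struct mock_task       { bool realtime; };          /* stand-in for rt_task()    */

    /* Models the shape of sched_rt_can_attach(): true means the move is allowed. */
    static bool mock_rt_can_attach(const struct mock_task_group *tg,
                                   const struct mock_task *p)
    {
            /* Don't accept realtime tasks when there is no way for them to run. */
            if (p->realtime && tg->rt_runtime == 0)
                    return false;
            return true;
    }

    int main(void)
    {
            struct mock_task_group tg = { .rt_runtime = 0 };
            struct mock_task rt  = { .realtime = true  };
            struct mock_task cfs = { .realtime = false };

            printf("RT task  -> zero-runtime group: %s\n",
                   mock_rt_can_attach(&tg, &rt)  ? "allowed" : "-EINVAL");
            printf("CFS task -> zero-runtime group: %s\n",
                   mock_rt_can_attach(&tg, &cfs) ? "allowed" : "-EINVAL");
            return 0;
    }

cpu_cgroup_can_attach() returns -EINVAL as soon as one task in the set fails this check, and the cgroup core then aborts the whole migration.
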
@@ -11170,8 +11124,9 @@ struct cgroup_subsys cpu_cgrp_subsys = {
        .css_released   = cpu_cgroup_css_released,
        .css_free       = cpu_cgroup_css_free,
        .css_extra_stat_show = cpu_extra_stat_show,
-       .fork           = cpu_cgroup_fork,
+#ifdef CONFIG_RT_GROUP_SCHED
        .can_attach     = cpu_cgroup_can_attach,
+#endif
        .attach         = cpu_cgroup_attach,
        .legacy_cftypes = cpu_legacy_files,
        .dfl_cftypes    = cpu_files,