Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ea8f49a..d8465ee 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -73,6 +73,7 @@
 #include <linux/init_task.h>
 #include <linux/context_tracking.h>
 #include <linux/compiler.h>
+#include <linux/frame.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -2689,7 +2690,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 /*
  * context_switch - switch to the new MM and the new thread's register state.
  */
-static inline struct rq *
+static __always_inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
 {
@@ -3174,7 +3175,7 @@ static void __sched notrace __schedule(bool preempt)
                        if (prev->flags & PF_WQ_WORKER) {
                                struct task_struct *to_wakeup;
 
-                               to_wakeup = wq_worker_sleeping(prev, cpu);
+                               to_wakeup = wq_worker_sleeping(prev);
                                if (to_wakeup)
                                        try_to_wake_up_local(to_wakeup);
                        }
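The wq_worker_sleeping() hook above loses its 'cpu' argument. The rationale, as I read it (the callee is not part of this hunk), is that the hook only ever runs on the CPU whose worker is about to sleep, so the value can be derived locally instead of being passed down. A sketch of the callee side under that assumption:

	/*
	 * Sketch of the new callee (assumption; kernel/workqueue.c is
	 * not shown in this diff): the old explicit argument becomes a
	 * local sanity check against the pool the worker is bound to.
	 */
	struct task_struct *wq_worker_sleeping(struct task_struct *task)
	{
		struct worker *worker = kthread_data(task);
		struct worker_pool *pool = worker->pool;

		if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
			return NULL;
		/* ... otherwise pick an idle worker to wake, if any ... */
		return NULL;
	}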
@@ -3204,6 +3205,7 @@ static void __sched notrace __schedule(bool preempt)
 
        balance_callback(rq);
 }
+STACK_FRAME_NON_STANDARD(__schedule); /* switch_to() */
 
 static inline void sched_submit_work(struct task_struct *tsk)
 {
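__schedule() ends in switch_to(), whose hand-written stack switching cannot be validated by objtool, so the function is whitelisted with STACK_FRAME_NON_STANDARD() (this is what the new <linux/frame.h> include at the top of the file is for). Marking context_switch() __always_inline in the earlier hunk supports the same annotation: it guarantees switch_to() really is inlined into __schedule(), so the single whitelist entry covers the whole path. Roughly what the macro expands to, going by the usual <linux/frame.h> definition (a sketch, not text from this patch):

	/*
	 * Record the function's address in a discard section that
	 * objtool reads at build time, telling it not to warn about
	 * this function's non-standard stack frame.
	 */
	#define STACK_FRAME_NON_STANDARD(func)				\
		static void __used					\
		__section(.discard.func_stack_frame_non_standard)	\
		*__func_stack_frame_non_standard_##func = func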
@@ -5369,6 +5371,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
        case CPU_UP_PREPARE:
                rq->calc_load_update = calc_load_update;
+               account_reset_rq(rq);
                break;
 
        case CPU_ONLINE:
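account_reset_rq() clears the runqueue's cached IRQ/steal-time snapshots while the CPU is being prepared for bring-up, so a re-onlined CPU does not account a bogus delta against baselines left over from before it went down. Presumably a small helper along these lines (a sketch; the helper itself would live in kernel/sched/sched.h, not in this file):

	/* Sketch: reset stale time-accounting baselines for a rq. */
	static inline void account_reset_rq(struct rq *rq)
	{
	#ifdef CONFIG_IRQ_TIME_ACCOUNTING
		rq->prev_irq_time = 0;
	#endif
	#ifdef CONFIG_PARAVIRT
		rq->prev_steal_time = 0;
	#endif
	#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
		rq->prev_steal_time_rq = 0;
	#endif
	}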
@@ -7535,7 +7538,7 @@ void set_curr_task(int cpu, struct task_struct *p)
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
-static void free_sched_group(struct task_group *tg)
+static void sched_free_group(struct task_group *tg)
 {
        free_fair_sched_group(tg);
        free_rt_sched_group(tg);
@@ -7561,7 +7564,7 @@ struct task_group *sched_create_group(struct task_group *parent)
        return tg;
 
 err:
-       free_sched_group(tg);
+       sched_free_group(tg);
        return ERR_PTR(-ENOMEM);
 }
 
@@ -7581,17 +7584,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
 }
 
 /* rcu callback to free various structures associated with a task group */
-static void free_sched_group_rcu(struct rcu_head *rhp)
+static void sched_free_group_rcu(struct rcu_head *rhp)
 {
        /* now it should be safe to free those cfs_rqs */
-       free_sched_group(container_of(rhp, struct task_group, rcu));
+       sched_free_group(container_of(rhp, struct task_group, rcu));
 }
 
-/* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
 {
        /* wait for possible concurrent references to cfs_rqs complete */
-       call_rcu(&tg->rcu, free_sched_group_rcu);
+       call_rcu(&tg->rcu, sched_free_group_rcu);
 }
 
 void sched_offline_group(struct task_group *tg)
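The renames in these two hunks (free_sched_group() -> sched_free_group(), free_sched_group_rcu() -> sched_free_group_rcu()) bring the helpers in line with the sched_*_group() naming used by the rest of this block; behaviour is unchanged. The call_rcu() deferral exists because lockless walkers may still be traversing the group lists when the group is destroyed. A hypothetical reader, sketched for illustration:

	/*
	 * Sketch of the kind of reader the grace period protects: the
	 * task-group list is walked under rcu_read_lock(), so freeing
	 * must wait until all such readers are done.
	 */
	struct task_group *tg;

	rcu_read_lock();
	list_for_each_entry_rcu(tg, &task_groups, list) {
		/* tg must remain valid for the whole traversal */
	}
	rcu_read_unlock();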
@@ -8050,31 +8052,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        if (IS_ERR(tg))
                return ERR_PTR(-ENOMEM);
 
+       sched_online_group(tg, parent);
+
        return &tg->css;
 }
 
-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
 {
        struct task_group *tg = css_tg(css);
-       struct task_group *parent = css_tg(css->parent);
 
-       if (parent)
-               sched_online_group(tg, parent);
-       return 0;
+       sched_offline_group(tg);
 }
 
 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 {
        struct task_group *tg = css_tg(css);
 
-       sched_destroy_group(tg);
-}
-
-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
-{
-       struct task_group *tg = css_tg(css);
-
-       sched_offline_group(tg);
+       /*
+        * Relies on the RCU grace period between css_released() and this.
+        */
+       sched_free_group(tg);
 }
 
 static void cpu_cgroup_fork(struct task_struct *task)
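This hunk restructures the cpu controller's cgroup lifecycle. The group now goes online straight from css_alloc() (the separate ->css_online() step, with its 'if (parent)' special case, is gone: the root group is set up early in sched_init(), and every other css has a parent). Teardown moves from ->css_offline() to ->css_released(), and css_free() calls sched_free_group() directly instead of going through sched_destroy_group()'s call_rcu(): as the new comment notes, cgroup core already inserts an RCU grace period between css_released() and css_free(), so a second deferral would be redundant. The resulting ordering, roughly (a sketch of my reading of cgroup core, not code from this patch):

	/*
	 * cgroup_destroy_locked()
	 *   -> kill css refcount; once the last reference drops:
	 *   -> ->css_released()  = cpu_cgroup_css_released()
	 *                          -> sched_offline_group(tg)
	 *   ... RCU grace period ...
	 *   -> ->css_free()      = cpu_cgroup_css_free()
	 *                          -> sched_free_group(tg)
	 *
	 * Any RCU reader that could still see the group when it went
	 * offline is guaranteed to have finished before it is freed.
	 */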
@@ -8434,14 +8431,13 @@ static struct cftype cpu_files[] = {
 
 struct cgroup_subsys cpu_cgrp_subsys = {
        .css_alloc      = cpu_cgroup_css_alloc,
+       .css_released   = cpu_cgroup_css_released,
        .css_free       = cpu_cgroup_css_free,
-       .css_online     = cpu_cgroup_css_online,
-       .css_offline    = cpu_cgroup_css_offline,
        .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .legacy_cftypes = cpu_files,
-       .early_init     = 1,
+       .early_init     = true,
 };
 
 #endif /* CONFIG_CGROUP_SCHED */