Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/cooloney...
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5eea870..7a33e59 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1680,9 +1680,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
        }
 
        /* ensure we never gain time by being placed backwards. */
-       vruntime = max_vruntime(se->vruntime, vruntime);
-
-       se->vruntime = vruntime;
+       se->vruntime = max_vruntime(se->vruntime, vruntime);
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
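
Note: the collapsed assignment depends on max_vruntime() returning the
later of the two timestamps. As a reference, a sketch close to the helper
defined earlier in fair.c: it compares through a signed delta so the
ordering stays correct even if the u64 vruntime wraps around.

    static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
    {
            s64 delta = (s64)(vruntime - max_vruntime);

            /* positive signed delta: vruntime is later, even across u64 wrap */
            if (delta > 0)
                    max_vruntime = vruntime;

            return max_vruntime;
    }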
@@ -2663,7 +2661,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
        hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
        struct cfs_rq *cfs_rq;
 
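Note: __maybe_unused wraps the compiler's "unused" attribute (via the
kernel's compiler headers) and silences -Wunused-function when a definition
is only referenced on some configs. A minimal sketch of the pattern; the
helper name below is hypothetical:

    #define __maybe_unused __attribute__((__unused__))

    /* e.g. only called from CONFIG_SMP-only code; no warning on UP builds */
    static void __maybe_unused smp_only_helper(void)
    {
    }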
@@ -3254,25 +3252,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  */
 static int select_idle_sibling(struct task_struct *p, int target)
 {
-       int cpu = smp_processor_id();
-       int prev_cpu = task_cpu(p);
        struct sched_domain *sd;
        struct sched_group *sg;
-       int i;
+       int i = task_cpu(p);
 
-       /*
-        * If the task is going to be woken up on this cpu and if it is
-        * already idle, then it is the right target.
-        */
-       if (target == cpu && idle_cpu(cpu))
-               return cpu;
+       if (idle_cpu(target))
+               return target;
 
        /*
-        * If the task is going to be woken up on the cpu where it previously
-        * ran and if it is currently idle, then it is the right target.
+        * If the previous cpu is cache affine and idle, don't be stupid.
         */
-       if (target == prev_cpu && idle_cpu(prev_cpu))
-               return prev_cpu;
+       if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+               return i;
 
        /*
         * Otherwise, iterate the domains and find an eligible idle cpu.
@@ -3286,7 +3277,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
                                goto next;
 
                        for_each_cpu(i, sched_group_cpus(sg)) {
-                               if (!idle_cpu(i))
+                               if (i == target || !idle_cpu(i))
                                        goto next;
                        }
 
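Note: the rewritten fast path leans on cpus_share_cache() to test whether
the previous cpu is cache affine to the wakeup target. A sketch close to
the kernel/sched/core.c helper, which compares the cpus' last-level-cache
domain ids:

    bool cpus_share_cache(int this_cpu, int that_cpu)
    {
            /* equal sd_llc_id: both cpus sit under the same LLC domain */
            return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
    }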
@@ -6101,7 +6092,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
         * idle runqueue:
         */
        if (rq->cfs.load.weight)
-               rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+               rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
 
        return rr_interval;
 }
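
Note: the last hunk matters under group scheduling, where &rq->cfs is only
the root runqueue; cfs_rq_of(se) resolves to the cfs_rq the entity is
actually queued on, so sched_slice() scales the slice by the right load.
A sketch of the accessor, close to the in-tree definitions:

    #ifdef CONFIG_FAIR_GROUP_SCHED
    /* entity is queued on its task group's cfs_rq */
    static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
    {
            return se->cfs_rq;
    }
    #else
    /* no groups: every entity runs on the root cfs_rq of its cpu */
    static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
    {
            struct task_struct *p = task_of(se);
            struct rq *rq = task_rq(p);

            return &rq->cfs;
    }
    #endif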