EXPERIMENTAL: sched/fair: find idle cpu in the group
author	Lukasz Luba <l.luba@partner.samsung.com>
Tue, 4 Sep 2018 12:29:41 +0000 (14:29 +0200)
committer	Lukasz Luba <l.luba@partner.samsung.com>
Wed, 26 Sep 2018 10:34:58 +0000 (12:34 +0200)
In find_idlest_group(), take into account any CPU that is actually idle. While iterating over the groups, remember a group that contains an idle CPU (the local group takes precedence once an idle CPU is found there) and return that group at skip_spare:, ahead of the load-based idlest-group selection.

Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com>
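
For illustration, below is a minimal user-space sketch of the selection logic this patch adds. struct group, cpu_is_idle() and find_group_with_idle() are illustrative stand-ins for the kernel's sched_group, idle_cpu() and the new bookkeeping in find_idlest_group(); only the control flow (record an idle CPU's group, let the local group win, return it before the fallback) mirrors the diff, not the kernel data structures.

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's idle_cpu(): in this toy model a CPU is
 * "idle" when its runqueue length is zero.
 */
static int runqueue_len[8] = { 2, 1, 0, 3, 1, 0, 2, 1 };

static bool cpu_is_idle(int cpu)
{
	return runqueue_len[cpu] == 0;
}

/* Simplified sched_group: just a named list of CPU ids. */
struct group {
	const char *name;
	const int *cpus;
	int nr_cpus;
};

/*
 * Mirror of the patch's bookkeeping: remember a group that contains an
 * idle CPU; once an idle CPU is seen in the local group, stop updating
 * so the local group wins.
 */
static const struct group *find_group_with_idle(const struct group *groups,
						int nr_groups, int local_idx)
{
	const struct group *group_with_idle = NULL;
	bool found_local_idle = false;
	int found_idle_cpu = -1;

	for (int g = 0; g < nr_groups; g++) {
		bool local_group = (g == local_idx);

		for (int c = 0; c < groups[g].nr_cpus; c++) {
			int i = groups[g].cpus[c];

			if (!cpu_is_idle(i))
				continue;
			if (found_local_idle)
				continue;
			if (local_group)
				found_local_idle = true;

			found_idle_cpu = i;
			group_with_idle = &groups[g];
		}
	}

	/* Corresponds to the check added at skip_spare: in the patch. */
	if (found_idle_cpu != -1)
		return group_with_idle;

	return NULL;	/* caller falls back to the load-based choice */
}

int main(void)
{
	const int cpus_a[] = { 0, 1, 2, 3 };
	const int cpus_b[] = { 4, 5, 6, 7 };
	const struct group groups[] = {
		{ "group A", cpus_a, 4 },
		{ "group B", cpus_b, 4 },
	};
	const struct group *g = find_group_with_idle(groups, 2, 1);

	printf("picked %s\n", g ? g->name : "none (fall back)");
	return 0;
}

With group B treated as the local group, the sketch prints "picked group B" even though group A also contains an idle CPU, which is the preference the found_local_idle flag implements in the diff below.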
kernel/sched/fair.c

index cdd3fc19b8f8b8a73516a58a2e551b72e7b16313..41a124a7d91c6ad41869bbddf366ea8bd491bbcc 100644
@@ -5668,6 +5668,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 {
        struct sched_group *idlest = NULL, *group = sd->groups;
        struct sched_group *most_spare_sg = NULL;
+       struct sched_group *group_with_idle = NULL;
        unsigned long min_runnable_load = ULONG_MAX;
        unsigned long this_runnable_load = ULONG_MAX;
        unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
@@ -5676,6 +5677,8 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
        int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
        unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
                                (sd->imbalance_pct-100) / 100;
+       bool found_local_idle = false;
+       int found_idle_cpu = -1;
 
        if (sd_flag & SD_BALANCE_WAKE)
                load_idx = sd->wake_idx;
@@ -5717,6 +5720,16 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                        if (spare_cap > max_spare_cap)
                                max_spare_cap = spare_cap;
+
+                       if (idle_cpu(i)) {
+                               if (found_local_idle)
+                                       continue;
+                               if (local_group)
+                                       found_local_idle = true;
+
+                               found_idle_cpu = i;
+                               group_with_idle = group;
+                       }
                }
 
                /* Adjust by relative CPU capacity of the group */
@@ -5777,6 +5790,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                return most_spare_sg;
 
 skip_spare:
+       if (found_idle_cpu != -1)
+               return group_with_idle;
+
        if (!idlest)
                return NULL;