  * Decide if the tasks on the busy CPUs in the
  * littlest domain would benefit from an idle balance
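+ * (when packing is disabled, or for CPUs outside the slowest domain,
+ * the caller's provisional ilb_needed decision is returned unchanged)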
  */
-static int hmp_packing_ilb_needed(int cpu)
+static int hmp_packing_ilb_needed(int cpu, int ilb_needed)
 {
 	struct hmp_domain *hmp;
-	/* always allow ilb on non-slowest domain */
+	/* allow previous decision on non-slowest domain */
 	if (!hmp_cpu_is_slowest(cpu))
-		return 1;
+		return ilb_needed;

 	/* if disabled, use normal ILB behaviour */
 	if (!hmp_packing_enabled)
-		return 1;
+		return ilb_needed;

 	hmp = hmp_cpu_domain(cpu);
 	for_each_cpu_and(cpu, &hmp->cpus, nohz.idle_cpus_mask) {
@@ ... @@
 }
 #endif

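+/*
+ * Per-CPU scratch mask used by find_new_ilb() to compute the busy
+ * CPUs in the caller's HMP domain. When CONFIG_CPUMASK_OFFSTACK=y,
+ * cpumask_var_t must be allocated (alloc_cpumask_var()) before use.
+ */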
+DEFINE_PER_CPU(cpumask_var_t, ilb_tmpmask);
+
 static inline int find_new_ilb(void)
 {
 	int ilb = cpumask_first(nohz.idle_cpus_mask);
 #ifdef CONFIG_SCHED_HMP
 	int call_cpu = smp_processor_id();
-	int ilb_needed = 1;
+	int ilb_needed = 0;
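+	/* assume no idle balance is needed until a busy CPU proves otherwise */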
+	int cpu;
+	struct cpumask *tmp = per_cpu(ilb_tmpmask, call_cpu);

 	/* restrict nohz balancing to occur in the same hmp domain */
 	ilb = cpumask_first_and(nohz.idle_cpus_mask,
 			&((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus);
+
+	/*
+	 * An idle balance is only necessary within this domain if some
+	 * busy CPU here is carrying more than one runnable task.
+	 */
+	cpumask_andnot(tmp,
+			&((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus,
+			nohz.idle_cpus_mask);
+	for_each_cpu(cpu, tmp) {
+		if (cpu_rq(cpu)->nr_running > 1) {
+			ilb_needed = 1;
+			break;
+		}
+	}
+
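+	/*
+	 * If little-task packing is enabled, it may override this
+	 * decision for CPUs in the slowest domain.
+	 */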
 #ifdef CONFIG_SCHED_HMP_LITTLE_PACKING
 	if (ilb < nr_cpu_ids)
-		ilb_needed = hmp_packing_ilb_needed(ilb);
+		ilb_needed = hmp_packing_ilb_needed(ilb, ilb_needed);
 #endif

 	if (ilb_needed && ilb < nr_cpu_ids && idle_cpu(ilb))