From: Vincent Guittot Date: Thu, 19 Jul 2018 12:00:06 +0000 (+0200) Subject: sched/fair: Remove #ifdefs from scale_rt_capacity() X-Git-Tag: v4.19~481^2~16 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2e62c4743adc4c7bfcbc1f45118fc7bec58cf30a;p=platform%2Fkernel%2Flinux-rpi.git sched/fair: Remove #ifdefs from scale_rt_capacity() Reuse cpu_util_irq() that has been defined for schedutil and set irq util to 0 when !CONFIG_IRQ_TIME_ACCOUNTING. But the compiler is not able to optimize the sequence (at least with aarch64 GCC 7.2.1): free *= (max - irq); free /= max; when irq is fixed to 0. Add a new inline function scale_irq_capacity() that will scale utilization when irq is accounted. Reuse this function in schedutil, which applies a similar formula. Suggested-by: Ingo Molnar Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Acked-by: Viresh Kumar Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rjw@rjwysocki.net Link: http://lkml.kernel.org/r/1532001606-6689-1-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c3cf7d9..fc177c06 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -177,7 +177,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) rq->clock_task += delta; -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) +#ifdef HAVE_SCHED_AVG_IRQ if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) update_irq_load_avg(rq, irq_delta + steal); #endif diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 97dcd44..3fffad3 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -247,8 +247,7 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu) * U' = irq + ------- * U * max */ - util *= (max - irq); - util /= max; + util = scale_irq_capacity(util, irq, max); util += irq; /* diff --git a/kernel/sched/fair.c 
b/kernel/sched/fair.c index d5f7d52..14c3fdd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7551,16 +7551,12 @@ static unsigned long scale_rt_capacity(int cpu) struct rq *rq = cpu_rq(cpu); unsigned long max = arch_scale_cpu_capacity(NULL, cpu); unsigned long used, free; -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) unsigned long irq; -#endif -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) - irq = READ_ONCE(rq->avg_irq.util_avg); + irq = cpu_util_irq(rq); if (unlikely(irq >= max)) return 1; -#endif used = READ_ONCE(rq->avg_rt.util_avg); used += READ_ONCE(rq->avg_dl.util_avg); @@ -7569,11 +7565,8 @@ static unsigned long scale_rt_capacity(int cpu) return 1; free = max - used; -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) - free *= (max - irq); - free /= max; -#endif - return free; + + return scale_irq_capacity(free, irq, max); } static void update_cpu_capacity(struct sched_domain *sd, int cpu) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ebb4b3c..614170d 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -856,6 +856,7 @@ struct rq { struct sched_avg avg_rt; struct sched_avg avg_dl; #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) +#define HAVE_SCHED_AVG_IRQ struct sched_avg avg_irq; #endif u64 idle_stamp; @@ -2210,17 +2211,32 @@ static inline unsigned long cpu_util_rt(struct rq *rq) { return READ_ONCE(rq->avg_rt.util_avg); } +#endif -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) +#ifdef HAVE_SCHED_AVG_IRQ static inline unsigned long cpu_util_irq(struct rq *rq) { return rq->avg_irq.util_avg; } + +static inline +unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) +{ + util *= (max - irq); + util /= max; + + return util; + +} #else static inline unsigned long cpu_util_irq(struct rq *rq) { return 0; } 
-#endif +static inline +unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) +{ + return util; +} #endif