cpufreq: schedutil: Call sugov_update_next_freq() before the fast_switch_enabled check
author	Yue Hu <huyue2@yulong.com>
	Wed, 24 Feb 2021 06:39:27 +0000 (14:39 +0800)
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
	Thu, 18 Mar 2021 18:49:16 +0000 (19:49 +0100)
Note that sugov_update_next_freq() may return false, in which case
sugov_fast_switch() does nothing and the fast_switch_enabled check
that led to calling it was carried out for nothing.
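
For context, sugov_update_next_freq() at this point in the tree looks
roughly like this (reconstructed here for illustration only; its body
is not part of this patch). It returns false only when no limits
update is pending and the requested frequency is already the one set
last time:

    static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                       unsigned int next_freq)
    {
            if (!sg_policy->need_freq_update) {
                    if (sg_policy->next_freq == next_freq)
                            return false;   /* caller has nothing to do */
            } else {
                    sg_policy->need_freq_update =
                            cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
            }

            sg_policy->next_freq = next_freq;
            sg_policy->last_freq_update_time = time;

            return true;
    }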

Similarly, in that case the raw_spin_{lock,unlock}() calls around
sugov_deferred_update() in sugov_update_single_freq() are pure
overhead.
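
Before this change, the slow path in sugov_update_single_freq() took
the lock unconditionally, even when sugov_deferred_update() was about
to bail out right away (old code, as removed in the hunks below):

    raw_spin_lock(&sg_policy->update_lock);
    sugov_deferred_update(sg_policy, time, next_f);  /* may do nothing */
    raw_spin_unlock(&sg_policy->update_lock);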

So, call sugov_update_next_freq() before the fast switch check to
avoid the unnecessary work described above. Accordingly, update the
signature of sugov_deferred_update() and remove sugov_fast_switch()
entirely, since its callers can invoke cpufreq_driver_fast_switch()
directly instead.
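
With the change applied, the single-CPU path bails out before either
the fast_switch_enabled check or the lock (condensed from the second
hunk below):

    if (!sugov_update_next_freq(sg_policy, time, next_f))
            return;

    if (sg_policy->policy->fast_switch_enabled) {
            cpufreq_driver_fast_switch(sg_policy->policy, next_f);
    } else {
            raw_spin_lock(&sg_policy->update_lock);
            sugov_deferred_update(sg_policy);
            raw_spin_unlock(&sg_policy->update_lock);
    }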

Signed-off-by: Yue Hu <huyue2@yulong.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 50cbad8..6ee9c9b 100644
@@ -114,19 +114,8 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
        return true;
 }
 
-static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
-                             unsigned int next_freq)
+static void sugov_deferred_update(struct sugov_policy *sg_policy)
 {
-       if (sugov_update_next_freq(sg_policy, time, next_freq))
-               cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
-}
-
-static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
-                                 unsigned int next_freq)
-{
-       if (!sugov_update_next_freq(sg_policy, time, next_freq))
-               return;
-
        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
@@ -366,16 +355,19 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
                sg_policy->cached_raw_freq = cached_freq;
        }
 
+       if (!sugov_update_next_freq(sg_policy, time, next_f))
+               return;
+
        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */
        if (sg_policy->policy->fast_switch_enabled) {
-               sugov_fast_switch(sg_policy, time, next_f);
+               cpufreq_driver_fast_switch(sg_policy->policy, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
-               sugov_deferred_update(sg_policy, time, next_f);
+               sugov_deferred_update(sg_policy);
                raw_spin_unlock(&sg_policy->update_lock);
        }
 }
@@ -454,12 +446,15 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);
 
+               if (!sugov_update_next_freq(sg_policy, time, next_f))
+                       goto unlock;
+
                if (sg_policy->policy->fast_switch_enabled)
-                       sugov_fast_switch(sg_policy, time, next_f);
+                       cpufreq_driver_fast_switch(sg_policy->policy, next_f);
                else
-                       sugov_deferred_update(sg_policy, time, next_f);
+                       sugov_deferred_update(sg_policy);
        }
-
+unlock:
        raw_spin_unlock(&sg_policy->update_lock);
 }