Merge branches 'pm-cpufreq', 'pm-cpuidle' and 'pm-devfreq'
author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Wed, 14 Jun 2017 23:51:33 +0000 (01:51 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Wed, 14 Jun 2017 23:51:33 +0000 (01:51 +0200)
* pm-cpufreq:
  cpufreq: conservative: Allow down_threshold to take values from 1 to 10
  Revert "cpufreq: schedutil: Reduce frequencies slower"

* pm-cpuidle:
  cpuidle: dt: Add missing 'of_node_put()'
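 
The cpuidle-dt item above is the usual device-tree refcount rule: a node
returned by of_parse_phandle() must be released with of_node_put() on every
exit path, including early breaks and skipped iterations. A minimal sketch of
the pattern, with a hypothetical do_something_with() helper standing in for
the real state initialization:

	static int enumerate_idle_states(struct device_node *cpu_node)
	{
		struct device_node *state_node;
		int i, err;

		for (i = 0; ; i++) {
			state_node = of_parse_phandle(cpu_node,
						      "cpu-idle-states", i);
			if (!state_node)
				break;		/* no more idle states listed */

			if (!of_device_is_available(state_node)) {
				of_node_put(state_node);  /* drop ref, skip node */
				continue;
			}

			err = do_something_with(state_node);  /* hypothetical */
			of_node_put(state_node);	/* put on every path */
			if (err)
				return err;
		}

		return 0;
	}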

* pm-devfreq:
  PM / devfreq: exynos-ppmu: Staticize event list
  PM / devfreq: exynos-ppmu: Handle return value of clk_prepare_enable
  PM / devfreq: exynos-nocp: Handle return value of clk_prepare_enable
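 
Both exynos items above apply the same hardening: clk_prepare_enable() can
fail, so its return value must be checked and propagated rather than ignored.
A minimal sketch of the pattern (the function name is made up for
illustration; this is not the actual driver diff):

	static int ppmu_enable_clock(struct clk *ppmu_clk)
	{
		int ret;

		/* Before the fix: clk_prepare_enable(ppmu_clk); -- result dropped. */
		ret = clk_prepare_enable(ppmu_clk);
		if (ret) {
			pr_err("failed to enable PPMU clock: %d\n", ret);
			return ret;
		}

		return 0;
	}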

kernel/sched/cpufreq_schedutil.c

@@@@ -61,11 -61,6 -61,11 +61,11 @@@@ struct sugov_cpu 
        unsigned long util;
        unsigned long max;
        unsigned int flags;
 + 
 +      /* The field below is for single-CPU policies only. */
 + #ifdef CONFIG_NO_HZ_COMMON
 +      unsigned long saved_idle_calls;
 + #endif
   };
   
   static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
@@@@ -98,20 -93,22 -98,23 +98,20 @@@@ static void sugov_update_commit(struct 
   {
        struct cpufreq_policy *policy = sg_policy->policy;
   
  -     if (sg_policy->next_freq > next_freq)
  -             next_freq = (sg_policy->next_freq + next_freq) >> 1;
  -
 +      if (sg_policy->next_freq == next_freq)
 +              return;
 + 
 +      sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;
   
        if (policy->fast_switch_enabled) {
 -              if (sg_policy->next_freq == next_freq) {
 -                      trace_cpu_frequency(policy->cur, smp_processor_id());
 -                      return;
 -              }
 -              sg_policy->next_freq = next_freq;
                next_freq = cpufreq_driver_fast_switch(policy, next_freq);
                if (next_freq == CPUFREQ_ENTRY_INVALID)
                        return;
   
                policy->cur = next_freq;
                trace_cpu_frequency(next_freq, smp_processor_id());
 -      } else if (sg_policy->next_freq != next_freq) {
 -              sg_policy->next_freq = next_freq;
 +      } else {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
@@@@ -195,19 -192,6 -198,19 +195,19 @@@@ static void sugov_iowait_boost(struct s
        sg_cpu->iowait_boost >>= 1;
   }
   
 + #ifdef CONFIG_NO_HZ_COMMON
 + static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
 + {
 +      unsigned long idle_calls = tick_nohz_get_idle_calls();
 +      bool ret = idle_calls == sg_cpu->saved_idle_calls;
 + 
 +      sg_cpu->saved_idle_calls = idle_calls;
 +      return ret;
 + }
 + #else
 + static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
 + #endif /* CONFIG_NO_HZ_COMMON */
 + 
   static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
   {
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util, max;
        unsigned int next_f;
 +      bool busy;
   
        sugov_set_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;
        if (!sugov_should_update_freq(sg_policy, time))
                return;
   
 +      busy = sugov_cpu_is_busy(sg_cpu);
 + 
        if (flags & SCHED_CPUFREQ_RT_DL) {
                next_f = policy->cpuinfo.max_freq;
        } else {
                sugov_get_util(&util, &max);
                sugov_iowait_boost(sg_cpu, &util, &max);
                next_f = get_next_freq(sg_policy, util, max);
 +              /*
 +               * Do not reduce the frequency if the CPU has not been idle
 +               * recently, as the reduction is likely to be premature then.
 +               */
 +              if (busy && next_f < sg_policy->next_freq)
 +                      next_f = sg_policy->next_freq;
        }
        sugov_update_commit(sg_policy, time, next_f);
   }
   
-  static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
 - static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
 -                                         unsigned long util, unsigned long max,
 -                                         unsigned int flags)
++ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
   {
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
 -      unsigned int max_f = policy->cpuinfo.max_freq;
--      u64 last_freq_update_time = sg_policy->last_freq_update_time;
 +      unsigned long util = 0, max = 1;
        unsigned int j;
   
 -      if (flags & SCHED_CPUFREQ_RT_DL)
 -              return max_f;
 - 
 -      sugov_iowait_boost(sg_cpu, &util, &max);
 - 
        for_each_cpu(j, policy->cpus) {
 -              struct sugov_cpu *j_sg_cpu;
 +              struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;
                s64 delta_ns;
   
 -              if (j == smp_processor_id())
 -                      continue;
 - 
 -              j_sg_cpu = &per_cpu(sugov_cpu, j);
                /*
                 * If the CPU utilization was last updated before the previous
                 * frequency update and the time elapsed between the last update
                 * of the CPU utilization and the last frequency update is long
                 * enough, don't take the CPU into account as it probably is
                 * idle now (and clear iowait_boost for it).
                 */
--              delta_ns = last_freq_update_time - j_sg_cpu->last_update;
++              delta_ns = time - j_sg_cpu->last_update;
                if (delta_ns > TICK_NSEC) {
                        j_sg_cpu->iowait_boost = 0;
                        continue;
                }
                if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
 -                      return max_f;
 +                      return policy->cpuinfo.max_freq;
   
                j_util = j_sg_cpu->util;
                j_max = j_sg_cpu->max;
@@@@ -303,11 -289,7 -305,11 +302,11 @@@@ static void sugov_update_shared(struct 
        sg_cpu->last_update = time;
   
        if (sugov_should_update_freq(sg_policy, time)) {
 -              next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
 +              if (flags & SCHED_CPUFREQ_RT_DL)
 +                      next_f = sg_policy->policy->cpuinfo.max_freq;
 +              else
-                       next_f = sugov_next_freq_shared(sg_cpu);
++                      next_f = sugov_next_freq_shared(sg_cpu, time);
 + 
                sugov_update_commit(sg_policy, time, next_f);
        }
   
@@@@ -491,6 -473,7 -493,6 +490,6 @@@@ static int sugov_init(struct cpufreq_po
   {
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
 -      unsigned int lat;
        int ret = 0;
   
        /* State should be equivalent to EXIT */
                goto stop_kthread;
        }
   
 -      tunables->rate_limit_us = LATENCY_MULTIPLIER;
 -      lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
 -      if (lat)
 -              tunables->rate_limit_us *= lat;
 +      if (policy->transition_delay_us) {
 +              tunables->rate_limit_us = policy->transition_delay_us;
 +      } else {
 +              unsigned int lat;
 + 
 +              tunables->rate_limit_us = LATENCY_MULTIPLIER;
 +              lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
 +              if (lat)
 +                      tunables->rate_limit_us *= lat;
 +      }
   
        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;
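 
For reference, the sugov_init() hunk above makes a driver-provided
policy->transition_delay_us take precedence, falling back to the old
LATENCY_MULTIPLIER heuristic otherwise. A worked example of the fallback,
assuming LATENCY_MULTIPLIER is 1000 as defined in include/linux/cpufreq.h:

	/*
	 * Fallback path with a reported transition_latency of 50000 ns:
	 *
	 *   lat           = 50000 / NSEC_PER_USEC = 50     (us)
	 *   rate_limit_us = LATENCY_MULTIPLIER * lat
	 *                 = 1000 * 50              = 50000 (us, i.e. 50 ms)
	 *
	 * If transition_latency is under 1 us, lat is 0 and the bare
	 * LATENCY_MULTIPLIER (1000 us) is kept; if the driver sets
	 * policy->transition_delay_us, that value is used directly.
	 */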