cpufreq: intel_pstate: Change intel_pstate_get_hwp_max() argument
author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
           Thu, 7 Jan 2021 18:43:30 +0000 (19:43 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 4 Mar 2021 10:38:41 +0000 (11:38 +0100)
commit a45ee4d4e13b0e35a8ec7ea0bf9267243d57b302 upstream.

All of the callers of intel_pstate_get_hwp_max() already have access to
the struct cpudata object that corresponds to the given CPU, and the
function itself needs to access that object anyway (in order to update
hwp_cap_cached), so modify the code to pass a struct cpudata pointer to
it instead of the CPU number.
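
For illustration only (not part of the upstream change), a minimal
self-contained sketch of the pattern this follows; the names
get_hwp_max_by_index() and read_cap() are hypothetical, not the driver's
actual code.  Once a caller already holds the per-CPU object, passing the
pointer avoids re-deriving it from the CPU number inside the helper:

	#include <stdio.h>

	struct cpudata {
		int cpu;
		unsigned long long hwp_cap_cached;
	};

	static struct cpudata cpu0 = { .cpu = 0 };
	static struct cpudata *all_cpu_data[] = { &cpu0 };

	/* stand-in for the MSR_HWP_CAPABILITIES read */
	static unsigned long long read_cap(struct cpudata *cd)
	{
		return 40ULL + (unsigned long long)cd->cpu;
	}

	/* old shape: the helper looks the object up again by CPU number */
	static void get_hwp_max_by_index(unsigned int cpu, int *max)
	{
		struct cpudata *cd = all_cpu_data[cpu];

		cd->hwp_cap_cached = read_cap(cd);
		*max = (int)cd->hwp_cap_cached;
	}

	/* new shape: the caller passes the object it already has */
	static void get_hwp_max(struct cpudata *cd, int *max)
	{
		cd->hwp_cap_cached = read_cap(cd);
		*max = (int)cd->hwp_cap_cached;
	}

	int main(void)
	{
		struct cpudata *cd = all_cpu_data[0];
		int a, b;

		get_hwp_max_by_index(cd->cpu, &a);	/* redundant lookup */
		get_hwp_max(cd, &b);			/* no lookup needed */
		printf("%d %d\n", a, b);
		return 0;
	}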

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Chen Yu <yu.c.chen@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cb95da6..5594790 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -829,13 +829,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
        NULL,
 };
 
-static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
+static void intel_pstate_get_hwp_max(struct cpudata *cpu, int *phy_max,
                                     int *current_max)
 {
        u64 cap;
 
-       rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
-       WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
+       rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+       WRITE_ONCE(cpu->hwp_cap_cached, cap);
        if (global.no_turbo || global.turbo_disabled)
                *current_max = HWP_GUARANTEED_PERF(cap);
        else
@@ -1223,7 +1223,7 @@ static void update_qos_request(enum freq_qos_req_type type)
                        continue;
 
                if (hwp_active)
-                       intel_pstate_get_hwp_max(i, &turbo_max, &max_state);
+                       intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
                else
                        turbo_max = cpu->pstate.turbo_pstate;
 
@@ -1733,7 +1733,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
        if (hwp_active && !hwp_mode_bdw) {
                unsigned int phy_max, current_max;
 
-               intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+               intel_pstate_get_hwp_max(cpu, &phy_max, &current_max);
                cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
                cpu->pstate.turbo_pstate = phy_max;
        } else {
@@ -2217,7 +2217,7 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
         * rather than pure ratios.
         */
        if (hwp_active) {
-               intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+               intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
        } else {
                max_state = global.no_turbo || global.turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
@@ -2332,7 +2332,7 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
        if (hwp_active) {
                int max_state, turbo_max;
 
-               intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+               intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
                max_freq = max_state * cpu->pstate.scaling;
        } else {
                max_freq = intel_pstate_get_max_freq(cpu);
@@ -2675,7 +2675,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
        if (hwp_active) {
                u64 value;
 
-               intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
+               intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
                policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
                rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
                WRITE_ONCE(cpu->hwp_req_cached, value);