cpufreq/amd-pstate: Fix scaling_min_freq and scaling_max_freq update
author Wyes Karny <wyes.karny@amd.com>
Fri, 17 Nov 2023 06:38:39 +0000 (06:38 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 8 Dec 2023 07:52:24 +0000 (08:52 +0100)
[ Upstream commit febab20caebac959fdc3d7520bc52de8b1184455 ]

When amd_pstate is running, writing to scaling_min_freq and
scaling_max_freq has no effect. These values are only passed to the
policy level, but not to the platform level. This means that the
platform does not know about the frequency limits set by the user.

To fix this, update the min_perf and max_perf values at the platform
level whenever the user changes the scaling_min_freq and scaling_max_freq
values.

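As an illustration of the frequency-to-perf conversion involved (only the scaling formula, freq * highest_perf / max_freq, is taken from the patch below; the perf and frequency values are made-up examples), here is a minimal user-space sketch:

  /*
   * Illustrative sketch only, not part of the patch. It mirrors the
   * freq -> perf scaling used by the driver; highest_perf, max_freq
   * and the requested limits below are hypothetical example values.
   */
  #include <stdio.h>
  #include <stdint.h>

  static uint32_t freq_to_perf(uint64_t freq_khz, uint32_t highest_perf,
                               uint64_t max_freq_khz)
  {
          /* perf = freq * highest_perf / max_freq, as in the patch */
          return (uint32_t)(freq_khz * highest_perf / max_freq_khz);
  }

  int main(void)
  {
          uint32_t highest_perf = 228;   /* hypothetical CPPC highest perf */
          uint64_t max_freq = 4200000;   /* hypothetical max frequency, kHz */

          /* user writes 2100000 to scaling_max_freq, 400000 to scaling_min_freq */
          printf("max_limit_perf = %u\n",
                 freq_to_perf(2100000, highest_perf, max_freq)); /* 114 */
          printf("min_limit_perf = %u\n",
                 freq_to_perf(400000, highest_perf, max_freq));  /* 21 */
          return 0;
  }
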
Fixes: ffa5096a7c33 ("cpufreq: amd-pstate: implement Pstate EPP support for the AMD processors")
Acked-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Wyes Karny <wyes.karny@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/cpufreq/amd-pstate.c
include/linux/amd-pstate.h

diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 300f81d..3313d1d 100644
@@ -307,11 +307,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
                highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
 
        WRITE_ONCE(cpudata->highest_perf, highest_perf);
-
+       WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
        WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
        WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
        WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
-
+       WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
        return 0;
 }
 
@@ -329,11 +329,12 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
                highest_perf = cppc_perf.highest_perf;
 
        WRITE_ONCE(cpudata->highest_perf, highest_perf);
-
+       WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
        WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
        WRITE_ONCE(cpudata->lowest_nonlinear_perf,
                   cppc_perf.lowest_nonlinear_perf);
        WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+       WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
 
        if (cppc_state == AMD_PSTATE_ACTIVE)
                return 0;
@@ -432,6 +433,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
        u64 prev = READ_ONCE(cpudata->cppc_req_cached);
        u64 value = prev;
 
+       min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+                       cpudata->max_limit_perf);
+       max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+                       cpudata->max_limit_perf);
        des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
 
        if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
@@ -470,6 +475,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
        return 0;
 }
 
+static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
+{
+       u32 max_limit_perf, min_limit_perf;
+       struct amd_cpudata *cpudata = policy->driver_data;
+
+       max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+       min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
+       WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+       WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+       WRITE_ONCE(cpudata->max_limit_freq, policy->max);
+       WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+
+       return 0;
+}
+
 static int amd_pstate_update_freq(struct cpufreq_policy *policy,
                                  unsigned int target_freq, bool fast_switch)
 {
@@ -480,6 +501,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
        if (!cpudata->max_freq)
                return -ENODEV;
 
+       if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+               amd_pstate_update_min_max_limit(policy);
+
        cap_perf = READ_ONCE(cpudata->highest_perf);
        min_perf = READ_ONCE(cpudata->lowest_perf);
        max_perf = cap_perf;
@@ -534,6 +558,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
        struct amd_cpudata *cpudata = policy->driver_data;
        unsigned int target_freq;
 
+       if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+               amd_pstate_update_min_max_limit(policy);
+
+
        cap_perf = READ_ONCE(cpudata->highest_perf);
        lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
        max_freq = READ_ONCE(cpudata->max_freq);
@@ -747,6 +775,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
        /* Initial processor data capability frequencies */
        cpudata->max_freq = max_freq;
        cpudata->min_freq = min_freq;
+       cpudata->max_limit_freq = max_freq;
+       cpudata->min_limit_freq = min_freq;
        cpudata->nominal_freq = nominal_freq;
        cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
 
@@ -1185,16 +1215,25 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static void amd_pstate_epp_init(unsigned int cpu)
+static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 {
-       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct amd_cpudata *cpudata = policy->driver_data;
-       u32 max_perf, min_perf;
+       u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
        u64 value;
        s16 epp;
 
        max_perf = READ_ONCE(cpudata->highest_perf);
        min_perf = READ_ONCE(cpudata->lowest_perf);
+       max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+       min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
+       max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+                       cpudata->max_limit_perf);
+       min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+                       cpudata->max_limit_perf);
+
+       WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+       WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
 
        value = READ_ONCE(cpudata->cppc_req_cached);
 
@@ -1212,9 +1251,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
        value &= ~AMD_CPPC_DES_PERF(~0L);
        value |= AMD_CPPC_DES_PERF(0);
 
-       if (cpudata->epp_policy == cpudata->policy)
-               goto skip_epp;
-
        cpudata->epp_policy = cpudata->policy;
 
        /* Get BIOS pre-defined epp value */
@@ -1224,7 +1260,7 @@ static void amd_pstate_epp_init(unsigned int cpu)
                 * This return value can only be negative for shared_memory
                 * systems where EPP register read/write not supported.
                 */
-               goto skip_epp;
+               return;
        }
 
        if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1238,8 +1274,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
 
        WRITE_ONCE(cpudata->cppc_req_cached, value);
        amd_pstate_set_epp(cpudata, epp);
-skip_epp:
-       cpufreq_cpu_put(policy);
 }
 
 static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1254,7 +1288,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 
        cpudata->policy = policy->policy;
 
-       amd_pstate_epp_init(policy->cpu);
+       amd_pstate_epp_update_limit(policy);
 
        return 0;
 }
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
index 446394f..6ad02ad 100644
@@ -70,6 +70,10 @@ struct amd_cpudata {
        u32     nominal_perf;
        u32     lowest_nonlinear_perf;
        u32     lowest_perf;
+       u32     min_limit_perf;
+       u32     max_limit_perf;
+       u32     min_limit_freq;
+       u32     max_limit_freq;
 
        u32     max_freq;
        u32     min_freq;
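
For completeness, a hedged sketch of how the new limit fields constrain a perf request, following the clamp_t() calls added to amd_pstate_update() above (the limit values 21 and 114 are the hypothetical results from the earlier example, not real hardware numbers):

  /* Illustrative only: demonstrates the clamping behaviour the patch adds. */
  #include <stdio.h>
  #include <stdint.h>

  static uint32_t clamp_u32(uint32_t val, uint32_t lo, uint32_t hi)
  {
          return val < lo ? lo : (val > hi ? hi : val);
  }

  int main(void)
  {
          uint32_t min_limit_perf = 21, max_limit_perf = 114;  /* hypothetical limits */
          uint32_t min_perf = 10, max_perf = 228, des_perf = 180;

          /* as in amd_pstate_update(): the user limits win over the governor request */
          min_perf = clamp_u32(min_perf, min_limit_perf, max_limit_perf);
          max_perf = clamp_u32(max_perf, min_limit_perf, max_limit_perf);
          des_perf = clamp_u32(des_perf, min_perf, max_perf);

          printf("min=%u max=%u des=%u\n", min_perf, max_perf, des_perf); /* 21 114 114 */
          return 0;
  }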