arm64: topology: Reorder init_amu_fie() a bit
authorViresh Kumar <viresh.kumar@linaro.org>
Fri, 8 Jan 2021 11:16:52 +0000 (16:46 +0530)
committerWill Deacon <will@kernel.org>
Wed, 20 Jan 2021 12:49:40 +0000 (12:49 +0000)
This patch makes a couple of optimizations in init_amu_fie(): exiting
early from paths where we don't need to continue any further, avoiding
the enable/disable dance, moving the calls to
topology_scale_freq_invariant() to just where they are needed instead of
at the top of the routine, and avoiding a third call to it.

Reviewed-by: Ionela Voinescu <ionela.voinescu@arm.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Ionela Voinescu <ionela.voinescu@arm.com>
Link: https://lore.kernel.org/r/a732e71ab9ec28c354eb28dd898c9b47d490863f.1610104461.git.viresh.kumar@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/kernel/topology.c

index ebadc73..57267d6 100644 (file)
@@ -221,8 +221,8 @@ static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
 
 static int __init init_amu_fie(void)
 {
-       bool invariance_status = topology_scale_freq_invariant();
        cpumask_var_t valid_cpus;
+       bool invariant;
        int ret = 0;
        int cpu;
 
@@ -249,18 +249,19 @@ static int __init init_amu_fie(void)
        if (cpumask_equal(valid_cpus, cpu_present_mask))
                cpumask_copy(amu_fie_cpus, cpu_present_mask);
 
-       if (!cpumask_empty(amu_fie_cpus)) {
-               pr_info("CPUs[%*pbl]: counters will be used for FIE.",
-                       cpumask_pr_args(amu_fie_cpus));
-               static_branch_enable(&amu_fie_key);
-       }
+       if (cpumask_empty(amu_fie_cpus))
+               goto free_valid_mask;
 
-       /*
-        * If the system is not fully invariant after AMU init, disable
-        * partial use of counters for frequency invariance.
-        */
-       if (!topology_scale_freq_invariant())
-               static_branch_disable(&amu_fie_key);
+       invariant = topology_scale_freq_invariant();
+
+       /* We aren't fully invariant yet */
+       if (!invariant && !cpumask_equal(amu_fie_cpus, cpu_present_mask))
+               goto free_valid_mask;
+
+       static_branch_enable(&amu_fie_key);
+
+       pr_info("CPUs[%*pbl]: counters will be used for FIE.",
+               cpumask_pr_args(amu_fie_cpus));
 
        /*
         * Task scheduler behavior depends on frequency invariance support,
@@ -268,7 +269,7 @@ static int __init init_amu_fie(void)
         * a result of counter initialisation and use, retrigger the build of
         * scheduling domains to ensure the information is propagated properly.
         */
-       if (invariance_status != topology_scale_freq_invariant())
+       if (!invariant)
                rebuild_sched_domains_energy();
 
 free_valid_mask: