x86/aperfmperf: Restructure arch_scale_freq_tick()
author Thomas Gleixner <tglx@linutronix.de>
Fri, 15 Apr 2022 19:19:57 +0000 (21:19 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Wed, 27 Apr 2022 18:22:19 +0000 (20:22 +0200)
Preparation for sharing code with the CPU frequency portion of the
aperf/mperf code.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Link: https://lore.kernel.org/r/20220415161206.706185092@linutronix.de
arch/x86/kernel/cpu/aperfmperf.c

index 6922c77..6220503 100644 (file)
@@ -477,22 +477,9 @@ static DECLARE_WORK(disable_freq_invariance_work,
 
 DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
 
-void arch_scale_freq_tick(void)
+static void scale_freq_tick(u64 acnt, u64 mcnt)
 {
-       struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
-       u64 aperf, mperf, acnt, mcnt, freq_scale;
-
-       if (!arch_scale_freq_invariant())
-               return;
-
-       rdmsrl(MSR_IA32_APERF, aperf);
-       rdmsrl(MSR_IA32_MPERF, mperf);
-
-       acnt = aperf - s->aperf;
-       mcnt = mperf - s->mperf;
-
-       s->aperf = aperf;
-       s->mperf = mperf;
+       u64 freq_scale;
 
        if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
                goto error;
@@ -514,4 +501,23 @@ error:
        pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
        schedule_work(&disable_freq_invariance_work);
 }
+
+void arch_scale_freq_tick(void)
+{
+       struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
+       u64 acnt, mcnt, aperf, mperf;
+
+       if (!arch_scale_freq_invariant())
+               return;
+
+       rdmsrl(MSR_IA32_APERF, aperf);
+       rdmsrl(MSR_IA32_MPERF, mperf);
+       acnt = aperf - s->aperf;
+       mcnt = mperf - s->mperf;
+
+       s->aperf = aperf;
+       s->mperf = mperf;
+
+       scale_freq_tick(acnt, mcnt);
+}
 #endif /* CONFIG_X86_64 && CONFIG_SMP */