/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/slab.h>
#include <trace/events/power.h>

#include "sched.h"

#define SUGOV_KTHREAD_PRIORITY	50

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;	/* For shared policies */
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used. */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;

	unsigned long iowait_boost;
	unsigned long iowait_boost_max;
	u64 last_update;

	/* The fields below are only needed when sharing a policy. */
	unsigned long util;
	unsigned long max;
	unsigned int flags;

	/* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	if (sg_policy->work_in_progress)
		return false;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * This happens when limits change, so forget the previous
		 * next_freq value and force an update.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->freq_update_delay_ns;
}

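/*
 * For illustration, with a hypothetical tunable value: rate_limit_us = 10000
 * gives freq_update_delay_ns = 10,000,000, so any scheduler callback arriving
 * less than 10 ms after the last committed frequency update returns false
 * here and is simply dropped.
 */
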
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (sg_policy->next_freq == next_freq)
		return;

	/*
	 * When the frequency is being reduced, step only half-way down from
	 * the previously requested frequency instead of dropping straight to
	 * the new value.
	 */
	if (sg_policy->next_freq > next_freq)
		next_freq = (sg_policy->next_freq + next_freq) >> 1;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (next_freq == CPUFREQ_ENTRY_INVALID)
			return;

		policy->cur = next_freq;
		trace_cpu_frequency(next_freq, smp_processor_id());
	} else {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	/* freq * 1.25, implemented as a shift-and-add */
	freq = (freq + (freq >> 2)) * util / max;

	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

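/*
 * Worked example with hypothetical numbers: on a frequency-invariant system
 * with max_freq = 2000000 kHz, util = 512 and max = 1024, the shift-and-add
 * above yields
 *
 *	(2000000 + 2000000 / 4) * 512 / 1024 = 1250000 kHz
 *
 * i.e. 50% utilization maps to 62.5% of max_freq, and util / max = 0.8 maps
 * exactly to max_freq, which is the tipping point mentioned above.
 */
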
static void sugov_get_util(unsigned long *util, unsigned long *max)
{
	struct rq *rq = this_rq();
	unsigned long cfs_max;

	cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());

	*util = min(rq->cfs.avg.util_avg, cfs_max);
	*max = cfs_max;
}

static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
				   unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
	} else if (sg_cpu->iowait_boost) {
		s64 delta_ns = time - sg_cpu->last_update;

		/* Clear iowait_boost if the CPU appears to have been idle. */
		if (delta_ns > TICK_NSEC)
			sg_cpu->iowait_boost = 0;
	}
}

static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
			       unsigned long *max)
{
	unsigned long boost_util = sg_cpu->iowait_boost;
	unsigned long boost_max = sg_cpu->iowait_boost_max;

	if (!boost_util)
		return;

	if (*util * boost_max < *max * boost_util) {
		*util = boost_util;
		*max = boost_max;
	}
	sg_cpu->iowait_boost >>= 1;
}

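/*
 * For illustration: a SCHED_CPUFREQ_IOWAIT wakeup sets iowait_boost to
 * iowait_boost_max, and every update that passes through here halves it
 * (max, max/2, max/4, ...), so the boost decays geometrically unless it is
 * refreshed by further I/O-wait wakeups or cleared after a tick of idleness.
 */
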
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls();
	/* The CPU is busy if it did not enter idle since the last sample. */
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util, max;
	unsigned int next_f;
	bool busy;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	if (flags & SCHED_CPUFREQ_RT_DL) {
		next_f = policy->cpuinfo.max_freq;
	} else {
		sugov_get_util(&util, &max);
		sugov_iowait_boost(sg_cpu, &util, &max);
		next_f = get_next_freq(sg_policy, util, max);
		/*
		 * Do not reduce the frequency if the CPU has not been idle
		 * recently, as the reduction is likely to be premature then.
		 */
		if (busy && next_f < sg_policy->next_freq)
			next_f = sg_policy->next_freq;
	}
	sugov_update_commit(sg_policy, time, next_f);
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		/*
		 * If the CPU utilization was last updated before the previous
		 * frequency update and the time elapsed between the last update
		 * of the CPU utilization and the last frequency update is long
		 * enough, don't take the CPU into account as it probably is
		 * idle now (and clear iowait_boost for it).
		 */
		delta_ns = time - j_sg_cpu->last_update;
		if (delta_ns > TICK_NSEC) {
			j_sg_cpu->iowait_boost = 0;
			continue;
		}
		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
			return policy->cpuinfo.max_freq;

		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_iowait_boost(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
}

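/*
 * For illustration, with hypothetical numbers: if a policy spans CPU0 with
 * util = 300, max = 1024 and CPU1 with util = 600, max = 1024, the
 * cross-multiplied comparison above (600 * 1024 > 1024 * 300) selects CPU1,
 * so the shared frequency request is sized for the busiest CPU in the policy.
 */
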
static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;

	sugov_get_util(&util, &max);

	raw_spin_lock(&sg_policy->update_lock);

	sg_cpu->util = util;
	sg_cpu->max = max;
	sg_cpu->flags = flags;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (sugov_should_update_freq(sg_policy, time)) {
		if (flags & SCHED_CPUFREQ_RT_DL)
			next_f = sg_policy->policy->cpuinfo.max_freq;
		else
			next_f = sugov_next_freq_shared(sg_cpu, time);

		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	/*
	 * For RT and deadline tasks, the schedutil governor shoots the
	 * frequency to maximum. Special care must be taken to ensure that this
	 * kthread doesn't result in the same behavior.
	 *
	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
	 * updated only at the end of the sugov_work() function and before that
	 * the schedutil governor rejects all other frequency scaling requests.
	 *
	 * There is a very rare case though, where the RT thread yields right
	 * after the work_in_progress flag is cleared. The effects of that are
	 * neglected for now.
	 */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
				   size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
	&rate_limit_us.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};

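/*
 * Usage sketch (the exact path depends on how the tunables are attached):
 * with per-policy tunables the attribute typically appears as
 *
 *	/sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *
 * and, e.g., "echo 2000 > rate_limit_us" limits frequency updates to at most
 * one per 2000 us for every policy attached to these tunables.
 */
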
/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

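/*
 * Note: MAX_USER_RT_PRIO / 2 evaluates to 50, the same value as
 * SUGOV_KTHREAD_PRIORITY defined above, so the slow-path worker runs as a
 * mid-priority SCHED_FIFO task.
 */
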
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	if (policy->transition_delay_us) {
		tunables->rate_limit_us = policy->transition_delay_us;
	} else {
		unsigned int lat;

		tunables->rate_limit_us = LATENCY_MULTIPLIER;
		lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
		if (lat)
			tunables->rate_limit_us *= lat;
	}
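	/*
	 * Worked default with hypothetical numbers: LATENCY_MULTIPLIER has
	 * historically been 1000, so a driver reporting transition_latency =
	 * 500000 ns gives lat = 500 and rate_limit_us = 500000, unless it
	 * supplies transition_delay_us directly as above.
	 */
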
	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->flags = SCHED_CPUFREQ_RT;
		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_sched();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}

static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);