/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <trace/events/power.h>
struct sugov_tunables {
        struct gov_attr_set     attr_set;
        unsigned int            rate_limit_us;
};
struct sugov_policy {
        struct cpufreq_policy   *policy;

        struct sugov_tunables   *tunables;
        struct list_head        tunables_hook;

        raw_spinlock_t          update_lock;    /* For shared policies */
        u64                     last_freq_update_time;
        s64                     freq_update_delay_ns;
        unsigned int            next_freq;
        unsigned int            cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct irq_work         irq_work;
        struct kthread_work     work;
        struct mutex            work_lock;
        struct kthread_worker   worker;
        struct task_struct      *thread;
        bool                    work_in_progress;

        bool                    need_freq_update;
};
struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy     *sg_policy;

        bool                    iowait_boost_pending;
        unsigned int            iowait_boost;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long           saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-CPU data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-CPU
         * requests, so while get_next_freq() will work, our
         * sugov_update_commit() call may not for the fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * For the slow switching platforms, the kthread is always scheduled on
         * the right set of CPUs and any CPU can find the next frequency and
         * schedule the kthread.
         */
        if (sg_policy->policy->fast_switch_enabled &&
            !cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;

        if (unlikely(sg_policy->limits_changed)) {
                sg_policy->limits_changed = false;
                sg_policy->need_freq_update = true;
                return true;
        }

        delta_ns = time - sg_policy->last_freq_update_time;

        return delta_ns >= sg_policy->freq_update_delay_ns;
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                   unsigned int next_freq)
        if (sg_policy->next_freq == next_freq)

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;
static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                              unsigned int next_freq)
        struct cpufreq_policy *policy = sg_policy->policy;

        if (!sugov_update_next_freq(sg_policy, time, next_freq))

        next_freq = cpufreq_driver_fast_switch(policy, next_freq);

        policy->cur = next_freq;
        trace_cpu_frequency(next_freq, smp_processor_id());
static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
                                  unsigned int next_freq)
        if (!sugov_update_next_freq(sg_policy, time, next_freq))

        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
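/*
 * Illustrative sketch (not part of the original code): with frequency-
 * invariant utilization, max_freq = 2000000 kHz, util = 512 and max = 1024,
 * the raw value computed below is
 *
 *     next_freq = 1.25 * 2000000 * 512 / 1024 = 1250000 kHz
 *
 * which cpufreq_driver_resolve_freq() then maps to the lowest driver-
 * supported frequency >= 1250000 kHz within the policy limits.
 */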
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = (freq + (freq >> 2)) * util / max;

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *
 * Where the cfs, rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs, rt and dl utilization are the running times measured with
 * rq->clock_task, which excludes things like IRQ and steal time. The latter
 * are then accrued in the irq utilization.
 *
 * The DL bandwidth number, on the other hand, is not a measured metric but a
 * value computed based on the task model parameters; it gives the minimal
 * utilization required to meet deadlines.
 */
static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
        struct rq *rq = cpu_rq(sg_cpu->cpu);
        unsigned long util, irq, max;

        sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
        sg_cpu->bw_dl = cpu_bw_dl(rq);

        if (rt_rq_is_runnable(&rq->rt))
                return max;
        /*
         * Early check to see if IRQ/steal time saturates the CPU, can be
         * because of inaccuracies in how we track these -- see
         * update_irq_load_avg().
         */
        irq = cpu_util_irq(rq);
        if (unlikely(irq >= max))
                return max;
        /*
         * Because the time spent on RT/DL tasks is visible as 'lost' time to
         * CFS tasks and we use the same metric to track the effective
         * utilization (PELT windows are synchronized) we can directly add them
         * to obtain the CPU's actual utilization.
         */
        util = cpu_util_cfs(rq);
        util += cpu_util_rt(rq);
        /*
         * We do not make cpu_util_dl() a permanent part of this sum because we
         * want to use cpu_bw_dl() later on, but we need to check if the
         * CFS+RT+DL sum is saturated (i.e. no idle time) such that we select
         * f_max when there is no idle time.
         *
         * NOTE: numerical errors or stop class might cause us to not quite hit
         * saturation when we should -- something for later.
         */
        if ((util + cpu_util_dl(rq)) >= max)
                return max;
        /*
         * There is still idle time; further improve the number by using the
         * irq metric. Because IRQ/steal time is hidden from the task clock we
         * need to scale the task numbers:
         *
         *              max - irq
         *   U' = irq + --------- * U
         *                 max
         */
        util = scale_irq_capacity(util, irq, max);
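        /*
         * Illustrative sketch (not part of the original code): with irq = 256,
         * max = 1024 and U = 512, the formula above gives
         * U' = 256 + (1024 - 256) / 1024 * 512 = 640, i.e. the IRQ time both
         * shrinks the room left for tasks and is added back as demand of its
         * own.
         */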
        /*
         * Bandwidth required by DEADLINE must always be granted while, for
         * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
         * to gracefully reduce the frequency when no tasks show up for longer
         * periods of time.
         *
         * Ideally we would like to set bw_dl as min/guaranteed freq and util +
         * bw_dl as requested freq. However, cpufreq is not yet ready for such
         * an interface. So, we only do the latter for now.
         */
        return min(max, util + sg_cpu->bw_dl);
/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from the minimum frequency, which improves
 * energy efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reset boost only if a tick has elapsed since last request */
        if (delta_ns <= TICK_NSEC)

        sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;
/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from the utilization of the minimum
 * OPP to the utilization of the maximum OPP.
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
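/*
 * Illustrative sketch (not part of the original code): assuming
 * SCHED_CAPACITY_SCALE = 1024 and sg_cpu->min = 128, IO wakeups arriving at
 * least once per tick grow the boost as 128, 256, 512, 1024 and then stay
 * clamped at SCHED_CAPACITY_SCALE; a gap of more than one tick resets the
 * sequence back to sg_cpu->min.
 */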
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Boost only tasks waking up after IO */
        if (!set_iowait_boost)
                return;

        /* Ensure boost doubles only one time at each request */
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after IO: start with minimum boost */
        sg_cpu->iowait_boost = sg_cpu->min;
/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is decreased by this function each time an
 * increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO, while
 * being more conservative on tasks that do only sporadic IO operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
                                        unsigned long util, unsigned long max)
        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
                return util;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
                return util;

        if (!sg_cpu->iowait_boost_pending) {
                /*
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < sg_cpu->min) {
                        sg_cpu->iowait_boost = 0;
                        return util;
                }
        }

        sg_cpu->iowait_boost_pending = false;

        /*
         * @util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
        boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
        return max(boost, util);
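        /*
         * Illustrative sketch (not part of the original code): on a CPU with
         * capacity max = 512 and iowait_boost = 512, the boost contributes
         * (512 * 512) >> SCHED_CAPACITY_SHIFT = 256 in capacity units, so a
         * current util below 256 would be raised to 256 by the max() above.
         */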
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
#else /* !CONFIG_NO_HZ_COMMON */
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_policy->limits_changed = true;
static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned long util, max;

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (!sugov_should_update_freq(sg_policy, time))
                return;

        /* Limits may have changed, don't skip frequency update */
        busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);

        util = sugov_get_util(sg_cpu);
        util = sugov_iowait_apply(sg_cpu, time, util, max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         */
        if (busy && next_f < sg_policy->next_freq) {
                next_f = sg_policy->next_freq;

                /* Reset cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = 0;
        }

        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */
        if (sg_policy->policy->fast_switch_enabled) {
                sugov_fast_switch(sg_policy, time, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
                sugov_deferred_update(sg_policy, time, next_f);
                raw_spin_unlock(&sg_policy->update_lock);
        }
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;

                j_util = sugov_get_util(j_sg_cpu);
                j_max = j_sg_cpu->max;
                j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
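                /*
                 * Track the CPU with the highest util/max ratio; the
                 * cross-multiplication below compares j_util/j_max against
                 * util/max without doing a division.
                 */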
                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }
        }

        return get_next_freq(sg_policy, util, max);
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);

                if (sg_policy->policy->fast_switch_enabled)
                        sugov_fast_switch(sg_policy, time, next_f);
                else
                        sugov_deferred_update(sg_policy, time, next_f);
        }

        raw_spin_unlock(&sg_policy->update_lock);
static void sugov_work(struct kthread_work *work)
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

        /*
         * Hold sg_policy->update_lock briefly to handle the case where
         * sg_policy->next_freq is read here and then updated by
         * sugov_deferred_update() just before work_in_progress is set to false
         * here; without the lock we could miss queueing the new update.
         *
         * Note: If a work was queued after the update_lock is released,
         * sugov_work() will just be called again by kthread_work code; and the
         * request will be processed before the sugov thread sleeps.
         */
        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
        freq = sg_policy->next_freq;
        sg_policy->work_in_progress = false;
        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);
static void sugov_irq_work(struct irq_work *irq_work)
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
        return container_of(attr_set, struct sugov_tunables, attr_set);
static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
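        /*
         * Usage sketch (not part of the original code): with per-policy
         * tunables this attribute is typically exposed under sysfs as, e.g.,
         *
         *   /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
         *
         * and writing "2000" to it limits schedutil frequency updates on that
         * policy to at most one every 2000 us (2 ms); the exact location
         * depends on have_governor_per_policy().
         */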
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
        &rate_limit_us.attr,
        NULL
};

static struct kobj_type sugov_tunables_ktype = {
        .default_attrs = sugov_attributes,
        .sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);

static void sugov_policy_free(struct sugov_policy *sg_policy)
static int sugov_kthread_create(struct sugov_policy *sg_policy)
        struct task_struct *thread;
        struct sched_attr attr = {
                .size           = sizeof(struct sched_attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_flags    = SCHED_FLAG_SUGOV,

                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime  = 1000000,
                .sched_deadline = 10000000,
                .sched_period   = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;
        kthread_bind_mask(thread, policy->related_cpus);
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
        if (!have_governor_per_policy())
                global_tunables = tunables;

static void sugov_tunables_free(struct sugov_tunables *tunables)
        if (!have_governor_per_policy())
                global_tunables = NULL;
static int sugov_init(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy)
                goto disable_fast_switch;

        ret = sugov_kthread_create(sg_policy);

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
static void sugov_exit(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_tunables_free(tunables);

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
static int sugov_start(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy = policy->governor_data;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time = 0;
        sg_policy->next_freq = 0;
        sg_policy->work_in_progress = false;
        sg_policy->limits_changed = false;
        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->sg_policy = sg_policy;
                sg_cpu->min =
                        (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
                        policy->cpuinfo.max_freq;
        }
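        /*
         * Illustrative sketch (not part of the original code): with
         * cpuinfo.min_freq = 500000 kHz and cpuinfo.max_freq = 2000000 kHz,
         * sg_cpu->min = 1024 * 500000 / 2000000 = 256, i.e. the IO wait boost
         * starts at a quarter of the CPU's capacity scale.
         */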
        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
                                             policy_is_shared(policy) ?
                                                        sugov_update_shared :
                                                        sugov_update_single);
        }

static void sugov_stop(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy = policy->governor_data;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
static void sugov_limits(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->limits_changed = true;
static struct cpufreq_governor schedutil_gov = {
        .owner                  = THIS_MODULE,
        .dynamic_switching      = true,
        .start                  = sugov_start,
        .limits                 = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
        return &schedutil_gov;
#endif

static int __init sugov_register(void)
        return cpufreq_register_governor(&schedutil_gov);
fs_initcall(sugov_register);