/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>
struct sugov_tunables {
        struct gov_attr_set attr_set;
        unsigned int up_rate_limit_us;
        unsigned int down_rate_limit_us;
};
struct sugov_policy {
        struct cpufreq_policy *policy;

        struct sugov_tunables *tunables;
        struct list_head tunables_hook;

        raw_spinlock_t update_lock;     /* For shared policies */
        u64 last_freq_update_time;
        s64 min_rate_limit_ns;
        s64 up_rate_delay_ns;
        s64 down_rate_delay_ns;
        unsigned int next_freq;
        unsigned int cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct irq_work irq_work;
        struct kthread_work work;
        struct mutex work_lock;
        struct kthread_worker worker;
        struct task_struct *thread;
        bool work_in_progress;

        bool limits_changed;
        bool need_freq_update;
};
struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy *sg_policy;
        unsigned int cpu;

        bool iowait_boost_pending;
        unsigned int iowait_boost;
        u64 last_update;

        unsigned long bw_dl;
        unsigned long min;
        unsigned long max;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long saved_idle_calls;
#endif
};
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
        s64 delta_ns;

        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-CPU data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-CPU
         * requests, so while get_next_freq() will work, our
         * sugov_update_commit() call may not for the fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * For the slow switching platforms, the kthread is always scheduled on
         * the right set of CPUs and any CPU can find the next frequency and
         * schedule the kthread.
         */
        if (sg_policy->policy->fast_switch_enabled &&
            !cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;

        if (unlikely(sg_policy->limits_changed)) {
                sg_policy->limits_changed = false;
                sg_policy->need_freq_update = true;
                return true;
        }
        /*
         * No need to recalculate the next frequency for at least
         * min_rate_limit_us. However, we might still decide to rate-limit
         * the change further once its direction (up or down) is known,
         * according to the separate up/down rate limits.
         */
        delta_ns = time - sg_policy->last_freq_update_time;
        return delta_ns >= sg_policy->min_rate_limit_ns;
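        /*
         * Illustrative example (hypothetical values, not defaults): with
         * up_rate_limit_us = 500 and down_rate_limit_us = 20000,
         * min_rate_limit_ns is 500000, so the check above allows the next
         * frequency to be re-evaluated at most every 500 us, while
         * sugov_up_down_rate_limit() below additionally holds back a
         * frequency decrease until 20 ms have passed since the last
         * committed change.
         */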
static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
                                     unsigned int next_freq)
        s64 delta_ns;

        delta_ns = time - sg_policy->last_freq_update_time;

        if (next_freq > sg_policy->next_freq &&
            delta_ns < sg_policy->up_rate_delay_ns)
                return true;

        if (next_freq < sg_policy->next_freq &&
            delta_ns < sg_policy->down_rate_delay_ns)
                return true;

        return false;

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                   unsigned int next_freq)
        if (sg_policy->next_freq == next_freq)
                return false;

        if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
                return false;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;
        return true;
static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                              unsigned int next_freq)
        struct cpufreq_policy *policy = sg_policy->policy;

        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        next_freq = cpufreq_driver_fast_switch(policy, next_freq);

        policy->cur = next_freq;
        trace_cpu_frequency(next_freq, smp_processor_id());

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
                                  unsigned int next_freq)
        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = map_util_freq(util, freq, max);

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
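        /*
         * Worked example with made-up numbers (purely illustrative): on a
         * frequency-invariant system with policy->cpuinfo.max_freq = 2000000
         * kHz, util = 614 and max = 1024 give
         *
         *   next_freq = 1.25 * 2000000 * 614 / 1024 ~= 1499023 kHz
         *
         * which cpufreq_driver_resolve_freq() then maps to the lowest
         * driver-supported frequency at or above that value.
         */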
/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs, rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The @util parameter passed to this function is assumed to be the aggregation
 * of the RT and CFS util numbers. The cases of DL and IRQ are managed here.
 *
 * The cfs, rt and dl utilizations are the running times measured with
 * rq->clock_task, which excludes things like IRQ and steal time. The latter
 * are then accrued in the irq utilization.
 *
 * The DL bandwidth number, on the other hand, is not a measured metric but a
 * value computed from the task model parameters; it gives the minimal
 * utilization required to meet deadlines.
 */
unsigned long schedutil_freq_util(int cpu, unsigned long util,
                                  unsigned long max, enum schedutil_type type)
        unsigned long dl_util, irq;
        struct rq *rq = cpu_rq(cpu);

        if (sched_feat(SUGOV_RT_MAX_FREQ) && type == FREQUENCY_UTIL &&
            rt_rq_is_runnable(&rq->rt))
                return max;

        /*
         * Early check to see if IRQ/steal time saturates the CPU, can be
         * because of inaccuracies in how we track these -- see
         * update_irq_load_avg().
         */
        irq = cpu_util_irq(rq);
        if (unlikely(irq >= max))
                return max;
        /*
         * The function is called with @util defined as the aggregation (the
         * sum) of RT and CFS signals, hence leaving the special case of DL
         * to be dealt with. The exact way of doing things depends on the
         * calling context.
         */
        dl_util = cpu_util_dl(rq);

        /*
         * For frequency selection we do not make cpu_util_dl() a permanent
         * part of this sum because we want to use cpu_bw_dl() later on, but we
         * need to check if the CFS+RT+DL sum is saturated (i.e. no idle time)
         * such that we select f_max when there is no idle time.
         *
         * NOTE: numerical errors or stop class might cause us to not quite hit
         * saturation when we should -- something for later.
         */
        if (util + dl_util >= max)
                return max;

        /*
         * OTOH, for energy computation we need the estimated running time, so
         * include util_dl and ignore dl_bw.
         */
        if (type == ENERGY_UTIL)
                util += dl_util;
        /*
         * There is still idle time; further improve the number by using the
         * irq metric. Because IRQ/steal time is hidden from the task clock we
         * need to scale the task numbers:
         *
         *              max - irq
         *   U' = irq + --------- * U
         *                 max
         */
        util = scale_irq_capacity(util, irq, max);
        util += irq;
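        /*
         * Illustrative numbers (hypothetical, not taken from anywhere above):
         * util = 400, irq = 100 and max = 1024 give
         * U' = 100 + (1024 - 100) * 400 / 1024 = 100 + 360 = 460, i.e. the
         * CFS+RT estimate is scaled down to the share of the CPU left over
         * after IRQ/steal time and the IRQ time itself is added back on top.
         */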
        /*
         * Bandwidth required by DEADLINE must always be granted while, for
         * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
         * to gracefully reduce the frequency when no tasks show up for longer
         * periods of time.
         *
         * Ideally we would like to set bw_dl as min/guaranteed freq and util +
         * bw_dl as requested freq. However, cpufreq is not yet ready for such
         * an interface. So, we only do the latter for now.
         */
        if (type == FREQUENCY_UTIL)
                util += cpu_bw_dl(rq);

        return min(max, util);
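        /*
         * Continuing the illustrative numbers from the irq-scaling comment
         * above (U' = 460): with a hypothetical cpu_bw_dl(rq) = 100, the
         * FREQUENCY_UTIL result is min(1024, 460 + 100) = 560, so the DEADLINE
         * bandwidth is reflected in the requested frequency even when those
         * tasks are not currently running.
         */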
static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
        struct rq *rq = cpu_rq(sg_cpu->cpu);
        unsigned long util = boosted_cpu_util(sg_cpu->cpu, cpu_util_rt(rq));
        unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);

        sg_cpu->max = max;
        sg_cpu->bw_dl = cpu_bw_dl(rq);

        return schedutil_freq_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL);
/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from the minimum frequency, which improves
 * energy efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reset boost only if a tick has elapsed since last request */
        if (delta_ns <= TICK_NSEC)
                return false;

        sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from the utilization of the minimum
 * OPP to the utilization of the maximum OPP.
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Boost only tasks waking up after IO */
        if (!set_iowait_boost)
                return;

        /* Ensure the boost doubles only once per request */
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after IO: start with minimum boost */
        sg_cpu->iowait_boost = sg_cpu->min;
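        /*
         * Example trajectory (hypothetical sg_cpu->min = 128): IOWAIT wakeups
         * arriving at least once per tick grow the boost as 128 -> 256 -> 512
         * -> 1024 (SCHED_CAPACITY_SCALE), where it stays capped; a tick
         * without a request lets sugov_iowait_reset() start the next boost
         * from sg_cpu->min again.
         */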
/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it's instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks which wait on IO frequently,
 * while being more conservative on tasks which do only sporadic IO
 * operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
                                        unsigned long util, unsigned long max)
        unsigned long boost;

        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
                return util;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
                return util;

        if (!sg_cpu->iowait_boost_pending) {
                /*
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < sg_cpu->min) {
                        sg_cpu->iowait_boost = 0;
                        return util;
                }
        }

        sg_cpu->iowait_boost_pending = false;

        /*
         * @util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
        boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
        return max(boost, util);
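        /*
         * Illustrative conversion: iowait_boost = 512 on a CPU whose capacity
         * is max = 800 yields boost = 512 * 800 >> SCHED_CAPACITY_SHIFT = 400
         * capacity units, which only raises the request when it exceeds the
         * current @util.
         */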
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_policy->limits_changed = true;
static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned long util, max;
        unsigned int next_f;
        bool busy;

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (!sugov_should_update_freq(sg_policy, time))
                return;

        /* Limits may have changed, don't skip frequency update */
        busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);

        util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
        util = sugov_iowait_apply(sg_cpu, time, util, max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         */
        if (busy && next_f < sg_policy->next_freq) {
                next_f = sg_policy->next_freq;

                /* Reset cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = 0;
        }

        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */
        if (sg_policy->policy->fast_switch_enabled) {
                sugov_fast_switch(sg_policy, time, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
                sugov_deferred_update(sg_policy, time, next_f);
                raw_spin_unlock(&sg_policy->update_lock);
        }
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;

                j_util = sugov_get_util(j_sg_cpu);
                j_max = j_sg_cpu->max;
                j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }
        }

        return get_next_freq(sg_policy, util, max);
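        /*
         * The cross-multiplication in the loop above picks the CPU with the
         * highest util/max ratio without doing a division. Hypothetical
         * example: CPU0 with (util, max) = (300, 1024) and CPU1 with
         * (200, 512): since 200 * 1024 > 512 * 300, CPU1's numbers are used
         * even though its absolute utilization is lower.
         */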
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);

                if (sg_policy->policy->fast_switch_enabled)
                        sugov_fast_switch(sg_policy, time, next_f);
                else
                        sugov_deferred_update(sg_policy, time, next_f);
        }

        raw_spin_unlock(&sg_policy->update_lock);
static void sugov_work(struct kthread_work *work)
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
        unsigned int freq;
        unsigned long flags;

        /*
         * Hold sg_policy->update_lock shortly to handle the case where:
         * if sg_policy->next_freq is read here, and then updated by
         * sugov_deferred_update() just before work_in_progress is set to false
         * here, we may miss queueing the new update.
         *
         * Note: If a work was queued after the update_lock is released,
         * sugov_work() will just be called again by the kthread_work code; and
         * the request will be processed before the sugov thread sleeps.
         */
        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
        freq = sg_policy->next_freq;
        sg_policy->work_in_progress = false;
        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);
static void sugov_irq_work(struct irq_work *irq_work)
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
        return container_of(attr_set, struct sugov_tunables, attr_set);
static DEFINE_MUTEX(min_rate_lock);

static void update_min_rate_limit_ns(struct sugov_policy *sg_policy)
        mutex_lock(&min_rate_lock);
        sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
                                           sg_policy->down_rate_delay_ns);
        mutex_unlock(&min_rate_lock);

static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->up_rate_limit_us);

static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
                                      const char *buf, size_t count)
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->up_rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
                sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
                update_min_rate_limit_ns(sg_policy);
        }

        return count;
static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
                                        const char *buf, size_t count)
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->down_rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
                sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
                update_min_rate_limit_ns(sg_policy);
        }

        return count;

static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
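/*
 * For illustration (this is the usual cpufreq sysfs layout, not something
 * defined in this file): with per-policy tunables the two attributes above
 * are typically exposed as
 *
 *   /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/up_rate_limit_us
 *   /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/down_rate_limit_us
 *
 * and under a single schedutil/ directory below
 * /sys/devices/system/cpu/cpufreq/ when the driver does not use per-policy
 * governors.
 */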
static struct attribute *sugov_attributes[] = {
        &up_rate_limit_us.attr,
        &down_rate_limit_us.attr,
        NULL
};

static struct kobj_type sugov_tunables_ktype = {
        .default_attrs = sugov_attributes,
        .sysfs_ops = &governor_sysfs_ops,
};
/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;

static void sugov_policy_free(struct sugov_policy *sg_policy)
        kfree(sg_policy);
static int sugov_kthread_create(struct sugov_policy *sg_policy)
        struct task_struct *thread;
        struct sched_attr attr = {
                .size = sizeof(struct sched_attr),
                .sched_policy = SCHED_DEADLINE,
                .sched_flags = SCHED_FLAG_SUGOV,
                .sched_nice = 0,
                .sched_priority = 0,
                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime = 1000000,
                .sched_deadline = 10000000,
                .sched_period = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;
        kthread_bind_mask(thread, policy->related_cpus);
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);
        return 0;
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;

static void sugov_tunables_free(struct sugov_tunables *tunables)
        if (!have_governor_per_policy())
                global_tunables = NULL;

        kfree(tunables);
static int sugov_init(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;
        cpufreq_enable_fast_switch(policy);
        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }
        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;
        mutex_lock(&global_tunables_lock);
        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;
                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }
        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }
        tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
        tunables->down_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;
        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;
fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);
        pr_err("initialization failed (error %d)\n", ret);
        return ret;
static void sugov_exit(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_tunables_free(tunables);

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
static int sugov_start(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        sg_policy->up_rate_delay_ns =
                sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
        sg_policy->down_rate_delay_ns =
                sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
        update_min_rate_limit_ns(sg_policy);
        sg_policy->last_freq_update_time = 0;
        sg_policy->next_freq = 0;
        sg_policy->work_in_progress = false;
        sg_policy->limits_changed = false;
        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = 0;
        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu = cpu;
                sg_cpu->sg_policy = sg_policy;
                sg_cpu->min =
                        (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
                        policy->cpuinfo.max_freq;
        }
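        /*
         * Example with hypothetical OPPs: cpuinfo.min_freq = 500000 kHz and
         * cpuinfo.max_freq = 2000000 kHz give sg_cpu->min =
         * 1024 * 500000 / 2000000 = 256 capacity units, the floor from which
         * the iowait boost starts and below which it is dropped.
         */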
        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
                                             policy_is_shared(policy) ?
                                             sugov_update_shared :
                                             sugov_update_single);
        }
        return 0;
static void sugov_stop(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
static void sugov_limits(struct cpufreq_policy *policy)
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->limits_changed = true;
struct cpufreq_governor schedutil_gov = {
        .name = "schedutil",
        .owner = THIS_MODULE,
        .dynamic_switching = true,
        .init = sugov_init,
        .exit = sugov_exit,
        .start = sugov_start,
        .stop = sugov_stop,
        .limits = sugov_limits,
};
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
        return &schedutil_gov;
#endif

static int __init sugov_register(void)
        return cpufreq_register_governor(&schedutil_gov);
fs_initcall(sugov_register);
#ifdef CONFIG_ENERGY_MODEL
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;

static void rebuild_sd_workfn(struct work_struct *work)
        mutex_lock(&sched_energy_mutex);
        sched_energy_update = true;
        rebuild_sched_domains();
        sched_energy_update = false;
        mutex_unlock(&sched_energy_mutex);

static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
                                   struct cpufreq_governor *old_gov)
        if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
                /*
                 * When called from the cpufreq_register_driver() path, the
                 * cpu_hotplug_lock is already held, so use a work item to
                 * avoid nested locking in rebuild_sched_domains().
                 */
                schedule_work(&rebuild_sd_work);
        }
#endif /* CONFIG_ENERGY_MODEL */