 * CPUFreq governor based on scheduler-provided CPU utilization data.
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>
struct sugov_tunables {
	struct gov_attr_set attr_set;
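	/*
	 * Minimum intervals (in microseconds) enforced between successive
	 * frequency increases (up) and decreases (down). Illustrative values
	 * (not defaults from this file): up_rate_limit_us = 500 allows a raise
	 * at most every 500us, while down_rate_limit_us = 20000 holds off any
	 * reduction until 20ms have passed since the last frequency change.
	 */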
	unsigned int up_rate_limit_us;
	unsigned int down_rate_limit_us;
	struct cpufreq_policy *policy;
	struct sugov_tunables *tunables;
	struct list_head tunables_hook;
	raw_spinlock_t update_lock; /* For shared policies */
	u64 last_freq_update_time;
	s64 min_rate_limit_ns;
	s64 down_rate_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;
	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;
	bool need_freq_update;
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;
	bool iowait_boost_pending;
	unsigned int iowait_boost;
	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
/************************ Governor internals ***********************/
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not for the fast switching platforms.
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 * For the slow switching platforms, the kthread is always scheduled on
	 * the right set of CPUs and any CPU can find the next frequency and
	 * schedule the kthread.
	if (sg_policy->policy->fast_switch_enabled &&
	    !cpufreq_this_cpu_can_update(sg_policy->policy))
	if (unlikely(sg_policy->need_freq_update))
	/* No need to recalculate next freq for min_rate_limit_us
	 * at least. However we might still decide to further rate
	 * limit once frequency change direction is decided, according
	 * to the separate rate limits.
	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->min_rate_limit_ns;
static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
				     unsigned int next_freq)
	delta_ns = time - sg_policy->last_freq_update_time;
	if (next_freq > sg_policy->next_freq &&
	    delta_ns < sg_policy->up_rate_delay_ns)
	if (next_freq < sg_policy->next_freq &&
	    delta_ns < sg_policy->down_rate_delay_ns)
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
	if (sg_policy->next_freq == next_freq)
	if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;
static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
	struct cpufreq_policy *policy = sg_policy->policy;
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
	policy->cur = next_freq;
	trace_cpu_frequency(next_freq, smp_processor_id());
static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
				  unsigned int next_freq)
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *	next_freq = C * max_freq * util / max
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *	next_freq = C * curr_freq * util_raw / max
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
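 *
 * For example (illustrative numbers, not taken from this file): with
 * frequency-invariant utilization, max_freq = 2000000 kHz, util = 512 and
 * max = 1024, the raw value is 1.25 * 2000000 * 512 / 1024 = 1250000 kHz,
 * which cpufreq_driver_resolve_freq() then maps to the closest supported
 * OPP at or above it.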
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;
	freq = map_util_freq(util, freq, max);
	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 * The scheduler tracks the following metrics:
 *   cpu_util_{cfs,rt,dl,irq}()
 * Where the cfs, rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 * The @util parameter passed to this function is assumed to be the aggregation
 * of RT and CFS util numbers. The cases of DL and IRQ are managed here.
 * The cfs, rt and dl utilization values are the running times measured with
 * rq->clock_task, which excludes things like IRQ and steal time; these are
 * then accrued in the irq utilization.
 * The DL bandwidth number, on the other hand, is not a measured metric but a
 * value computed based on the task model parameters; it gives the minimal
 * utilization required to meet deadlines.
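 *
 * Hence, below, the DL contribution is cpu_bw_dl() (the reserved bandwidth)
 * when selecting a frequency (FREQUENCY_UTIL) and cpu_util_dl() (the measured
 * running time) when estimating energy (ENERGY_UTIL).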
unsigned long schedutil_freq_util(int cpu, unsigned long util,
				  unsigned long max, enum schedutil_type type)
	unsigned long dl_util, irq;
	struct rq *rq = cpu_rq(cpu);
	if (sched_feat(SUGOV_RT_MAX_FREQ) && type == FREQUENCY_UTIL &&
	    rt_rq_is_runnable(&rq->rt))
	 * Early check to see if IRQ/steal time saturates the CPU; this can
	 * happen because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= max))
	 * The function is called with @util defined as the aggregation (the
	 * sum) of RT and CFS signals, hence leaving the special case of DL
	 * to be dealt with. The exact way of doing things depends on the calling
	dl_util = cpu_util_dl(rq);
	 * For frequency selection we do not make cpu_util_dl() a permanent part
	 * of this sum because we want to use cpu_bw_dl() later on, but we need
	 * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such
	 * that we select f_max when there is no idle time.
	 * NOTE: numerical errors or stop class might cause us to not quite hit
	 * saturation when we should -- something for later.
	if (util + dl_util >= max)
	 * OTOH, for energy computation we need the estimated running time, so
	 * include util_dl and ignore dl_bw.
	if (type == ENERGY_UTIL)
	 * There is still idle time; further improve the number by using the
	 * irq metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	util = scale_irq_capacity(util, irq, max);
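	/*
	 * Worked example for the formula above (illustrative numbers): with
	 * max = 1024, irq = 256 and U = 400, U' = 256 + (768 / 1024) * 400 = 556.
	 */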
	 * Bandwidth required by DEADLINE must always be granted while, for
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
	 * to gracefully reduce the frequency when no tasks show up for longer
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
	 * an interface. So, we only do the latter for now.
	if (type == FREQUENCY_UTIL)
		util += cpu_bw_dl(rq);
	return min(max, util);
static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long util = boosted_cpu_util(sg_cpu->cpu, cpu_util_rt(rq));
	unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
	sg_cpu->bw_dl = cpu_bw_dl(rq);
	return schedutil_freq_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL);
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from the minimum frequency, which improves
 * energy efficiency by ignoring sporadic wakeups from IO.
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
	s64 delta_ns = time - sg_cpu->last_update;
	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
	sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from the utilization of the minimum
 * OPP to the utilization of the maximum OPP.
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
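 *
 * For example, back-to-back boosted wakeups walk the boost value through
 * sg_cpu->min, 2 * sg_cpu->min, 4 * sg_cpu->min, ... capped at
 * SCHED_CAPACITY_SCALE (see the doubling below).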
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;
	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
	/* Ensure the boost doubles only once per request */
	if (sg_cpu->iowait_boost_pending)
	sg_cpu->iowait_boost_pending = true;
	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = sg_cpu->min;
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 * A CPU running a task which woke up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is decreased by this function instead,
 * each time an increase has not been requested (!iowait_boost_pending).
 * A CPU which appears to have been idle for at least one tick also has
 * its IO boost utilization reset.
 * This mechanism is designed to boost tasks which do frequent IO waits, while
 * being more conservative on tasks which do only sporadic IO operations.
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
					unsigned long util, unsigned long max)
	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
	if (!sg_cpu->iowait_boost_pending) {
		 * No boost pending; reduce the boost value.
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < sg_cpu->min) {
			sg_cpu->iowait_boost = 0;
	sg_cpu->iowait_boost_pending = false;
	 * @util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
	return max(boost, util);
#ifdef CONFIG_NO_HZ_COMMON
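/*
 * A CPU is considered "busy" if it has not entered the idle loop since the
 * previous check: its NOHZ idle-calls counter is unchanged, so a pending
 * frequency reduction is likely premature (see sugov_update_single()).
 */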
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;
	sg_cpu->saved_idle_calls = idle_calls;
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_policy->need_freq_update = true;
static void sugov_update_single(struct update_util_data *hook, u64 time,
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;
	ignore_dl_rate_limit(sg_cpu, sg_policy);
	if (!sugov_should_update_freq(sg_policy, time))
	busy = sugov_cpu_is_busy(sg_cpu);
	util = sugov_get_util(sg_cpu);
	util = sugov_iowait_apply(sg_cpu, time, util, max);
	next_f = get_next_freq(sg_policy, util, max);
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	if (busy && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;
		/* Reset cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = 0;
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	if (sg_policy->policy->fast_switch_enabled) {
		sugov_fast_switch(sg_policy, time, next_f);
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy, time, next_f);
		raw_spin_unlock(&sg_policy->update_lock);
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		j_util = sugov_get_util(j_sg_cpu);
		j_max = j_sg_cpu->max;
		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
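		/*
		 * Pick the CPU with the highest util/max ratio; the comparison
		 * below is cross-multiplied to avoid a division.
		 */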
		if (j_util * max > j_max * util) {
	return get_next_freq(sg_policy, util, max);
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	raw_spin_lock(&sg_policy->update_lock);
	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;
	ignore_dl_rate_limit(sg_cpu, sg_policy);
	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);
		if (sg_policy->policy->fast_switch_enabled)
			sugov_fast_switch(sg_policy, time, next_f);
			sugov_deferred_update(sg_policy, time, next_f);
	raw_spin_unlock(&sg_policy->update_lock);
static void sugov_work(struct kthread_work *work)
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * here; without the lock we could miss queueing the new update.
	 * Note: If a work item was queued after the update_lock is released,
	 * sugov_work() will just be called again by the kthread_work code; the
	 * request will be processed before the sugov thread sleeps.
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
static void sugov_irq_work(struct irq_work *irq_work)
	struct sugov_policy *sg_policy;
	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
/************************** sysfs interface ************************/
static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);
static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
	return container_of(attr_set, struct sugov_tunables, attr_set);
static DEFINE_MUTEX(min_rate_lock);
static void update_min_rate_limit_ns(struct sugov_policy *sg_policy)
	mutex_lock(&min_rate_lock);
	sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
					   sg_policy->down_rate_delay_ns);
	mutex_unlock(&min_rate_lock);
static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	return sprintf(buf, "%u\n", tunables->up_rate_limit_us);
static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;
	if (kstrtouint(buf, 10, &rate_limit_us))
	tunables->up_rate_limit_us = rate_limit_us;
	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
		update_min_rate_limit_ns(sg_policy);
static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
					const char *buf, size_t count)
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;
	if (kstrtouint(buf, 10, &rate_limit_us))
	tunables->down_rate_limit_us = rate_limit_us;
	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
		update_min_rate_limit_ns(sg_policy);
static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
static struct attribute *sugov_attributes[] = {
	&up_rate_limit_us.attr,
	&down_rate_limit_us.attr,
static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
/********************** cpufreq governor interface *********************/
struct cpufreq_governor schedutil_gov;
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
	struct sugov_policy *sg_policy;
	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
static void sugov_policy_free(struct sugov_policy *sg_policy)
static int sugov_kthread_create(struct sugov_policy *sg_policy)
	struct task_struct *thread;
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
		.sched_policy = SCHED_DEADLINE,
		.sched_flags = SCHED_FLAG_SUGOV,
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		.sched_runtime = 1000000,
		.sched_deadline = 10000000,
		.sched_period = 10000000,
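		/* i.e. a nominal 1ms of (unused) runtime every 10ms. */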
	struct cpufreq_policy *policy = sg_policy->policy;
	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	ret = sched_setattr_nocheck(thread, &attr);
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);
	wake_up_process(thread);
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
	struct sugov_tunables *tunables;
	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
static void sugov_tunables_free(struct sugov_tunables *tunables)
	if (!have_governor_per_policy())
		global_tunables = NULL;
static int sugov_init(struct cpufreq_policy *policy)
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	/* State should be equivalent to EXIT */
	if (policy->governor_data)
	cpufreq_enable_fast_switch(policy);
	sg_policy = sugov_policy_alloc(policy);
		goto disable_fast_switch;
	ret = sugov_kthread_create(sg_policy);
	mutex_lock(&global_tunables_lock);
	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;
		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
	tunables = sugov_tunables_alloc(sg_policy);
	tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
	tunables->down_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;
	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
	mutex_unlock(&global_tunables_lock);
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
	pr_err("initialization failed (error %d)\n", ret);
static void sugov_exit(struct cpufreq_policy *policy)
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	mutex_lock(&global_tunables_lock);
	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
		sugov_tunables_free(tunables);
	mutex_unlock(&global_tunables_lock);
	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
static int sugov_start(struct cpufreq_policy *policy)
	struct sugov_policy *sg_policy = policy->governor_data;
	sg_policy->up_rate_delay_ns =
		sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
	sg_policy->down_rate_delay_ns =
		sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
	update_min_rate_limit_ns(sg_policy);
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = 0;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;
	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->sg_policy = sg_policy;
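		/*
		 * The value computed below (the lowest OPP expressed on the
		 * SCHED_CAPACITY_SCALE scale) is used as the starting and
		 * floor value for the iowait boost (sg_cpu->min).
		 */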
			(SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
			policy->cpuinfo.max_freq;
	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
static void sugov_stop(struct cpufreq_policy *policy)
	struct sugov_policy *sg_policy = policy->governor_data;
	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);
	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
static void sugov_limits(struct cpufreq_policy *policy)
	struct sugov_policy *sg_policy = policy->governor_data;
	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	sg_policy->need_freq_update = true;
struct cpufreq_governor schedutil_gov = {
	.owner = THIS_MODULE,
	.dynamic_switching = true,
	.start = sugov_start,
	.limits = sugov_limits,
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
	return &schedutil_gov;
static int __init sugov_register(void)
	return cpufreq_register_governor(&schedutil_gov);
fs_initcall(sugov_register);
#ifdef CONFIG_ENERGY_MODEL
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;
static void rebuild_sd_workfn(struct work_struct *work)
	mutex_lock(&sched_energy_mutex);
	sched_energy_update = true;
	rebuild_sched_domains();
	sched_energy_update = false;
	mutex_unlock(&sched_energy_mutex);
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov)
	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
		 * When called from the cpufreq_register_driver() path, the
		 * cpu_hotplug_lock is already held, so use a work item to
		 * avoid nested locking in rebuild_sched_domains().
		schedule_work(&rebuild_sd_work);