/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

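/*
 * Illustrative note (paths assume the usual cpufreq sysfs layout): with a
 * system-wide governor the attribute group is created once, e.g. under
 * /sys/devices/system/cpu/cpufreq/ondemand/, while with a per-policy
 * governor each policy gets its own copy, e.g. under
 * /sys/devices/system/cpu/cpu0/cpufreq/ondemand/.
 */
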
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				dbs_data->cdata->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	policy = cdbs->cur_policy;

	/* Get absolute load for each CPU in the policy */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 j_cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's deferrable
		 * timer would not have fired during CPU-idle periods. Hence
		 * an unusually large 'wall_time' (as compared to the sampling
		 * rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

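/*
 * Worked example for the load computation above (numbers are illustrative
 * only): with wall_time = 100000 us and idle_time = 40000 us in the last
 * window,
 *
 *	load = 100 * (100000 - 40000) / 100000 = 60
 *
 * If the CPU then sleeps through several windows so that wall_time =
 * 500000 us while sampling_rate = 100000 us, wall_time exceeds
 * 2 * sampling_rate and the stored prev_load (60) is reported once before
 * being zeroed, so a freshly woken task starts from a reasonably high
 * frequency instead of a near-zero load estimate.
 */
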
static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
				    unsigned int delay)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
}

void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
		unsigned int delay, bool all_cpus)
{
	int i;

	mutex_lock(&cpufreq_governor_lock);
	if (!policy->governor_enabled)
		goto out_unlock;

	if (!all_cpus) {
		/*
		 * Use raw_smp_processor_id() to avoid preemptible warnings.
		 * We know that this is only called with all_cpus == false from
		 * works that have been queued with *_work_on() functions and
		 * those works are canceled during CPU_DOWN_PREPARE so they
		 * can't possibly run on any other CPU.
		 */
		__gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
	} else {
		for_each_cpu(i, policy->cpus)
			__gov_queue_work(i, dbs_data, delay);
	}

out_unlock:
	mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);

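/*
 * Note on 'delay' (illustrative): callers pass a delay in jiffies,
 * typically computed with delay_for_sampling_rate() from
 * cpufreq_governor.h. For example, with HZ=250 a sampling_rate of
 * 100000 us maps to usecs_to_jiffies(100000) = 25 jiffies; the helper
 * also aligns the delay so that all CPUs sample on nearly the same jiffy.
 */
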
static inline void gov_cancel_work(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy)
{
	struct cpu_dbs_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		cancel_delayed_work_sync(&cdbs->dwork);
	}
}

/* Return true if the CPU load needs to be re-evaluated, false otherwise */
bool need_load_eval(struct cpu_dbs_info *cdbs, unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* Do nothing if we have sampled recently */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			cdbs->time_stamp = time_now;
	}

	return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);

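/*
 * Usage sketch (hypothetical handler, for illustration only; the real
 * handlers live in cpufreq_ondemand.c and cpufreq_conservative.c): a
 * governor's gov_dbs_timer work function typically evaluates the load
 * under timer_mutex and then re-arms itself:
 *
 *	static void example_dbs_timer(struct work_struct *work)
 *	{
 *		struct cpu_dbs_info *cdbs = container_of(work,
 *				struct cpu_dbs_info, dwork.work);
 *		struct cpufreq_policy *policy = cdbs->cur_policy;
 *		unsigned int delay = delay_for_sampling_rate(sampling_rate);
 *
 *		mutex_lock(&cdbs->timer_mutex);
 *		if (need_load_eval(cdbs, sampling_rate))
 *			dbs_check_cpu(dbs_data, policy->cpu);
 *		gov_queue_work(dbs_data, policy, delay, false);
 *		mutex_unlock(&cdbs->timer_mutex);
 *	}
 *
 * where 'sampling_rate' and 'dbs_data' would come from the governor's
 * tuners and common data, respectively.
 */
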
static void set_sampling_rate(struct dbs_data *dbs_data,
		unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		od_tuners->sampling_rate = sampling_rate;
	}
}

static int cpufreq_governor_init(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data,
				 struct common_dbs_data *cdata)
{
	unsigned int latency;
	int ret;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy()))
			return -EINVAL;
		dbs_data->usage_count++;
		policy->governor_data = dbs_data;
		return 0;
	}
	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;
	dbs_data->cdata = cdata;
	dbs_data->usage_count = 1;
	ret = cdata->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_dbs_data;
	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;
	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));
	if (!have_governor_per_policy()) {
		if (WARN_ON(cpufreq_get_global_kobject())) {
			ret = -EINVAL;
			goto cdata_exit;
		}
		cdata->gdbs_data = dbs_data;
	}
	ret = sysfs_create_group(get_governor_parent_kobj(policy),
				 get_sysfs_attr(dbs_data));
	if (ret)
		goto put_kobj;
	policy->governor_data = dbs_data;
	return 0;

put_kobj:
	if (!have_governor_per_policy()) {
		cdata->gdbs_data = NULL;
		cpufreq_put_global_kobject();
	}
cdata_exit:
	cdata->exit(dbs_data, !policy->governor->initialized);
free_dbs_data:
	kfree(dbs_data);
	return ret;
}

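/*
 * Worked example for the sampling-rate setup above (values illustrative):
 * a driver reporting transition_latency = 10000 ns gives latency = 10 us,
 * so with LATENCY_MULTIPLIER = 1000 (include/linux/cpufreq.h) the default
 * sampling rate becomes max(min_sampling_rate, 10 * 1000) = 10000 us,
 * i.e. the governor samples every 10 ms unless its own minimum is larger.
 */
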
static void cpufreq_governor_exit(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;

	policy->governor_data = NULL;
	if (!--dbs_data->usage_count) {
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(dbs_data));

		if (!have_governor_per_policy()) {
			cdata->gdbs_data = NULL;
			cpufreq_put_global_kobject();
		}

		cdata->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	}
}

static int cpufreq_governor_start(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->cur_policy = policy;
		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		mutex_init(&j_cdbs->timer_mutex);
		INIT_DEFERRABLE_WORK(&j_cdbs->dwork, cdata->gov_dbs_timer);
	}

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->enable = 1;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = cdata->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	/* Initiate timer time stamp */
	cdbs->time_stamp = ktime_get();

	gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
		       true);
	return 0;
}

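/*
 * Note on the initial prev_load above (illustrative): right after start,
 * prev_cpu_wall and prev_cpu_idle hold totals since boot, so the first
 * prev_load is the CPU's average busy percentage over its whole uptime,
 * e.g. 250 s busy out of 1000 s of uptime yields
 * prev_load = 100 * 250 / 1000 = 25. It only becomes a per-window value
 * from the second sample onwards.
 */
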
static void cpufreq_governor_stop(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->enable = 0;
	}

	gov_cancel_work(dbs_data, policy);

	mutex_destroy(&cdbs->timer_mutex);
	cdbs->cur_policy = NULL;
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy,
				    struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);

	if (!cdbs->cur_policy)
		return;

	mutex_lock(&cdbs->timer_mutex);
	if (policy->max < cdbs->cur_policy->cur)
		__cpufreq_driver_target(cdbs->cur_policy, policy->max,
					CPUFREQ_RELATION_H);
	else if (policy->min > cdbs->cur_policy->cur)
		__cpufreq_driver_target(cdbs->cur_policy, policy->min,
					CPUFREQ_RELATION_L);
	dbs_check_cpu(dbs_data, cpu);
	mutex_unlock(&cdbs->timer_mutex);
}

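/*
 * Example of the clamping above (illustrative): if userspace lowers
 * scaling_max_freq to 1.2 GHz while the policy currently runs at 1.8 GHz,
 * policy->max < cur_policy->cur, so the frequency is dropped to 1.2 GHz
 * with CPUFREQ_RELATION_H before the load is re-evaluated via
 * dbs_check_cpu().
 */
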
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			 struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	int ret = 0;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&cdata->mutex);

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
		ret = -EINVAL;
		goto unlock;
	}

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		ret = cpufreq_governor_init(policy, dbs_data, cdata);
		break;
	case CPUFREQ_GOV_POLICY_EXIT:
		cpufreq_governor_exit(policy, dbs_data);
		break;
	case CPUFREQ_GOV_START:
		ret = cpufreq_governor_start(policy, dbs_data);
		break;
	case CPUFREQ_GOV_STOP:
		cpufreq_governor_stop(policy, dbs_data);
		break;
	case CPUFREQ_GOV_LIMITS:
		cpufreq_governor_limits(policy, dbs_data);
		break;
	}

unlock:
	mutex_unlock(&cdata->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);