2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/cpufreq_times.h>
23 #include <linux/delay.h>
24 #include <linux/device.h>
25 #include <linux/init.h>
26 #include <linux/kernel_stat.h>
27 #include <linux/module.h>
28 #include <linux/mutex.h>
29 #include <linux/slab.h>
30 #include <linux/suspend.h>
31 #include <linux/syscore_ops.h>
32 #include <linux/tick.h>
34 #include <linux/sched.h>
36 #include <trace/events/power.h>
38 static LIST_HEAD(cpufreq_policy_list);
40 static inline bool policy_is_inactive(struct cpufreq_policy *policy)
42 return cpumask_empty(policy->cpus);
45 static bool suitable_policy(struct cpufreq_policy *policy, bool active)
47 return active == !policy_is_inactive(policy);
50 /* Finds Next Acive/Inactive policy */
51 static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
55 policy = list_next_entry(policy, policy_list);
57 /* No more policies in the list */
58 if (&policy->policy_list == &cpufreq_policy_list)
60 } while (!suitable_policy(policy, active));
65 static struct cpufreq_policy *first_policy(bool active)
67 struct cpufreq_policy *policy;
69 /* No policies in the list */
70 if (list_empty(&cpufreq_policy_list))
73 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
76 if (!suitable_policy(policy, active))
77 policy = next_policy(policy, active);
82 /* Macros to iterate over CPU policies */
83 #define for_each_suitable_policy(__policy, __active) \
84 for (__policy = first_policy(__active); \
86 __policy = next_policy(__policy, __active))
88 #define for_each_active_policy(__policy) \
89 for_each_suitable_policy(__policy, true)
90 #define for_each_inactive_policy(__policy) \
91 for_each_suitable_policy(__policy, false)
93 #define for_each_policy(__policy) \
94 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
96 /* Iterate over governors */
97 static LIST_HEAD(cpufreq_governor_list);
98 #define for_each_governor(__governor) \
99 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
102 * The "cpufreq driver" - the arch- or hardware-dependent low
103 * level driver of CPUFreq support, and its spinlock. This lock
104 * also protects the cpufreq_cpu_data array.
106 static struct cpufreq_driver *cpufreq_driver;
107 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
108 static DEFINE_RWLOCK(cpufreq_driver_lock);
109 DEFINE_MUTEX(cpufreq_governor_lock);
111 /* Flag to suspend/resume CPUFreq governors */
112 static bool cpufreq_suspended;
114 static inline bool has_target(void)
116 return cpufreq_driver->target_index || cpufreq_driver->target;
119 /* internal prototypes */
120 static int __cpufreq_governor(struct cpufreq_policy *policy,
122 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
123 static void handle_update(struct work_struct *work);
126 * Two notifier lists: the "policy" list is involved in the
127 * validation process for a new CPU frequency policy; the
128 * "transition" list for kernel code that needs to handle
129 * changes to devices when the CPU clock speed changes.
130 * The mutex locks both lists.
132 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
133 static struct srcu_notifier_head cpufreq_transition_notifier_list;
135 static bool init_cpufreq_transition_notifier_list_called;
136 static int __init init_cpufreq_transition_notifier_list(void)
138 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
139 init_cpufreq_transition_notifier_list_called = true;
142 pure_initcall(init_cpufreq_transition_notifier_list);
144 static int off __read_mostly;
145 static int cpufreq_disabled(void)
149 void disable_cpufreq(void)
153 static DEFINE_MUTEX(cpufreq_governor_mutex);
155 bool have_governor_per_policy(void)
157 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
159 EXPORT_SYMBOL_GPL(have_governor_per_policy);
161 bool cpufreq_driver_is_slow(void)
163 return !(cpufreq_driver->flags & CPUFREQ_DRIVER_FAST);
165 EXPORT_SYMBOL_GPL(cpufreq_driver_is_slow);
167 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
169 if (have_governor_per_policy())
170 return &policy->kobj;
172 return cpufreq_global_kobject;
174 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
176 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
178 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
180 return policy && !policy_is_inactive(policy) ?
181 policy->freq_table : NULL;
183 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
185 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
191 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
193 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
194 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
195 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
196 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
197 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
198 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
200 idle_time = cur_wall_time - busy_time;
202 *wall = cputime_to_usecs(cur_wall_time);
204 return cputime_to_usecs(idle_time);
207 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
209 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
211 if (idle_time == -1ULL)
212 return get_cpu_idle_time_jiffy(cpu, wall);
214 idle_time += get_cpu_iowait_time_us(cpu, wall);
218 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
221 * This is a generic cpufreq init() routine which can be used by cpufreq
222 * drivers of SMP systems. It will do following:
223 * - validate & show freq table passed
224 * - set policies transition latency
225 * - policy->cpus with all possible CPUs
227 int cpufreq_generic_init(struct cpufreq_policy *policy,
228 struct cpufreq_frequency_table *table,
229 unsigned int transition_latency)
233 ret = cpufreq_table_validate_and_show(policy, table);
235 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
239 policy->cpuinfo.transition_latency = transition_latency;
242 * The driver only supports the SMP configuration where all processors
243 * share the clock and voltage and clock.
245 cpumask_setall(policy->cpus);
249 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
251 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
253 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
255 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
257 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
259 unsigned int cpufreq_generic_get(unsigned int cpu)
261 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
263 if (!policy || IS_ERR(policy->clk)) {
264 pr_err("%s: No %s associated to cpu: %d\n",
265 __func__, policy ? "clk" : "policy", cpu);
269 return clk_get_rate(policy->clk) / 1000;
271 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
274 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
276 * @cpu: cpu to find policy for.
278 * This returns policy for 'cpu', returns NULL if it doesn't exist.
279 * It also increments the kobject reference count to mark it busy and so would
280 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
281 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
282 * freed as that depends on the kobj count.
284 * Return: A valid policy on success, otherwise NULL on failure.
286 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
288 struct cpufreq_policy *policy = NULL;
291 if (WARN_ON(cpu >= nr_cpu_ids))
294 /* get the cpufreq driver */
295 read_lock_irqsave(&cpufreq_driver_lock, flags);
297 if (cpufreq_driver) {
299 policy = cpufreq_cpu_get_raw(cpu);
301 kobject_get(&policy->kobj);
304 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
308 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
311 * cpufreq_cpu_put: Decrements the usage count of a policy
313 * @policy: policy earlier returned by cpufreq_cpu_get().
315 * This decrements the kobject reference count incremented earlier by calling
318 void cpufreq_cpu_put(struct cpufreq_policy *policy)
320 kobject_put(&policy->kobj);
322 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
324 /*********************************************************************
325 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
326 *********************************************************************/
329 * adjust_jiffies - adjust the system "loops_per_jiffy"
331 * This function alters the system "loops_per_jiffy" for the clock
332 * speed change. Note that loops_per_jiffy cannot be updated on SMP
333 * systems as each CPU might be scaled differently. So, use the arch
334 * per-CPU loops_per_jiffy value wherever possible.
336 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
339 static unsigned long l_p_j_ref;
340 static unsigned int l_p_j_ref_freq;
342 if (ci->flags & CPUFREQ_CONST_LOOPS)
345 if (!l_p_j_ref_freq) {
346 l_p_j_ref = loops_per_jiffy;
347 l_p_j_ref_freq = ci->old;
348 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
349 l_p_j_ref, l_p_j_ref_freq);
351 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
352 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
354 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
355 loops_per_jiffy, ci->new);
360 /*********************************************************************
361 * FREQUENCY INVARIANT CPU CAPACITY *
362 *********************************************************************/
364 static DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
365 static DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE;
368 scale_freq_capacity(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs)
370 unsigned long cur = freqs ? freqs->new : policy->cur;
371 unsigned long scale = (cur << SCHED_CAPACITY_SHIFT) / policy->max;
372 struct cpufreq_cpuinfo *cpuinfo = &policy->cpuinfo;
375 pr_debug("cpus %*pbl cur/cur max freq %lu/%u kHz freq scale %lu\n",
376 cpumask_pr_args(policy->cpus), cur, policy->max, scale);
378 for_each_cpu(cpu, policy->cpus)
379 per_cpu(freq_scale, cpu) = scale;
384 scale = (policy->max << SCHED_CAPACITY_SHIFT) / cpuinfo->max_freq;
386 pr_debug("cpus %*pbl cur max/max freq %u/%u kHz max freq scale %lu\n",
387 cpumask_pr_args(policy->cpus), policy->max, cpuinfo->max_freq,
390 for_each_cpu(cpu, policy->cpus)
391 per_cpu(max_freq_scale, cpu) = scale;
394 unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu)
396 return per_cpu(freq_scale, cpu);
399 unsigned long cpufreq_scale_max_freq_capacity(int cpu)
401 return per_cpu(max_freq_scale, cpu);
404 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
405 struct cpufreq_freqs *freqs, unsigned int state)
407 BUG_ON(irqs_disabled());
409 if (cpufreq_disabled())
412 freqs->flags = cpufreq_driver->flags;
413 pr_debug("notification %u of frequency transition to %u kHz\n",
418 case CPUFREQ_PRECHANGE:
419 /* detect if the driver reported a value as "old frequency"
420 * which is not equal to what the cpufreq core thinks is
423 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
424 if ((policy) && (policy->cpu == freqs->cpu) &&
425 (policy->cur) && (policy->cur != freqs->old)) {
426 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
427 freqs->old, policy->cur);
428 freqs->old = policy->cur;
431 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
432 CPUFREQ_PRECHANGE, freqs);
433 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
436 case CPUFREQ_POSTCHANGE:
437 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
438 pr_debug("FREQ: %lu - CPU: %lu\n",
439 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
440 trace_cpu_frequency(freqs->new, freqs->cpu);
441 cpufreq_times_record_transition(freqs);
442 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
443 CPUFREQ_POSTCHANGE, freqs);
444 if (likely(policy) && likely(policy->cpu == freqs->cpu))
445 policy->cur = freqs->new;
451 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
452 * on frequency transition.
454 * This function calls the transition notifiers and the "adjust_jiffies"
455 * function. It is called twice on all CPU frequency changes that have
458 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
459 struct cpufreq_freqs *freqs, unsigned int state)
461 for_each_cpu(freqs->cpu, policy->cpus)
462 __cpufreq_notify_transition(policy, freqs, state);
465 /* Do post notifications when there are chances that transition has failed */
466 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
467 struct cpufreq_freqs *freqs, int transition_failed)
469 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
470 if (!transition_failed)
473 swap(freqs->old, freqs->new);
474 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
475 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
478 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
479 struct cpufreq_freqs *freqs)
486 * Catch double invocations of _begin() which lead to self-deadlock.
487 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
488 * doesn't invoke _begin() on their behalf, and hence the chances of
489 * double invocations are very low. Moreover, there are scenarios
490 * where these checks can emit false-positive warnings in these
491 * drivers; so we avoid that by skipping them altogether.
493 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
494 && current == policy->transition_task);
497 wait_event(policy->transition_wait, !policy->transition_ongoing);
499 spin_lock(&policy->transition_lock);
501 if (unlikely(policy->transition_ongoing)) {
502 spin_unlock(&policy->transition_lock);
506 policy->transition_ongoing = true;
507 policy->transition_task = current;
509 spin_unlock(&policy->transition_lock);
511 scale_freq_capacity(policy, freqs);
513 for_each_cpu(cpu, policy->cpus)
514 trace_cpu_capacity(capacity_curr_of(cpu), cpu);
517 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
519 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
521 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
522 struct cpufreq_freqs *freqs, int transition_failed)
524 if (unlikely(WARN_ON(!policy->transition_ongoing)))
527 cpufreq_notify_post_transition(policy, freqs, transition_failed);
529 policy->transition_ongoing = false;
530 policy->transition_task = NULL;
532 wake_up(&policy->transition_wait);
534 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
537 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
539 * @target_freq: target frequency to resolve.
541 * The target to driver frequency mapping is cached in the policy.
543 * Return: Lowest driver-supported frequency greater than or equal to the
544 * given target_freq, subject to policy (min/max) and driver limitations.
546 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
547 unsigned int target_freq)
549 target_freq = clamp_val(target_freq, policy->min, policy->max);
550 policy->cached_target_freq = target_freq;
552 if (cpufreq_driver->target_index) {
555 rv = cpufreq_frequency_table_target(policy, policy->freq_table,
561 policy->cached_resolved_idx = idx;
562 return policy->freq_table[idx].frequency;
567 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
569 /*********************************************************************
571 *********************************************************************/
572 static ssize_t show_boost(struct kobject *kobj,
573 struct attribute *attr, char *buf)
575 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
578 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
579 const char *buf, size_t count)
583 ret = sscanf(buf, "%d", &enable);
584 if (ret != 1 || enable < 0 || enable > 1)
587 if (cpufreq_boost_trigger_state(enable)) {
588 pr_err("%s: Cannot %s BOOST!\n",
589 __func__, enable ? "enable" : "disable");
593 pr_debug("%s: cpufreq BOOST %s\n",
594 __func__, enable ? "enabled" : "disabled");
598 define_one_global_rw(boost);
600 static struct cpufreq_governor *find_governor(const char *str_governor)
602 struct cpufreq_governor *t;
605 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
612 * cpufreq_parse_governor - parse a governor string
614 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
615 struct cpufreq_governor **governor)
619 if (cpufreq_driver->setpolicy) {
620 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
621 *policy = CPUFREQ_POLICY_PERFORMANCE;
623 } else if (!strncasecmp(str_governor, "powersave",
625 *policy = CPUFREQ_POLICY_POWERSAVE;
629 struct cpufreq_governor *t;
631 mutex_lock(&cpufreq_governor_mutex);
633 t = find_governor(str_governor);
638 mutex_unlock(&cpufreq_governor_mutex);
639 ret = request_module("cpufreq_%s", str_governor);
640 mutex_lock(&cpufreq_governor_mutex);
643 t = find_governor(str_governor);
651 mutex_unlock(&cpufreq_governor_mutex);
657 * cpufreq_per_cpu_attr_read() / show_##file_name() -
658 * print out cpufreq information
660 * Write out information from cpufreq_driver->policy[cpu]; object must be
664 #define show_one(file_name, object) \
665 static ssize_t show_##file_name \
666 (struct cpufreq_policy *policy, char *buf) \
668 return sprintf(buf, "%u\n", policy->object); \
671 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
672 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
673 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
674 show_one(scaling_min_freq, min);
675 show_one(scaling_max_freq, max);
677 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
681 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
682 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
684 ret = sprintf(buf, "%u\n", policy->cur);
688 static int cpufreq_set_policy(struct cpufreq_policy *policy,
689 struct cpufreq_policy *new_policy);
692 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
694 #define store_one(file_name, object) \
695 static ssize_t store_##file_name \
696 (struct cpufreq_policy *policy, const char *buf, size_t count) \
699 struct cpufreq_policy new_policy; \
701 memcpy(&new_policy, policy, sizeof(*policy)); \
702 new_policy.min = policy->user_policy.min; \
703 new_policy.max = policy->user_policy.max; \
705 ret = sscanf(buf, "%u", &new_policy.object); \
709 temp = new_policy.object; \
710 ret = cpufreq_set_policy(policy, &new_policy); \
712 policy->user_policy.object = temp; \
714 return ret ? ret : count; \
717 store_one(scaling_min_freq, min);
718 store_one(scaling_max_freq, max);
721 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
723 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
726 unsigned int cur_freq = __cpufreq_get(policy);
729 return sprintf(buf, "%u\n", cur_freq);
731 return sprintf(buf, "<unknown>\n");
735 * show_scaling_governor - show the current policy for the specified CPU
737 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
739 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
740 return sprintf(buf, "powersave\n");
741 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
742 return sprintf(buf, "performance\n");
743 else if (policy->governor)
744 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
745 policy->governor->name);
750 * store_scaling_governor - store policy for the specified CPU
752 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
753 const char *buf, size_t count)
756 char str_governor[16];
757 struct cpufreq_policy new_policy;
759 memcpy(&new_policy, policy, sizeof(*policy));
761 ret = sscanf(buf, "%15s", str_governor);
765 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
766 &new_policy.governor))
769 ret = cpufreq_set_policy(policy, &new_policy);
770 return ret ? ret : count;
774 * show_scaling_driver - show the cpufreq driver currently loaded
776 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
778 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
782 * show_scaling_available_governors - show the available CPUfreq governors
784 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
788 struct cpufreq_governor *t;
791 i += sprintf(buf, "performance powersave");
795 for_each_governor(t) {
796 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
797 - (CPUFREQ_NAME_LEN + 2)))
799 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
802 i += sprintf(&buf[i], "\n");
806 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
811 for_each_cpu(cpu, mask) {
813 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
814 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
815 if (i >= (PAGE_SIZE - 5))
818 i += sprintf(&buf[i], "\n");
821 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
824 * show_related_cpus - show the CPUs affected by each transition even if
825 * hw coordination is in use
827 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
829 return cpufreq_show_cpus(policy->related_cpus, buf);
833 * show_affected_cpus - show the CPUs affected by each transition
835 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
837 return cpufreq_show_cpus(policy->cpus, buf);
840 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
841 const char *buf, size_t count)
843 unsigned int freq = 0;
846 if (!policy->governor || !policy->governor->store_setspeed)
849 ret = sscanf(buf, "%u", &freq);
853 policy->governor->store_setspeed(policy, freq);
858 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
860 if (!policy->governor || !policy->governor->show_setspeed)
861 return sprintf(buf, "<unsupported>\n");
863 return policy->governor->show_setspeed(policy, buf);
867 * show_bios_limit - show the current cpufreq HW/BIOS limitation
869 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
873 if (cpufreq_driver->bios_limit) {
874 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
876 return sprintf(buf, "%u\n", limit);
878 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
881 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
882 cpufreq_freq_attr_ro(cpuinfo_min_freq);
883 cpufreq_freq_attr_ro(cpuinfo_max_freq);
884 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
885 cpufreq_freq_attr_ro(scaling_available_governors);
886 cpufreq_freq_attr_ro(scaling_driver);
887 cpufreq_freq_attr_ro(scaling_cur_freq);
888 cpufreq_freq_attr_ro(bios_limit);
889 cpufreq_freq_attr_ro(related_cpus);
890 cpufreq_freq_attr_ro(affected_cpus);
891 cpufreq_freq_attr_rw(scaling_min_freq);
892 cpufreq_freq_attr_rw(scaling_max_freq);
893 cpufreq_freq_attr_rw(scaling_governor);
894 cpufreq_freq_attr_rw(scaling_setspeed);
896 static struct attribute *default_attrs[] = {
897 &cpuinfo_min_freq.attr,
898 &cpuinfo_max_freq.attr,
899 &cpuinfo_transition_latency.attr,
900 &scaling_min_freq.attr,
901 &scaling_max_freq.attr,
904 &scaling_governor.attr,
905 &scaling_driver.attr,
906 &scaling_available_governors.attr,
907 &scaling_setspeed.attr,
911 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
912 #define to_attr(a) container_of(a, struct freq_attr, attr)
914 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
916 struct cpufreq_policy *policy = to_policy(kobj);
917 struct freq_attr *fattr = to_attr(attr);
920 down_read(&policy->rwsem);
923 ret = fattr->show(policy, buf);
927 up_read(&policy->rwsem);
932 static ssize_t store(struct kobject *kobj, struct attribute *attr,
933 const char *buf, size_t count)
935 struct cpufreq_policy *policy = to_policy(kobj);
936 struct freq_attr *fattr = to_attr(attr);
937 ssize_t ret = -EINVAL;
941 if (!cpu_online(policy->cpu))
944 down_write(&policy->rwsem);
947 ret = fattr->store(policy, buf, count);
951 up_write(&policy->rwsem);
958 static void cpufreq_sysfs_release(struct kobject *kobj)
960 struct cpufreq_policy *policy = to_policy(kobj);
961 pr_debug("last reference is dropped\n");
962 complete(&policy->kobj_unregister);
965 static const struct sysfs_ops sysfs_ops = {
970 static struct kobj_type ktype_cpufreq = {
971 .sysfs_ops = &sysfs_ops,
972 .default_attrs = default_attrs,
973 .release = cpufreq_sysfs_release,
976 static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
978 struct device *cpu_dev;
980 pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
985 cpu_dev = get_cpu_device(cpu);
986 if (WARN_ON(!cpu_dev))
989 return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
992 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
994 struct device *cpu_dev;
996 pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
998 cpu_dev = get_cpu_device(cpu);
999 if (WARN_ON(!cpu_dev))
1002 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1005 /* Add/remove symlinks for all related CPUs */
1006 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
1011 /* Some related CPUs might not be present (physically hotplugged) */
1012 for_each_cpu(j, policy->real_cpus) {
1013 ret = add_cpu_dev_symlink(policy, j);
1021 static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1025 /* Some related CPUs might not be present (physically hotplugged) */
1026 for_each_cpu(j, policy->real_cpus)
1027 remove_cpu_dev_symlink(policy, j);
1030 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1032 struct freq_attr **drv_attr;
1035 /* set up files for this cpu device */
1036 drv_attr = cpufreq_driver->attr;
1037 while (drv_attr && *drv_attr) {
1038 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1043 if (cpufreq_driver->get) {
1044 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1049 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1053 if (cpufreq_driver->bios_limit) {
1054 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1059 return cpufreq_add_dev_symlink(policy);
1062 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1064 struct cpufreq_governor *gov = NULL;
1065 struct cpufreq_policy new_policy;
1067 memcpy(&new_policy, policy, sizeof(*policy));
1069 /* Update governor of new_policy to the governor used before hotplug */
1070 gov = find_governor(policy->last_governor);
1072 pr_debug("Restoring governor %s for cpu %d\n",
1073 policy->governor->name, policy->cpu);
1075 gov = CPUFREQ_DEFAULT_GOVERNOR;
1077 new_policy.governor = gov;
1079 /* Use the default policy if there is no last_policy. */
1080 if (cpufreq_driver->setpolicy) {
1081 if (policy->last_policy)
1082 new_policy.policy = policy->last_policy;
1084 cpufreq_parse_governor(gov->name, &new_policy.policy,
1087 /* set default policy */
1088 return cpufreq_set_policy(policy, &new_policy);
1091 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1095 /* Has this CPU been taken care of already? */
1096 if (cpumask_test_cpu(cpu, policy->cpus))
1100 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1102 pr_err("%s: Failed to stop governor\n", __func__);
1107 down_write(&policy->rwsem);
1108 cpumask_set_cpu(cpu, policy->cpus);
1109 up_write(&policy->rwsem);
1112 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1114 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1117 pr_err("%s: Failed to start governor\n", __func__);
1125 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1127 struct device *dev = get_cpu_device(cpu);
1128 struct cpufreq_policy *policy;
1133 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1137 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1138 goto err_free_policy;
1140 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1141 goto err_free_cpumask;
1143 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1144 goto err_free_rcpumask;
1146 kobject_init(&policy->kobj, &ktype_cpufreq);
1147 INIT_LIST_HEAD(&policy->policy_list);
1148 init_rwsem(&policy->rwsem);
1149 spin_lock_init(&policy->transition_lock);
1150 init_waitqueue_head(&policy->transition_wait);
1151 init_completion(&policy->kobj_unregister);
1152 INIT_WORK(&policy->update, handle_update);
1158 free_cpumask_var(policy->related_cpus);
1160 free_cpumask_var(policy->cpus);
1167 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
1169 struct kobject *kobj;
1170 struct completion *cmp;
1173 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1174 CPUFREQ_REMOVE_POLICY, policy);
1176 down_write(&policy->rwsem);
1177 cpufreq_remove_dev_symlink(policy);
1178 kobj = &policy->kobj;
1179 cmp = &policy->kobj_unregister;
1180 up_write(&policy->rwsem);
1184 * We need to make sure that the underlying kobj is
1185 * actually not referenced anymore by anybody before we
1186 * proceed with unloading.
1188 pr_debug("waiting for dropping of refcount\n");
1189 wait_for_completion(cmp);
1190 pr_debug("wait complete\n");
1193 static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1195 unsigned long flags;
1198 /* Remove policy from list */
1199 write_lock_irqsave(&cpufreq_driver_lock, flags);
1200 list_del(&policy->policy_list);
1202 for_each_cpu(cpu, policy->related_cpus)
1203 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1204 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1206 cpufreq_policy_put_kobj(policy, notify);
1207 free_cpumask_var(policy->real_cpus);
1208 free_cpumask_var(policy->related_cpus);
1209 free_cpumask_var(policy->cpus);
/*
 * cpufreq_online - bring CPU @cpu under cpufreq management.
 *
 * If a policy already covers @cpu and is active, just attach the CPU to it.
 * Otherwise (re)initialize the policy: call the driver's ->init(), set up
 * cpumasks and the kobject, check/fix the initial frequency, register the
 * policy on cpufreq_policy_list and start the governor via
 * cpufreq_init_policy().  Returns 0 on success or a negative errno.
 *
 * NOTE(review): this extraction is missing lines (conditions/braces); comments
 * below describe only the statements that are visible.
 */
1213 static int cpufreq_online(unsigned int cpu)
1215 struct cpufreq_policy *policy;
1217 unsigned long flags;
1221 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1223 /* Check if this CPU already has a policy to manage it */
1224 policy = per_cpu(cpufreq_cpu_data, cpu);
1226 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1227 if (!policy_is_inactive(policy))
1228 return cpufreq_add_policy_cpu(policy, cpu);
1230 /* This is the only online CPU for the policy. Start over. */
1232 down_write(&policy->rwsem);
1234 policy->governor = NULL;
1235 up_write(&policy->rwsem);
1238 policy = cpufreq_policy_alloc(cpu);
1243 cpumask_copy(policy->cpus, cpumask_of(cpu));
1245 /* call driver. From then on the cpufreq must be able
1246 * to accept all calls to ->verify and ->setpolicy for this CPU
1248 ret = cpufreq_driver->init(policy);
1250 pr_debug("initialization failed\n");
1251 goto out_free_policy;
1254 down_write(&policy->rwsem);
1257 /* related_cpus should at least include policy->cpus. */
1258 cpumask_copy(policy->related_cpus, policy->cpus);
1259 /* Remember CPUs present at the policy creation time. */
1260 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1262 /* Name and add the kobject */
1263 ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
1265 cpumask_first(policy->related_cpus));
1267 pr_err("%s: failed to add policy->kobj: %d\n", __func__,
1269 goto out_exit_policy;
1274 * affected cpus must always be the one, which are online. We aren't
1275 * managing offline cpus here.
1277 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
/* Snapshot driver-provided limits as the user policy defaults. */
1280 policy->user_policy.min = policy->min;
1281 policy->user_policy.max = policy->max;
/* Publish the policy for every related CPU under the driver lock. */
1283 write_lock_irqsave(&cpufreq_driver_lock, flags);
1284 for_each_cpu(j, policy->related_cpus)
1285 per_cpu(cpufreq_cpu_data, j) = policy;
1286 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1288 policy->min = policy->user_policy.min;
1289 policy->max = policy->user_policy.max;
/* Drivers with ->get but no ->setpolicy report the real current freq. */
1292 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1293 policy->cur = cpufreq_driver->get(policy->cpu);
1295 pr_err("%s: ->get() failed\n", __func__);
1296 goto out_exit_policy;
1301 * Sometimes boot loaders set CPU frequency to a value outside of
1302 * frequency table present with cpufreq core. In such cases CPU might be
1303 * unstable if it has to run on that frequency for long duration of time
1304 * and so its better to set it to a frequency which is specified in
1305 * freq-table. This also makes cpufreq stats inconsistent as
1306 * cpufreq-stats would fail to register because current frequency of CPU
1307 * isn't found in freq-table.
1309 * Because we don't want this change to effect boot process badly, we go
1310 * for the next freq which is >= policy->cur ('cur' must be set by now,
1311 * otherwise we will end up setting freq to lowest of the table as 'cur'
1312 * is initialized to zero).
1314 * We are passing target-freq as "policy->cur - 1" otherwise
1315 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1316 * equal to target-freq.
1318 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1320 /* Are we running at unknown frequency ? */
1321 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1322 if (ret == -EINVAL) {
1323 /* Warn user and fix it */
1324 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1325 __func__, policy->cpu, policy->cur);
1326 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1327 CPUFREQ_RELATION_L);
1330 * Reaching here after boot in a few seconds may not
1331 * mean that system will remain stable at "unknown"
1332 * frequency for longer duration. Hence, a BUG_ON().
1335 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1336 __func__, policy->cpu, policy->cur);
/* Tell policy notifiers the policy is starting up. */
1340 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1341 CPUFREQ_START, policy);
1344 ret = cpufreq_add_dev_interface(policy);
1346 goto out_exit_policy;
1347 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1348 CPUFREQ_CREATE_POLICY, policy);
1349 cpufreq_times_create_policy(policy);
1351 write_lock_irqsave(&cpufreq_driver_lock, flags);
1352 list_add(&policy->policy_list, &cpufreq_policy_list);
1353 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1356 ret = cpufreq_init_policy(policy);
1358 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1359 __func__, cpu, ret);
1360 /* cpufreq_policy_free() will notify based on this */
1362 goto out_exit_policy;
1365 up_write(&policy->rwsem);
1367 kobject_uevent(&policy->kobj, KOBJ_ADD);
1369 /* Callback for handling stuff after policy is ready */
1370 if (cpufreq_driver->ready)
1371 cpufreq_driver->ready(policy);
1373 pr_debug("initialization complete\n");
/* Error unwind: release the rwsem, let the driver tear down, free policy. */
1378 up_write(&policy->rwsem);
1380 if (cpufreq_driver->exit)
1381 cpufreq_driver->exit(policy);
1383 cpufreq_policy_free(policy, !new_policy);
1388 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1390 * @sif: Subsystem interface structure pointer (not used)
/*
 * cpufreq_add_dev - subsys interface callback for a new CPU device.
 *
 * For an online CPU, delegate to cpufreq_online().  For an offline CPU the
 * hotplug notifier will do the real work later; here we only create the
 * per-CPU sysfs symlink if a policy exists and the CPU was not already
 * recorded in policy->real_cpus (test-and-set avoids a duplicate link).
 */
1392 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1394 unsigned cpu = dev->id;
1397 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1399 if (cpu_online(cpu)) {
1400 ret = cpufreq_online(cpu);
1403 * A hotplug notifier will follow and we will handle it as CPU
1404 * online then. For now, just create the sysfs link, unless
1405 * there is no policy or the link is already present.
1407 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1409 ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1410 ? add_cpu_dev_symlink(policy, cpu) : 0;
/*
 * cpufreq_offline_prepare - first stage of taking @cpu out of cpufreq.
 *
 * Stops the governor, drops @cpu from policy->cpus and, if the policy goes
 * inactive, records the governor name/policy so they can be restored when a
 * CPU of this policy comes back.  If other CPUs remain, a new policy->cpu is
 * nominated and the governor is restarted; otherwise the driver's
 * ->stop_cpu() hook (if any) is invoked.
 */
1416 static void cpufreq_offline_prepare(unsigned int cpu)
1418 struct cpufreq_policy *policy;
1420 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1422 policy = cpufreq_cpu_get_raw(cpu);
1424 pr_debug("%s: No cpu_data found\n", __func__);
1429 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1431 pr_err("%s: Failed to stop governor\n", __func__);
1434 down_write(&policy->rwsem);
1435 cpumask_clear_cpu(cpu, policy->cpus);
1437 if (policy_is_inactive(policy)) {
/*
 * NOTE(review): strncpy() does not guarantee NUL-termination if the
 * governor name fills the buffer — relies on the (not visible here)
 * length argument being < sizeof(last_governor); verify upstream.
 */
1439 strncpy(policy->last_governor, policy->governor->name,
1442 policy->last_policy = policy->policy;
1443 } else if (cpu == policy->cpu) {
1444 /* Nominate new CPU */
1445 policy->cpu = cpumask_any(policy->cpus);
1447 up_write(&policy->rwsem);
1449 /* Start governor again for active policy */
1450 if (!policy_is_inactive(policy)) {
1452 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1454 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1457 pr_err("%s: Failed to start governor\n", __func__);
1459 } else if (cpufreq_driver->stop_cpu) {
1460 cpufreq_driver->stop_cpu(policy);
/*
 * cpufreq_offline_finish - second stage of CPU removal.
 *
 * Runs after cpufreq_offline_prepare().  Only acts on policies that became
 * inactive: exits the governor and calls the driver's ->exit() so a later
 * light-weight ->init() can succeed, clearing the stale freq_table pointer.
 */
1464 static void cpufreq_offline_finish(unsigned int cpu)
1466 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1469 pr_debug("%s: No cpu_data found\n", __func__);
1473 /* Only proceed for inactive policies */
1474 if (!policy_is_inactive(policy))
1477 /* If cpu is last user of policy, free policy */
1479 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1481 pr_err("%s: Failed to exit governor\n", __func__);
1485 * Perform the ->exit() even during light-weight tear-down,
1486 * since this is a core component, and is essential for the
1487 * subsequent light-weight ->init() to succeed.
1489 if (cpufreq_driver->exit) {
1490 cpufreq_driver->exit(policy);
1491 policy->freq_table = NULL;
1496 * cpufreq_remove_dev - remove a CPU device
1498 * Removes the cpufreq interface for a CPU device.
/*
 * cpufreq_remove_dev - subsys interface callback for CPU device removal.
 *
 * For an online CPU, run both offline stages first.  Then drop the CPU from
 * policy->real_cpus, remove its sysfs symlink, and free the policy once no
 * real CPUs remain attached to it.
 */
1500 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1502 unsigned int cpu = dev->id;
1503 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1508 if (cpu_online(cpu)) {
1509 cpufreq_offline_prepare(cpu);
1510 cpufreq_offline_finish(cpu);
1513 cpumask_clear_cpu(cpu, policy->real_cpus);
1514 remove_cpu_dev_symlink(policy, cpu);
/* Last CPU gone: 'true' requests full notification during free. */
1516 if (cpumask_empty(policy->real_cpus))
1517 cpufreq_policy_free(policy, true);
/*
 * handle_update - deferred work handler that re-evaluates a policy.
 *
 * Scheduled (e.g. from cpufreq_out_of_sync() paths) to run
 * cpufreq_update_policy() in process context for policy->cpu.
 */
1520 static void handle_update(struct work_struct *work)
1522 struct cpufreq_policy *policy =
1523 container_of(work, struct cpufreq_policy, update);
1524 unsigned int cpu = policy->cpu;
1525 pr_debug("handle_update for cpu %u called\n", cpu);
1526 cpufreq_update_policy(cpu);
1530 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1532 * @policy: policy managing CPUs
1533 * @new_freq: CPU frequency the CPU actually runs at
1535 * We adjust to current frequency first, and need to clean up later.
1536 * So either call to cpufreq_update_policy() or schedule handle_update()).
/*
 * cpufreq_out_of_sync - reconcile policy->cur with the hardware frequency.
 *
 * Emits a synthetic frequency transition (begin/end with no actual change
 * request) from the cached policy->cur to @new_freq so notifiers and
 * bookkeeping catch up with what the CPU is really running at.
 */
1538 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1539 unsigned int new_freq)
1541 struct cpufreq_freqs freqs;
1543 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1544 policy->cur, new_freq);
1546 freqs.old = policy->cur;
1547 freqs.new = new_freq;
1549 cpufreq_freq_transition_begin(policy, &freqs);
1550 cpufreq_freq_transition_end(policy, &freqs, 0);
1554 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1557 * This is the last known freq, without actually getting it from the driver.
1558 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
/*
 * cpufreq_quick_get - last known frequency (kHz) of @cpu without hardware I/O.
 *
 * setpolicy-style drivers that provide ->get() are asked directly; otherwise
 * the cached policy->cur is returned (0 if no policy exists).  Takes and
 * releases a policy reference around the read.
 */
1560 unsigned int cpufreq_quick_get(unsigned int cpu)
1562 struct cpufreq_policy *policy;
1563 unsigned int ret_freq = 0;
1565 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1566 return cpufreq_driver->get(cpu);
1568 policy = cpufreq_cpu_get(cpu);
1570 ret_freq = policy->cur;
1571 cpufreq_cpu_put(policy);
1576 EXPORT_SYMBOL(cpufreq_quick_get);
1579 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1582 * Just return the max possible frequency for a given CPU.
/*
 * cpufreq_quick_get_max - current policy maximum frequency (kHz) for @cpu.
 *
 * Returns policy->max under a policy reference, or 0 if no policy exists.
 */
1584 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1586 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1587 unsigned int ret_freq = 0;
1590 ret_freq = policy->max;
1591 cpufreq_cpu_put(policy);
1596 EXPORT_SYMBOL(cpufreq_quick_get_max);
/*
 * __cpufreq_get - read the hardware frequency and resync if it drifted.
 *
 * Caller holds policy->rwsem.  Queries the driver's ->get(); for drivers
 * without CPUFREQ_CONST_LOOPS, a mismatch against the cached policy->cur
 * triggers cpufreq_out_of_sync() plus a deferred policy re-evaluation.
 * Inactive policies are never updated.
 */
1598 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1600 unsigned int ret_freq = 0;
1602 if (!cpufreq_driver->get)
1605 ret_freq = cpufreq_driver->get(policy->cpu);
1607 /* Updating inactive policies is invalid, so avoid doing that. */
1608 if (unlikely(policy_is_inactive(policy)))
1611 if (ret_freq && policy->cur &&
1612 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1613 /* verify no discrepancy between actual and
1614 saved value exists */
1615 if (unlikely(ret_freq != policy->cur)) {
1616 cpufreq_out_of_sync(policy, ret_freq);
1617 schedule_work(&policy->update);
1625 * cpufreq_get - get the current CPU frequency (in kHz)
1628 * Get the CPU current (static) CPU frequency
/*
 * cpufreq_get - current frequency (kHz) of @cpu, queried from the driver.
 *
 * Wraps __cpufreq_get() with a policy reference and a read-lock on
 * policy->rwsem.  Returns 0 if no policy exists for @cpu.
 */
1630 unsigned int cpufreq_get(unsigned int cpu)
1632 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1633 unsigned int ret_freq = 0;
1636 down_read(&policy->rwsem);
1637 ret_freq = __cpufreq_get(policy);
1638 up_read(&policy->rwsem);
1640 cpufreq_cpu_put(policy);
1645 EXPORT_SYMBOL(cpufreq_get);
/* Hook cpufreq into the CPU subsystem: per-CPU-device add/remove callbacks. */
1647 static struct subsys_interface cpufreq_interface = {
1649 .subsys = &cpu_subsys,
1650 .add_dev = cpufreq_add_dev,
1651 .remove_dev = cpufreq_remove_dev,
1655 * In case platform wants some specific frequency to be configured
/*
 * cpufreq_generic_suspend - generic ->suspend() helper for drivers.
 *
 * Switches the policy to policy->suspend_freq (relation H) for platforms
 * that must run a fixed frequency across suspend.  No-op (with a debug
 * message) when suspend_freq is unset; logs and returns the error if the
 * target call fails.
 */
1658 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1662 if (!policy->suspend_freq) {
1663 pr_debug("%s: suspend_freq not defined\n", __func__);
1667 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1668 policy->suspend_freq);
1670 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1671 CPUFREQ_RELATION_H);
1673 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1674 __func__, policy->suspend_freq, ret);
1678 EXPORT_SYMBOL(cpufreq_generic_suspend);
1681 * cpufreq_suspend() - Suspend CPUFreq governors
1683 * Called during system wide Suspend/Hibernate cycles for suspending governors
1684 * as some platforms can't change frequency after this point in suspend cycle.
1685 * Because some of the devices (like: i2c, regulators, etc) they use for
1686 * changing frequency are suspended quickly after this point.
/*
 * cpufreq_suspend - stop governors on all active policies for system suspend.
 *
 * Also invokes the driver's ->suspend() hook per policy when present, then
 * sets cpufreq_suspended so later governor operations are short-circuited
 * (see __cpufreq_governor()).  Bails out early if no driver is registered.
 */
1688 void cpufreq_suspend(void)
1690 struct cpufreq_policy *policy;
1692 if (!cpufreq_driver)
1698 pr_debug("%s: Suspending Governors\n", __func__);
1700 for_each_active_policy(policy) {
1701 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1702 pr_err("%s: Failed to stop governor for policy: %p\n",
1704 else if (cpufreq_driver->suspend
1705 && cpufreq_driver->suspend(policy))
1706 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1711 cpufreq_suspended = true;
1715 * cpufreq_resume() - Resume CPUFreq governors
1717 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1718 * are suspended with cpufreq_suspend().
/*
 * cpufreq_resume - restart governors suspended by cpufreq_suspend().
 *
 * Clears cpufreq_suspended first so governor calls work again, runs the
 * driver's ->resume() and restarts each active policy's governor, then
 * schedules a policy update for the first online CPU to resync the
 * frequency with whatever firmware left behind.
 */
1720 void cpufreq_resume(void)
1722 struct cpufreq_policy *policy;
1724 if (!cpufreq_driver)
1727 cpufreq_suspended = false;
1732 pr_debug("%s: Resuming Governors\n", __func__);
1734 for_each_active_policy(policy) {
1735 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1736 pr_err("%s: Failed to resume driver: %p\n", __func__,
1738 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1739 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1740 pr_err("%s: Failed to start governor for policy: %p\n",
1745 * schedule call cpufreq_update_policy() for first-online CPU, as that
1746 * wouldn't be hotplugged-out on suspend. It will verify that the
1747 * current freq is in sync with what we believe it to be.
1749 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1750 if (WARN_ON(!policy))
1753 schedule_work(&policy->update);
1757 * cpufreq_get_current_driver - return current driver's name
1759 * Return the name string of the currently loaded cpufreq driver
/* Return the registered driver's name (NULL path handled outside view). */
1762 const char *cpufreq_get_current_driver(void)
1765 return cpufreq_driver->name;
1769 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1772 * cpufreq_get_driver_data - return current driver data
1774 * Return the private data of the currently loaded cpufreq
1775 * driver, or NULL if no cpufreq driver is loaded.
/* Return the registered driver's private data pointer. */
1777 void *cpufreq_get_driver_data(void)
1780 return cpufreq_driver->driver_data;
1784 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1786 /*********************************************************************
1787 * NOTIFIER LISTS INTERFACE *
1788 *********************************************************************/
1791 * cpufreq_register_notifier - register a driver with cpufreq
1792 * @nb: notifier function to register
1793 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1795 * Add a driver to one of two lists: either a list of drivers that
1796 * are notified about clock rate changes (once before and once after
1797 * the transition), or a list of drivers that are notified about
1798 * changes in cpufreq policy.
1800 * This function may sleep, and has the same return conditions as
1801 * blocking_notifier_chain_register.
/*
 * cpufreq_register_notifier - attach @nb to one of the two notifier chains.
 *
 * Transition notifiers use an SRCU chain (called around every frequency
 * change); policy notifiers use a blocking chain.  May sleep; returns the
 * underlying chain-register result.
 */
1803 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1807 if (cpufreq_disabled())
1810 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1813 case CPUFREQ_TRANSITION_NOTIFIER:
1814 ret = srcu_notifier_chain_register(
1815 &cpufreq_transition_notifier_list, nb);
1817 case CPUFREQ_POLICY_NOTIFIER:
1818 ret = blocking_notifier_chain_register(
1819 &cpufreq_policy_notifier_list, nb);
1827 EXPORT_SYMBOL(cpufreq_register_notifier);
1830 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1831 * @nb: notifier block to be unregistered
1832 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1834 * Remove a driver from the CPU frequency notifier list.
1836 * This function may sleep, and has the same return conditions as
1837 * blocking_notifier_chain_unregister.
/*
 * cpufreq_unregister_notifier - remove @nb from the chain chosen by @list.
 *
 * Mirror of cpufreq_register_notifier(); may sleep.
 */
1839 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1843 if (cpufreq_disabled())
1847 case CPUFREQ_TRANSITION_NOTIFIER:
1848 ret = srcu_notifier_chain_unregister(
1849 &cpufreq_transition_notifier_list, nb);
1851 case CPUFREQ_POLICY_NOTIFIER:
1852 ret = blocking_notifier_chain_unregister(
1853 &cpufreq_policy_notifier_list, nb);
1861 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1864 /*********************************************************************
1866 *********************************************************************/
1868 /* Must set freqs->new to intermediate frequency */
/*
 * __target_intermediate - switch to the driver's intermediate frequency.
 *
 * Fills freqs->new from ->get_intermediate() and, when a switch is needed,
 * performs it via ->target_intermediate() wrapped in transition notifiers.
 * On success the caller uses freqs->new as the "old" freq of the real jump.
 */
1869 static int __target_intermediate(struct cpufreq_policy *policy,
1870 struct cpufreq_freqs *freqs, int index)
1874 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1876 /* We don't need to switch to intermediate freq */
1880 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1881 __func__, policy->cpu, freqs->old, freqs->new);
1883 cpufreq_freq_transition_begin(policy, freqs);
1884 ret = cpufreq_driver->target_intermediate(policy, index);
1885 cpufreq_freq_transition_end(policy, freqs, ret);
1888 pr_err("%s: Failed to change to intermediate frequency: %d\n",
/*
 * __target_index - switch the policy to freq_table[index].
 *
 * Optionally goes through an intermediate frequency first (drivers with
 * ->get_intermediate), emits transition notifications unless the driver is
 * CPUFREQ_ASYNC_NOTIFICATION, and on failure after an intermediate switch
 * rolls the notification state back to policy->restore_freq.
 */
1894 static int __target_index(struct cpufreq_policy *policy,
1895 struct cpufreq_frequency_table *freq_table, int index)
1897 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1898 unsigned int intermediate_freq = 0;
1899 int retval = -EINVAL;
1902 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1904 /* Handle switching to intermediate frequency */
1905 if (cpufreq_driver->get_intermediate) {
1906 retval = __target_intermediate(policy, &freqs, index);
1910 intermediate_freq = freqs.new;
1911 /* Set old freq to intermediate */
1912 if (intermediate_freq)
1913 freqs.old = freqs.new;
1916 freqs.new = freq_table[index].frequency;
1917 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1918 __func__, policy->cpu, freqs.old, freqs.new);
1920 cpufreq_freq_transition_begin(policy, &freqs);
1923 retval = cpufreq_driver->target_index(policy, index);
1925 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1929 cpufreq_freq_transition_end(policy, &freqs, retval);
1932 * Failed after setting to intermediate freq? Driver should have
1933 * reverted back to initial frequency and so should we. Check
1934 * here for intermediate_freq instead of get_intermediate, in
1935 * case we haven't switched to intermediate freq at all.
1937 if (unlikely(retval && intermediate_freq)) {
1938 freqs.old = intermediate_freq;
1939 freqs.new = policy->restore_freq;
1940 cpufreq_freq_transition_begin(policy, &freqs);
1941 cpufreq_freq_transition_end(policy, &freqs, 0);
/*
 * __cpufreq_driver_target - request a frequency change (caller holds rwsem).
 *
 * Clamps @target_freq into [policy->min, policy->max], short-circuits when
 * the clamped target equals policy->cur, saves restore_freq for error
 * rollback, then dispatches to the driver's ->target() or, for table-based
 * drivers, resolves an index and calls __target_index().
 */
1948 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1949 unsigned int target_freq,
1950 unsigned int relation)
1952 unsigned int old_target_freq = target_freq;
1953 int retval = -EINVAL;
1955 if (cpufreq_disabled())
1958 /* Make sure that target_freq is within supported range */
1959 if (target_freq > policy->max)
1960 target_freq = policy->max;
1961 if (target_freq < policy->min)
1962 target_freq = policy->min;
1964 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1965 policy->cpu, target_freq, relation, old_target_freq);
1968 * This might look like a redundant call as we are checking it again
1969 * after finding index. But it is left intentionally for cases where
1970 * exactly same freq is called again and so we can save on few function
1973 if (target_freq == policy->cur)
1976 /* Save last value to restore later on errors */
1977 policy->restore_freq = policy->cur;
1979 if (cpufreq_driver->target)
1980 retval = cpufreq_driver->target(policy, target_freq, relation);
1981 else if (cpufreq_driver->target_index) {
1982 struct cpufreq_frequency_table *freq_table;
1985 freq_table = cpufreq_frequency_get_table(policy->cpu);
1986 if (unlikely(!freq_table)) {
1987 pr_err("%s: Unable to find freq_table\n", __func__);
1991 retval = cpufreq_frequency_table_target(policy, freq_table,
1992 target_freq, relation, &index);
1993 if (unlikely(retval)) {
1994 pr_err("%s: Unable to find matching freq\n", __func__);
/* Resolved index maps back to the current freq: nothing to do. */
1998 if (freq_table[index].frequency == policy->cur) {
2003 retval = __target_index(policy, freq_table, index);
2009 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
/*
 * cpufreq_driver_target - locked wrapper around __cpufreq_driver_target().
 *
 * Takes policy->rwsem for writing around the frequency change request.
 */
2011 int cpufreq_driver_target(struct cpufreq_policy *policy,
2012 unsigned int target_freq,
2013 unsigned int relation)
2017 down_write(&policy->rwsem);
2019 ret = __cpufreq_driver_target(policy, target_freq, relation);
2021 up_write(&policy->rwsem);
2025 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
/*
 * __cpufreq_governor - drive a governor state transition for @policy.
 *
 * Handles the CPUFREQ_GOV_* events: refuses everything while suspending,
 * falls back to the performance governor when the selected governor's
 * max_transition_latency is exceeded, takes a module reference across
 * POLICY_INIT/POLICY_EXIT, and tracks governor_enabled under
 * cpufreq_governor_lock, restoring it if the governor callback fails.
 */
2027 static int __cpufreq_governor(struct cpufreq_policy *policy,
2032 /* Only must be defined when default governor is known to have latency
2033 restrictions, like e.g. conservative or ondemand.
2034 That this is the case is already ensured in Kconfig
2036 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2037 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2039 struct cpufreq_governor *gov = NULL;
2042 /* Don't start any governor operations if we are entering suspend */
2043 if (cpufreq_suspended)
2046 * Governor might not be initiated here if ACPI _PPC changed
2047 * notification happened, so check it.
2049 if (!policy->governor)
2052 if (policy->governor->max_transition_latency &&
2053 policy->cpuinfo.transition_latency >
2054 policy->governor->max_transition_latency) {
2058 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2059 policy->governor->name, gov->name);
2060 policy->governor = gov;
2064 if (event == CPUFREQ_GOV_POLICY_INIT)
2065 if (!try_module_get(policy->governor->owner))
2068 pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
/* Reject redundant transitions (double START, STOP/LIMITS when stopped). */
2070 mutex_lock(&cpufreq_governor_lock);
2071 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2072 || (!policy->governor_enabled
2073 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2074 mutex_unlock(&cpufreq_governor_lock);
2078 if (event == CPUFREQ_GOV_STOP)
2079 policy->governor_enabled = false;
2080 else if (event == CPUFREQ_GOV_START)
2081 policy->governor_enabled = true;
2083 mutex_unlock(&cpufreq_governor_lock);
2085 ret = policy->governor->governor(policy, event);
2088 if (event == CPUFREQ_GOV_POLICY_INIT)
2089 policy->governor->initialized++;
2090 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2091 policy->governor->initialized--;
2093 /* Restore original values */
2094 mutex_lock(&cpufreq_governor_lock);
2095 if (event == CPUFREQ_GOV_STOP)
2096 policy->governor_enabled = true;
2097 else if (event == CPUFREQ_GOV_START)
2098 policy->governor_enabled = false;
2099 mutex_unlock(&cpufreq_governor_lock);
/* Drop module ref on failed INIT or successful EXIT. */
2102 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2103 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2104 module_put(policy->governor->owner);
/*
 * cpufreq_register_governor - add @governor to the global governor list.
 *
 * Under cpufreq_governor_mutex; only added if no governor with the same
 * name is already registered (find_governor()).
 */
2109 int cpufreq_register_governor(struct cpufreq_governor *governor)
2116 if (cpufreq_disabled())
2119 mutex_lock(&cpufreq_governor_mutex);
2121 governor->initialized = 0;
2123 if (!find_governor(governor->name)) {
2125 list_add(&governor->governor_list, &cpufreq_governor_list);
2128 mutex_unlock(&cpufreq_governor_mutex);
2131 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
/*
 * cpufreq_unregister_governor - remove @governor from the global list.
 *
 * First clears last_governor on every inactive policy that still names this
 * governor (under the driver read-lock) so it can't be restored later, then
 * unlinks the governor under cpufreq_governor_mutex.
 */
2133 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2135 struct cpufreq_policy *policy;
2136 unsigned long flags;
2141 if (cpufreq_disabled())
2144 /* clear last_governor for all inactive policies */
2145 read_lock_irqsave(&cpufreq_driver_lock, flags);
2146 for_each_inactive_policy(policy) {
2147 if (!strcmp(policy->last_governor, governor->name)) {
2148 policy->governor = NULL;
2149 strcpy(policy->last_governor, "\0");
2152 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2154 mutex_lock(&cpufreq_governor_mutex);
2155 list_del(&governor->governor_list);
2156 mutex_unlock(&cpufreq_governor_mutex);
2159 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2162 /*********************************************************************
2163 * POLICY INTERFACE *
2164 *********************************************************************/
2167 * cpufreq_get_policy - get the current cpufreq_policy
2168 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2171 * Reads the current cpufreq policy.
/*
 * cpufreq_get_policy - copy @cpu's current policy into caller-provided @policy.
 *
 * Takes a policy reference, memcpy()s the whole struct out, then drops the
 * reference.
 */
2173 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2175 struct cpufreq_policy *cpu_policy;
2179 cpu_policy = cpufreq_cpu_get(cpu);
2183 memcpy(policy, cpu_policy, sizeof(*policy));
2185 cpufreq_cpu_put(cpu_policy);
2188 EXPORT_SYMBOL(cpufreq_get_policy);
2191 * policy : current policy.
2192 * new_policy: policy to be set.
/*
 * cpufreq_set_policy - apply @new_policy to @policy (caller holds rwsem).
 *
 * Validates limits with the driver's ->verify() (twice: once before and once
 * after CPUFREQ_ADJUST notifiers may tweak them), publishes the new min/max,
 * and either hands off to a setpolicy driver or performs a governor switch:
 * stop+exit the old governor, init+start the new one, rolling back to the
 * old governor if the new one fails to start.
 */
2194 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2195 struct cpufreq_policy *new_policy)
2197 struct cpufreq_governor *old_gov;
2200 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2201 new_policy->cpu, new_policy->min, new_policy->max);
2203 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2206 * This check works well when we store new min/max freq attributes,
2207 * because new_policy is a copy of policy with one field updated.
2209 if (new_policy->min > new_policy->max)
2212 /* verify the cpu speed can be set within this limit */
2213 ret = cpufreq_driver->verify(new_policy);
2217 /* adjust if necessary - all reasons */
2218 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2219 CPUFREQ_ADJUST, new_policy);
2222 * verify the cpu speed can be set within this limit, which might be
2223 * different to the first one
2225 ret = cpufreq_driver->verify(new_policy);
2229 /* notification of the new policy */
2230 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2231 CPUFREQ_NOTIFY, new_policy);
2233 scale_freq_capacity(new_policy, NULL);
2235 policy->min = new_policy->min;
2236 policy->max = new_policy->max;
2237 trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
2239 pr_debug("new min and max freqs are %u - %u kHz\n",
2240 policy->min, policy->max);
2242 if (cpufreq_driver->setpolicy) {
2243 policy->policy = new_policy->policy;
2244 pr_debug("setting range\n");
2245 return cpufreq_driver->setpolicy(new_policy);
/* Same governor: just refresh its limits below. */
2248 if (new_policy->governor == policy->governor)
2251 pr_debug("governor switch\n");
2253 /* save old, working values */
2254 old_gov = policy->governor;
2255 /* end old governor */
2257 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2259 /* This can happen due to race with other operations */
2260 pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
2261 __func__, old_gov->name, ret);
/* rwsem dropped around POLICY_EXIT to avoid deadlock with the governor. */
2265 up_write(&policy->rwsem);
2266 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2267 down_write(&policy->rwsem);
2270 pr_err("%s: Failed to Exit Governor: %s (%d)\n",
2271 __func__, old_gov->name, ret);
2276 /* start new governor */
2277 policy->governor = new_policy->governor;
2278 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2280 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
2284 up_write(&policy->rwsem);
2285 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2286 down_write(&policy->rwsem);
2289 /* new governor failed, so re-start old one */
2290 pr_debug("starting governor %s failed\n", policy->governor->name);
2292 policy->governor = old_gov;
2293 if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
2294 policy->governor = NULL;
2296 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2302 pr_debug("governor: change or update limits\n");
2303 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2307 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2308 * @cpu: CPU which shall be re-evaluated
2310 * Useful for policy notifiers which have different necessities
2311 * at different times.
/*
 * cpufreq_update_policy - re-apply user limits and resync the frequency.
 *
 * Builds a copy of the current policy with user_policy min/max restored,
 * re-reads the hardware frequency (for drivers with ->get and no
 * ->setpolicy) and notifies via cpufreq_out_of_sync() on a mismatch, then
 * pushes the result through cpufreq_set_policy() under policy->rwsem.
 */
2313 int cpufreq_update_policy(unsigned int cpu)
2315 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2316 struct cpufreq_policy new_policy;
2322 down_write(&policy->rwsem);
2324 pr_debug("updating policy for CPU %u\n", cpu);
2325 memcpy(&new_policy, policy, sizeof(*policy));
2326 new_policy.min = policy->user_policy.min;
2327 new_policy.max = policy->user_policy.max;
2330 * BIOS might change freq behind our back
2331 * -> ask driver for current freq and notify governors about a change
2333 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2334 new_policy.cur = cpufreq_driver->get(cpu);
2335 if (WARN_ON(!new_policy.cur)) {
2341 pr_debug("Driver did not initialize current freq\n");
2342 policy->cur = new_policy.cur;
2344 if (policy->cur != new_policy.cur && has_target())
2345 cpufreq_out_of_sync(policy, new_policy.cur);
2349 ret = cpufreq_set_policy(policy, &new_policy);
2352 up_write(&policy->rwsem);
2354 cpufreq_cpu_put(policy);
2357 EXPORT_SYMBOL(cpufreq_update_policy);
/*
 * cpufreq_cpu_callback - CPU hotplug notifier.
 *
 * Online (and DOWN_FAILED) events go to cpufreq_online(); going down is
 * split across DOWN_PREPARE (cpufreq_offline_prepare) and a later event
 * (cpufreq_offline_finish).  FROZEN variants are handled identically.
 */
2359 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2360 unsigned long action, void *hcpu)
2362 unsigned int cpu = (unsigned long)hcpu;
2364 switch (action & ~CPU_TASKS_FROZEN) {
2366 cpufreq_online(cpu);
2369 case CPU_DOWN_PREPARE:
2370 cpufreq_offline_prepare(cpu);
2374 cpufreq_offline_finish(cpu);
2377 case CPU_DOWN_FAILED:
2378 cpufreq_online(cpu);
/* Notifier block registered in cpufreq_register_driver(). */
2384 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2385 .notifier_call = cpufreq_cpu_callback,
2388 /*********************************************************************
2390 *********************************************************************/
/*
 * cpufreq_boost_set_sw - software boost toggle (default ->set_boost).
 *
 * Re-derives each active policy's cpuinfo limits from its frequency table
 * (which includes/excludes boost entries per @state), mirrors the new max
 * into user_policy.max, and kicks the governor with GOV_LIMITS.
 */
2391 static int cpufreq_boost_set_sw(int state)
2393 struct cpufreq_frequency_table *freq_table;
2394 struct cpufreq_policy *policy;
2397 for_each_active_policy(policy) {
2398 freq_table = cpufreq_frequency_get_table(policy->cpu);
2400 ret = cpufreq_frequency_table_cpuinfo(policy,
2403 pr_err("%s: Policy frequency update failed\n",
2407 policy->user_policy.max = policy->max;
2408 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
/*
 * cpufreq_boost_trigger_state - flip the driver-wide boost_enabled flag.
 *
 * Updates the flag under the driver write-lock before calling the driver's
 * ->set_boost(); on failure the flag is reverted (again under the lock) and
 * the error logged.
 */
2415 int cpufreq_boost_trigger_state(int state)
2417 unsigned long flags;
2420 if (cpufreq_driver->boost_enabled == state)
2423 write_lock_irqsave(&cpufreq_driver_lock, flags);
2424 cpufreq_driver->boost_enabled = state;
2425 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2427 ret = cpufreq_driver->set_boost(state);
2429 write_lock_irqsave(&cpufreq_driver_lock, flags);
2430 cpufreq_driver->boost_enabled = !state;
2431 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2433 pr_err("%s: Cannot %s BOOST\n",
2434 __func__, state ? "enable" : "disable");
/* Whether the registered driver supports boost (false if no driver). */
2440 int cpufreq_boost_supported(void)
2442 if (likely(cpufreq_driver))
2443 return cpufreq_driver->boost_supported;
2447 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
/*
 * create_boost_sysfs_file - expose the global "boost" attribute in sysfs.
 *
 * No-op when boost is unsupported.  Installs cpufreq_boost_set_sw as the
 * driver's ->set_boost when the driver provides none.
 */
2449 static int create_boost_sysfs_file(void)
2453 if (!cpufreq_boost_supported())
2457 * Check if driver provides function to enable boost -
2458 * if not, use cpufreq_boost_set_sw as default
2460 if (!cpufreq_driver->set_boost)
2461 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2463 ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2465 pr_err("%s: cannot register global BOOST sysfs file\n",
/* Remove the global "boost" sysfs attribute if boost is supported. */
2471 static void remove_boost_sysfs_file(void)
2473 if (cpufreq_boost_supported())
2474 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
/*
 * cpufreq_enable_boost_support - let a driver opt into boost after register.
 *
 * Marks the driver boost-capable and creates the sysfs file; the file is
 * torn down on driver unregister.
 */
2477 int cpufreq_enable_boost_support(void)
2479 if (!cpufreq_driver)
2482 if (cpufreq_boost_supported())
2485 cpufreq_driver->boost_supported = true;
2487 /* This will get removed on driver unregister */
2488 return create_boost_sysfs_file();
2490 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
/* Current boost_enabled state; assumes a driver is registered. */
2492 int cpufreq_boost_enabled(void)
2494 return cpufreq_driver->boost_enabled;
2496 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2498 /*********************************************************************
2499 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2500 *********************************************************************/
2503 * cpufreq_register_driver - register a CPU Frequency driver
2504 * @driver_data: A struct cpufreq_driver containing the values#
2505 * submitted by the CPU Frequency driver.
2507 * Registers a CPU Frequency driver to this core code. This code
2508 * returns zero on success, -EBUSY when another driver got here first
2509 * (and isn't unregistered in the meantime).
/*
 * cpufreq_register_driver - install @driver_data as the active driver.
 *
 * Validates the ops combination (need ->verify/->init plus exactly one of
 * setpolicy or target/target_index; get_intermediate and target_intermediate
 * must come as a pair), claims the global cpufreq_driver slot under the
 * write-lock (-EBUSY if taken), registers boost sysfs + the subsys
 * interface, and unwinds everything on failure via goto labels.
 */
2512 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2514 unsigned long flags;
2517 if (cpufreq_disabled())
2520 if (!driver_data || !driver_data->verify || !driver_data->init ||
2521 !(driver_data->setpolicy || driver_data->target_index ||
2522 driver_data->target) ||
2523 (driver_data->setpolicy && (driver_data->target_index ||
2524 driver_data->target)) ||
2525 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2528 pr_debug("trying to register driver %s\n", driver_data->name);
2530 /* Protect against concurrent CPU online/offline. */
2533 write_lock_irqsave(&cpufreq_driver_lock, flags);
2534 if (cpufreq_driver) {
2535 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2539 cpufreq_driver = driver_data;
2540 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* setpolicy drivers keep loops-per-jiffy constant from core's view. */
2542 if (driver_data->setpolicy)
2543 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2545 ret = create_boost_sysfs_file();
2547 goto err_null_driver;
2549 ret = subsys_interface_register(&cpufreq_interface);
2551 goto err_boost_unreg;
2553 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2554 list_empty(&cpufreq_policy_list)) {
2555 /* if all ->init() calls failed, unregister */
2557 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2562 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2563 pr_debug("driver %s up and running\n", driver_data->name);
/* Error unwind labels follow. */
2570 subsys_interface_unregister(&cpufreq_interface);
2572 remove_boost_sysfs_file();
2574 write_lock_irqsave(&cpufreq_driver_lock, flags);
2575 cpufreq_driver = NULL;
2576 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2579 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2582 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2584 * Unregister the current CPUFreq driver. Only call this if you have
2585 * the right to do so, i.e. if you have succeeded in initialising before!
2586 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2587 * currently not initialised.
/*
 * cpufreq_unregister_driver - remove the currently registered driver.
 *
 * Rejects a @driver that is not the active one.  Tears down the subsys
 * interface, boost sysfs file and the hotplug notifier, then clears the
 * global cpufreq_driver pointer under the write-lock.
 */
2589 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2591 unsigned long flags;
2593 if (!cpufreq_driver || (driver != cpufreq_driver))
2596 pr_debug("unregistering driver %s\n", driver->name);
2598 /* Protect against concurrent cpu hotplug */
2600 subsys_interface_unregister(&cpufreq_interface);
2601 remove_boost_sysfs_file();
2602 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2604 write_lock_irqsave(&cpufreq_driver_lock, flags);
2606 cpufreq_driver = NULL;
2608 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2613 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2616 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2617 * or mutexes when secondary CPUs are halted.
/* Suspend governors at shutdown so no locks are held when CPUs halt. */
2619 static struct syscore_ops cpufreq_syscore_ops = {
2620 .shutdown = cpufreq_suspend,
/* Global /sys/devices/system/cpu/cpufreq kobject, created at core init. */
2623 struct kobject *cpufreq_global_kobject;
2624 EXPORT_SYMBOL(cpufreq_global_kobject);
/*
 * cpufreq_core_init - core initcall: create the global cpufreq kobject
 * under the CPU subsystem root and register the shutdown syscore ops.
 */
2626 static int __init cpufreq_core_init(void)
2628 if (cpufreq_disabled())
2631 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2632 BUG_ON(!cpufreq_global_kobject);
2634 register_syscore_ops(&cpufreq_syscore_ops);
2638 core_initcall(cpufreq_core_init);