2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/cpufreq_times.h>
23 #include <linux/delay.h>
24 #include <linux/device.h>
25 #include <linux/init.h>
26 #include <linux/kernel_stat.h>
27 #include <linux/module.h>
28 #include <linux/mutex.h>
29 #include <linux/slab.h>
30 #include <linux/suspend.h>
31 #include <linux/syscore_ops.h>
32 #include <linux/tick.h>
34 #include <linux/sched.h>
36 #include <trace/events/power.h>
38 static LIST_HEAD(cpufreq_policy_list);
40 static inline bool policy_is_inactive(struct cpufreq_policy *policy)
42 return cpumask_empty(policy->cpus);
45 static bool suitable_policy(struct cpufreq_policy *policy, bool active)
47 return active == !policy_is_inactive(policy);
50 /* Finds Next Acive/Inactive policy */
51 static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
55 policy = list_next_entry(policy, policy_list);
57 /* No more policies in the list */
58 if (&policy->policy_list == &cpufreq_policy_list)
60 } while (!suitable_policy(policy, active));
65 static struct cpufreq_policy *first_policy(bool active)
67 struct cpufreq_policy *policy;
69 /* No policies in the list */
70 if (list_empty(&cpufreq_policy_list))
73 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
76 if (!suitable_policy(policy, active))
77 policy = next_policy(policy, active);
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)		\
	for (__policy = first_policy(__active);			\
	     __policy;						\
	     __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)			\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)			\
	for_each_suitable_policy(__policy, false)

/* Iterate over every policy, regardless of active state. */
#define for_each_policy(__policy)				\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over registered governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
102 * The "cpufreq driver" - the arch- or hardware-dependent low
103 * level driver of CPUFreq support, and its spinlock. This lock
104 * also protects the cpufreq_cpu_data array.
106 static struct cpufreq_driver *cpufreq_driver;
107 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
108 static DEFINE_RWLOCK(cpufreq_driver_lock);
109 DEFINE_MUTEX(cpufreq_governor_lock);
111 /* Flag to suspend/resume CPUFreq governors */
112 static bool cpufreq_suspended;
114 static inline bool has_target(void)
116 return cpufreq_driver->target_index || cpufreq_driver->target;
119 /* internal prototypes */
120 static int __cpufreq_governor(struct cpufreq_policy *policy,
122 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
123 static void handle_update(struct work_struct *work);
126 * Two notifier lists: the "policy" list is involved in the
127 * validation process for a new CPU frequency policy; the
128 * "transition" list for kernel code that needs to handle
129 * changes to devices when the CPU clock speed changes.
130 * The mutex locks both lists.
132 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
133 static struct srcu_notifier_head cpufreq_transition_notifier_list;
135 static bool init_cpufreq_transition_notifier_list_called;
136 static int __init init_cpufreq_transition_notifier_list(void)
138 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
139 init_cpufreq_transition_notifier_list_called = true;
142 pure_initcall(init_cpufreq_transition_notifier_list);
144 static int off __read_mostly;
145 static int cpufreq_disabled(void)
149 void disable_cpufreq(void)
153 static DEFINE_MUTEX(cpufreq_governor_mutex);
155 bool have_governor_per_policy(void)
157 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
159 EXPORT_SYMBOL_GPL(have_governor_per_policy);
161 bool cpufreq_driver_is_slow(void)
163 return !(cpufreq_driver->flags & CPUFREQ_DRIVER_FAST);
165 EXPORT_SYMBOL_GPL(cpufreq_driver_is_slow);
167 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
169 if (have_governor_per_policy())
170 return &policy->kobj;
172 return cpufreq_global_kobject;
174 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
176 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
178 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
180 return policy && !policy_is_inactive(policy) ?
181 policy->freq_table : NULL;
183 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
185 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
191 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
193 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
194 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
195 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
196 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
197 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
198 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
200 idle_time = cur_wall_time - busy_time;
202 *wall = cputime_to_usecs(cur_wall_time);
204 return cputime_to_usecs(idle_time);
207 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
209 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
211 if (idle_time == -1ULL)
212 return get_cpu_idle_time_jiffy(cpu, wall);
214 idle_time += get_cpu_iowait_time_us(cpu, wall);
218 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
221 * This is a generic cpufreq init() routine which can be used by cpufreq
222 * drivers of SMP systems. It will do following:
223 * - validate & show freq table passed
224 * - set policies transition latency
225 * - policy->cpus with all possible CPUs
227 int cpufreq_generic_init(struct cpufreq_policy *policy,
228 struct cpufreq_frequency_table *table,
229 unsigned int transition_latency)
233 ret = cpufreq_table_validate_and_show(policy, table);
235 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
239 policy->cpuinfo.transition_latency = transition_latency;
242 * The driver only supports the SMP configuration where all processors
243 * share the clock and voltage and clock.
245 cpumask_setall(policy->cpus);
249 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
251 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
253 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
255 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
257 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
259 unsigned int cpufreq_generic_get(unsigned int cpu)
261 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
263 if (!policy || IS_ERR(policy->clk)) {
264 pr_err("%s: No %s associated to cpu: %d\n",
265 __func__, policy ? "clk" : "policy", cpu);
269 return clk_get_rate(policy->clk) / 1000;
271 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
274 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
276 * @cpu: cpu to find policy for.
278 * This returns policy for 'cpu', returns NULL if it doesn't exist.
279 * It also increments the kobject reference count to mark it busy and so would
280 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
281 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
282 * freed as that depends on the kobj count.
284 * Return: A valid policy on success, otherwise NULL on failure.
286 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
288 struct cpufreq_policy *policy = NULL;
291 if (WARN_ON(cpu >= nr_cpu_ids))
294 /* get the cpufreq driver */
295 read_lock_irqsave(&cpufreq_driver_lock, flags);
297 if (cpufreq_driver) {
299 policy = cpufreq_cpu_get_raw(cpu);
301 kobject_get(&policy->kobj);
304 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
308 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
311 * cpufreq_cpu_put: Decrements the usage count of a policy
313 * @policy: policy earlier returned by cpufreq_cpu_get().
315 * This decrements the kobject reference count incremented earlier by calling
318 void cpufreq_cpu_put(struct cpufreq_policy *policy)
320 kobject_put(&policy->kobj);
322 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
324 /*********************************************************************
325 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
326 *********************************************************************/
329 * adjust_jiffies - adjust the system "loops_per_jiffy"
331 * This function alters the system "loops_per_jiffy" for the clock
332 * speed change. Note that loops_per_jiffy cannot be updated on SMP
333 * systems as each CPU might be scaled differently. So, use the arch
334 * per-CPU loops_per_jiffy value wherever possible.
336 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
339 static unsigned long l_p_j_ref;
340 static unsigned int l_p_j_ref_freq;
342 if (ci->flags & CPUFREQ_CONST_LOOPS)
345 if (!l_p_j_ref_freq) {
346 l_p_j_ref = loops_per_jiffy;
347 l_p_j_ref_freq = ci->old;
348 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
349 l_p_j_ref, l_p_j_ref_freq);
351 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
352 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
354 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
355 loops_per_jiffy, ci->new);
360 /*********************************************************************
361 * FREQUENCY INVARIANT CPU CAPACITY *
362 *********************************************************************/
364 static DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
365 static DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE;
368 scale_freq_capacity(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs)
370 unsigned long cur = freqs ? freqs->new : policy->cur;
371 unsigned long scale = (cur << SCHED_CAPACITY_SHIFT) / policy->max;
372 struct cpufreq_cpuinfo *cpuinfo = &policy->cpuinfo;
375 pr_debug("cpus %*pbl cur/cur max freq %lu/%u kHz freq scale %lu\n",
376 cpumask_pr_args(policy->cpus), cur, policy->max, scale);
378 for_each_cpu(cpu, policy->cpus)
379 per_cpu(freq_scale, cpu) = scale;
384 scale = (policy->max << SCHED_CAPACITY_SHIFT) / cpuinfo->max_freq;
386 pr_debug("cpus %*pbl cur max/max freq %u/%u kHz max freq scale %lu\n",
387 cpumask_pr_args(policy->cpus), policy->max, cpuinfo->max_freq,
390 for_each_cpu(cpu, policy->cpus)
391 per_cpu(max_freq_scale, cpu) = scale;
394 unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu)
396 return per_cpu(freq_scale, cpu);
399 unsigned long cpufreq_scale_max_freq_capacity(int cpu)
401 return per_cpu(max_freq_scale, cpu);
404 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
405 struct cpufreq_freqs *freqs, unsigned int state)
407 BUG_ON(irqs_disabled());
409 if (cpufreq_disabled())
412 freqs->flags = cpufreq_driver->flags;
413 pr_debug("notification %u of frequency transition to %u kHz\n",
418 case CPUFREQ_PRECHANGE:
419 /* detect if the driver reported a value as "old frequency"
420 * which is not equal to what the cpufreq core thinks is
423 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
424 if ((policy) && (policy->cpu == freqs->cpu) &&
425 (policy->cur) && (policy->cur != freqs->old)) {
426 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
427 freqs->old, policy->cur);
428 freqs->old = policy->cur;
431 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
432 CPUFREQ_PRECHANGE, freqs);
433 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
436 case CPUFREQ_POSTCHANGE:
437 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
438 pr_debug("FREQ: %lu - CPU: %lu\n",
439 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
440 trace_cpu_frequency(freqs->new, freqs->cpu);
441 cpufreq_times_record_transition(freqs);
442 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
443 CPUFREQ_POSTCHANGE, freqs);
444 if (likely(policy) && likely(policy->cpu == freqs->cpu))
445 policy->cur = freqs->new;
451 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
452 * on frequency transition.
454 * This function calls the transition notifiers and the "adjust_jiffies"
455 * function. It is called twice on all CPU frequency changes that have
458 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
459 struct cpufreq_freqs *freqs, unsigned int state)
461 for_each_cpu(freqs->cpu, policy->cpus)
462 __cpufreq_notify_transition(policy, freqs, state);
465 /* Do post notifications when there are chances that transition has failed */
466 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
467 struct cpufreq_freqs *freqs, int transition_failed)
469 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
470 if (!transition_failed)
473 swap(freqs->old, freqs->new);
474 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
475 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
478 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
479 struct cpufreq_freqs *freqs)
486 * Catch double invocations of _begin() which lead to self-deadlock.
487 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
488 * doesn't invoke _begin() on their behalf, and hence the chances of
489 * double invocations are very low. Moreover, there are scenarios
490 * where these checks can emit false-positive warnings in these
491 * drivers; so we avoid that by skipping them altogether.
493 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
494 && current == policy->transition_task);
497 wait_event(policy->transition_wait, !policy->transition_ongoing);
499 spin_lock(&policy->transition_lock);
501 if (unlikely(policy->transition_ongoing)) {
502 spin_unlock(&policy->transition_lock);
506 policy->transition_ongoing = true;
507 policy->transition_task = current;
509 spin_unlock(&policy->transition_lock);
511 scale_freq_capacity(policy, freqs);
513 for_each_cpu(cpu, policy->cpus)
514 trace_cpu_capacity(capacity_curr_of(cpu), cpu);
517 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
519 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
521 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
522 struct cpufreq_freqs *freqs, int transition_failed)
524 if (unlikely(WARN_ON(!policy->transition_ongoing)))
527 cpufreq_notify_post_transition(policy, freqs, transition_failed);
529 policy->transition_ongoing = false;
530 policy->transition_task = NULL;
532 wake_up(&policy->transition_wait);
534 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
537 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
539 * @target_freq: target frequency to resolve.
541 * The target to driver frequency mapping is cached in the policy.
543 * Return: Lowest driver-supported frequency greater than or equal to the
544 * given target_freq, subject to policy (min/max) and driver limitations.
546 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
547 unsigned int target_freq)
549 target_freq = clamp_val(target_freq, policy->min, policy->max);
550 policy->cached_target_freq = target_freq;
552 if (cpufreq_driver->target_index) {
555 rv = cpufreq_frequency_table_target(policy, policy->freq_table,
561 policy->cached_resolved_idx = idx;
562 return policy->freq_table[idx].frequency;
567 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
569 /*********************************************************************
571 *********************************************************************/
572 static ssize_t show_boost(struct kobject *kobj,
573 struct kobj_attribute *attr, char *buf)
575 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
578 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
579 const char *buf, size_t count)
583 ret = sscanf(buf, "%d", &enable);
584 if (ret != 1 || enable < 0 || enable > 1)
587 if (cpufreq_boost_trigger_state(enable)) {
588 pr_err("%s: Cannot %s BOOST!\n",
589 __func__, enable ? "enable" : "disable");
593 pr_debug("%s: cpufreq BOOST %s\n",
594 __func__, enable ? "enabled" : "disabled");
598 define_one_global_rw(boost);
600 static struct cpufreq_governor *find_governor(const char *str_governor)
602 struct cpufreq_governor *t;
605 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
612 * cpufreq_parse_governor - parse a governor string
614 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
615 struct cpufreq_governor **governor)
619 if (cpufreq_driver->setpolicy) {
620 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
621 *policy = CPUFREQ_POLICY_PERFORMANCE;
623 } else if (!strncasecmp(str_governor, "powersave",
625 *policy = CPUFREQ_POLICY_POWERSAVE;
629 struct cpufreq_governor *t;
631 mutex_lock(&cpufreq_governor_mutex);
633 t = find_governor(str_governor);
638 mutex_unlock(&cpufreq_governor_mutex);
639 ret = request_module("cpufreq_%s", str_governor);
640 mutex_lock(&cpufreq_governor_mutex);
643 t = find_governor(str_governor);
651 mutex_unlock(&cpufreq_governor_mutex);
657 * cpufreq_per_cpu_attr_read() / show_##file_name() -
658 * print out cpufreq information
660 * Write out information from cpufreq_driver->policy[cpu]; object must be
664 #define show_one(file_name, object) \
665 static ssize_t show_##file_name \
666 (struct cpufreq_policy *policy, char *buf) \
668 return sprintf(buf, "%u\n", policy->object); \
671 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
672 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
673 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
674 show_one(scaling_min_freq, min);
675 show_one(scaling_max_freq, max);
677 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
681 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
682 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
684 ret = sprintf(buf, "%u\n", policy->cur);
688 static int cpufreq_set_policy(struct cpufreq_policy *policy,
689 struct cpufreq_policy *new_policy);
692 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
694 #define store_one(file_name, object) \
695 static ssize_t store_##file_name \
696 (struct cpufreq_policy *policy, const char *buf, size_t count) \
699 struct cpufreq_policy new_policy; \
701 memcpy(&new_policy, policy, sizeof(*policy)); \
702 new_policy.min = policy->user_policy.min; \
703 new_policy.max = policy->user_policy.max; \
705 ret = sscanf(buf, "%u", &new_policy.object); \
709 temp = new_policy.object; \
710 ret = cpufreq_set_policy(policy, &new_policy); \
712 policy->user_policy.object = temp; \
714 return ret ? ret : count; \
717 store_one(scaling_min_freq, min);
718 store_one(scaling_max_freq, max);
721 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
723 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
726 unsigned int cur_freq = __cpufreq_get(policy);
729 return sprintf(buf, "%u\n", cur_freq);
731 return sprintf(buf, "<unknown>\n");
735 * show_scaling_governor - show the current policy for the specified CPU
737 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
739 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
740 return sprintf(buf, "powersave\n");
741 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
742 return sprintf(buf, "performance\n");
743 else if (policy->governor)
744 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
745 policy->governor->name);
750 * store_scaling_governor - store policy for the specified CPU
752 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
753 const char *buf, size_t count)
756 char str_governor[16];
757 struct cpufreq_policy new_policy;
759 memcpy(&new_policy, policy, sizeof(*policy));
761 ret = sscanf(buf, "%15s", str_governor);
765 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
766 &new_policy.governor))
769 ret = cpufreq_set_policy(policy, &new_policy);
770 return ret ? ret : count;
774 * show_scaling_driver - show the cpufreq driver currently loaded
776 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
778 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
782 * show_scaling_available_governors - show the available CPUfreq governors
784 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
788 struct cpufreq_governor *t;
791 i += sprintf(buf, "performance powersave");
795 for_each_governor(t) {
796 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
797 - (CPUFREQ_NAME_LEN + 2)))
799 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
802 i += sprintf(&buf[i], "\n");
806 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
811 for_each_cpu(cpu, mask) {
813 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
814 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
815 if (i >= (PAGE_SIZE - 5))
818 i += sprintf(&buf[i], "\n");
821 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
824 * show_related_cpus - show the CPUs affected by each transition even if
825 * hw coordination is in use
827 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
829 return cpufreq_show_cpus(policy->related_cpus, buf);
833 * show_affected_cpus - show the CPUs affected by each transition
835 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
837 return cpufreq_show_cpus(policy->cpus, buf);
840 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
841 const char *buf, size_t count)
843 unsigned int freq = 0;
846 if (!policy->governor || !policy->governor->store_setspeed)
849 ret = sscanf(buf, "%u", &freq);
853 policy->governor->store_setspeed(policy, freq);
858 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
860 if (!policy->governor || !policy->governor->show_setspeed)
861 return sprintf(buf, "<unsupported>\n");
863 return policy->governor->show_setspeed(policy, buf);
867 * show_bios_limit - show the current cpufreq HW/BIOS limitation
869 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
873 if (cpufreq_driver->bios_limit) {
874 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
876 return sprintf(buf, "%u\n", limit);
878 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
881 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
882 cpufreq_freq_attr_ro(cpuinfo_min_freq);
883 cpufreq_freq_attr_ro(cpuinfo_max_freq);
884 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
885 cpufreq_freq_attr_ro(scaling_available_governors);
886 cpufreq_freq_attr_ro(scaling_driver);
887 cpufreq_freq_attr_ro(scaling_cur_freq);
888 cpufreq_freq_attr_ro(bios_limit);
889 cpufreq_freq_attr_ro(related_cpus);
890 cpufreq_freq_attr_ro(affected_cpus);
891 cpufreq_freq_attr_rw(scaling_min_freq);
892 cpufreq_freq_attr_rw(scaling_max_freq);
893 cpufreq_freq_attr_rw(scaling_governor);
894 cpufreq_freq_attr_rw(scaling_setspeed);
896 static struct attribute *default_attrs[] = {
897 &cpuinfo_min_freq.attr,
898 &cpuinfo_max_freq.attr,
899 &cpuinfo_transition_latency.attr,
900 &scaling_min_freq.attr,
901 &scaling_max_freq.attr,
904 &scaling_governor.attr,
905 &scaling_driver.attr,
906 &scaling_available_governors.attr,
907 &scaling_setspeed.attr,
911 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
912 #define to_attr(a) container_of(a, struct freq_attr, attr)
914 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
916 struct cpufreq_policy *policy = to_policy(kobj);
917 struct freq_attr *fattr = to_attr(attr);
923 down_read(&policy->rwsem);
926 ret = fattr->show(policy, buf);
930 up_read(&policy->rwsem);
935 static ssize_t store(struct kobject *kobj, struct attribute *attr,
936 const char *buf, size_t count)
938 struct cpufreq_policy *policy = to_policy(kobj);
939 struct freq_attr *fattr = to_attr(attr);
940 ssize_t ret = -EINVAL;
947 if (!cpu_online(policy->cpu))
950 down_write(&policy->rwsem);
953 ret = fattr->store(policy, buf, count);
957 up_write(&policy->rwsem);
964 static void cpufreq_sysfs_release(struct kobject *kobj)
966 struct cpufreq_policy *policy = to_policy(kobj);
967 pr_debug("last reference is dropped\n");
968 complete(&policy->kobj_unregister);
971 static const struct sysfs_ops sysfs_ops = {
976 static struct kobj_type ktype_cpufreq = {
977 .sysfs_ops = &sysfs_ops,
978 .default_attrs = default_attrs,
979 .release = cpufreq_sysfs_release,
982 static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
984 struct device *cpu_dev;
986 pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
991 cpu_dev = get_cpu_device(cpu);
992 if (WARN_ON(!cpu_dev))
995 return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
998 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
1000 struct device *cpu_dev;
1002 pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
1004 cpu_dev = get_cpu_device(cpu);
1005 if (WARN_ON(!cpu_dev))
1008 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1011 /* Add/remove symlinks for all related CPUs */
1012 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
1017 /* Some related CPUs might not be present (physically hotplugged) */
1018 for_each_cpu(j, policy->real_cpus) {
1019 ret = add_cpu_dev_symlink(policy, j);
1027 static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1031 /* Some related CPUs might not be present (physically hotplugged) */
1032 for_each_cpu(j, policy->real_cpus)
1033 remove_cpu_dev_symlink(policy, j);
1036 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1038 struct freq_attr **drv_attr;
1041 /* set up files for this cpu device */
1042 drv_attr = cpufreq_driver->attr;
1043 while (drv_attr && *drv_attr) {
1044 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1049 if (cpufreq_driver->get) {
1050 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1055 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1059 if (cpufreq_driver->bios_limit) {
1060 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1065 return cpufreq_add_dev_symlink(policy);
1068 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1070 struct cpufreq_governor *gov = NULL;
1071 struct cpufreq_policy new_policy;
1073 memcpy(&new_policy, policy, sizeof(*policy));
1075 /* Update governor of new_policy to the governor used before hotplug */
1076 gov = find_governor(policy->last_governor);
1078 pr_debug("Restoring governor %s for cpu %d\n",
1079 policy->governor->name, policy->cpu);
1081 gov = CPUFREQ_DEFAULT_GOVERNOR;
1083 new_policy.governor = gov;
1085 /* Use the default policy if there is no last_policy. */
1086 if (cpufreq_driver->setpolicy) {
1087 if (policy->last_policy)
1088 new_policy.policy = policy->last_policy;
1090 cpufreq_parse_governor(gov->name, &new_policy.policy,
1093 /* set default policy */
1094 return cpufreq_set_policy(policy, &new_policy);
1097 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1101 /* Has this CPU been taken care of already? */
1102 if (cpumask_test_cpu(cpu, policy->cpus))
1106 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1108 pr_err("%s: Failed to stop governor\n", __func__);
1113 down_write(&policy->rwsem);
1114 cpumask_set_cpu(cpu, policy->cpus);
1115 up_write(&policy->rwsem);
1118 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1120 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1123 pr_err("%s: Failed to start governor\n", __func__);
1131 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1133 struct device *dev = get_cpu_device(cpu);
1134 struct cpufreq_policy *policy;
1139 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1143 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1144 goto err_free_policy;
1146 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1147 goto err_free_cpumask;
1149 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1150 goto err_free_rcpumask;
1152 kobject_init(&policy->kobj, &ktype_cpufreq);
1153 INIT_LIST_HEAD(&policy->policy_list);
1154 init_rwsem(&policy->rwsem);
1155 spin_lock_init(&policy->transition_lock);
1156 init_waitqueue_head(&policy->transition_wait);
1157 init_completion(&policy->kobj_unregister);
1158 INIT_WORK(&policy->update, handle_update);
1164 free_cpumask_var(policy->related_cpus);
1166 free_cpumask_var(policy->cpus);
1173 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
1175 struct kobject *kobj;
1176 struct completion *cmp;
1179 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1180 CPUFREQ_REMOVE_POLICY, policy);
1182 down_write(&policy->rwsem);
1183 cpufreq_remove_dev_symlink(policy);
1184 kobj = &policy->kobj;
1185 cmp = &policy->kobj_unregister;
1186 up_write(&policy->rwsem);
1190 * We need to make sure that the underlying kobj is
1191 * actually not referenced anymore by anybody before we
1192 * proceed with unloading.
1194 pr_debug("waiting for dropping of refcount\n");
1195 wait_for_completion(cmp);
1196 pr_debug("wait complete\n");
1199 static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1201 unsigned long flags;
1204 /* Remove policy from list */
1205 write_lock_irqsave(&cpufreq_driver_lock, flags);
1206 list_del(&policy->policy_list);
1208 for_each_cpu(cpu, policy->related_cpus)
1209 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1210 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1212 cpufreq_policy_put_kobj(policy, notify);
1213 free_cpumask_var(policy->real_cpus);
1214 free_cpumask_var(policy->related_cpus);
1215 free_cpumask_var(policy->cpus);
/*
 * cpufreq_online() - bring cpufreq management online for @cpu.
 *
 * If the CPU already belongs to an active policy, just add it to that
 * policy.  Otherwise (re)initialize the policy: call the driver's
 * ->init(), populate the cpumasks, register the kobject, run the
 * notifier chains and start the governor via cpufreq_init_policy().
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * NOTE(review): this excerpt omits several lines (conditions/braces);
 * comments below describe only what is visible here.
 */
1219 static int cpufreq_online(unsigned int cpu)
1221 	struct cpufreq_policy *policy;
1223 	unsigned long flags;
1227 	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1229 	/* Check if this CPU already has a policy to manage it */
1230 	policy = per_cpu(cpufreq_cpu_data, cpu);
1232 		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1233 		if (!policy_is_inactive(policy))
1234 			return cpufreq_add_policy_cpu(policy, cpu);
1236 		/* This is the only online CPU for the policy. Start over. */
1238 		down_write(&policy->rwsem);
1240 		policy->governor = NULL;
1241 		up_write(&policy->rwsem);
1244 		policy = cpufreq_policy_alloc(cpu);
1249 	cpumask_copy(policy->cpus, cpumask_of(cpu));
1251 	/* call driver. From then on the cpufreq must be able
1252 	 * to accept all calls to ->verify and ->setpolicy for this CPU
1254 	ret = cpufreq_driver->init(policy);
1256 		pr_debug("initialization failed\n");
1257 		goto out_free_policy;
1260 	down_write(&policy->rwsem);
1263 		/* related_cpus should at least include policy->cpus. */
1264 		cpumask_copy(policy->related_cpus, policy->cpus);
1265 		/* Remember CPUs present at the policy creation time. */
1266 		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1268 		/* Name and add the kobject */
1269 		ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
1271 				  cpumask_first(policy->related_cpus));
1273 			pr_err("%s: failed to add policy->kobj: %d\n", __func__,
1275 			goto out_exit_policy;
1280 	 * affected cpus must always be the ones which are online. We aren't
1281 	 * managing offline cpus here.
1283 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
	/* Snapshot driver-provided limits as the user policy on first init. */
1286 		policy->user_policy.min = policy->min;
1287 		policy->user_policy.max = policy->max;
1289 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1290 		for_each_cpu(j, policy->related_cpus)
1291 			per_cpu(cpufreq_cpu_data, j) = policy;
1292 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1294 		policy->min = policy->user_policy.min;
1295 		policy->max = policy->user_policy.max;
1298 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1299 		policy->cur = cpufreq_driver->get(policy->cpu);
1301 			pr_err("%s: ->get() failed\n", __func__);
1302 			goto out_exit_policy;
1307 	 * Sometimes boot loaders set CPU frequency to a value outside of
1308 	 * frequency table present with cpufreq core. In such cases CPU might be
1309 	 * unstable if it has to run on that frequency for long duration of time
1310 	 * and so its better to set it to a frequency which is specified in
1311 	 * freq-table. This also makes cpufreq stats inconsistent as
1312 	 * cpufreq-stats would fail to register because current frequency of CPU
1313 	 * isn't found in freq-table.
1315 	 * Because we don't want this change to effect boot process badly, we go
1316 	 * for the next freq which is >= policy->cur ('cur' must be set by now,
1317 	 * otherwise we will end up setting freq to lowest of the table as 'cur'
1318 	 * is initialized to zero).
1320 	 * We are passing target-freq as "policy->cur - 1" otherwise
1321 	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1322 	 * equal to target-freq.
1324 	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1326 		/* Are we running at unknown frequency ? */
1327 		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1328 		if (ret == -EINVAL) {
1329 			/* Warn user and fix it */
1330 			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1331 				__func__, policy->cpu, policy->cur);
1332 			ret = __cpufreq_driver_target(policy, policy->cur - 1,
1333 				CPUFREQ_RELATION_L);
1336 			 * Reaching here after boot in a few seconds may not
1337 			 * mean that system will remain stable at "unknown"
1338 			 * frequency for longer duration. Hence, a BUG_ON().
1341 			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1342 				__func__, policy->cpu, policy->cur);
1346 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1347 				CPUFREQ_START, policy);
1350 		ret = cpufreq_add_dev_interface(policy);
1352 			goto out_exit_policy;
1353 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1354 				CPUFREQ_CREATE_POLICY, policy);
1355 		cpufreq_times_create_policy(policy);
1357 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1358 		list_add(&policy->policy_list, &cpufreq_policy_list);
1359 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1362 	ret = cpufreq_init_policy(policy);
1364 		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1365 		       __func__, cpu, ret);
1366 		/* cpufreq_policy_free() will notify based on this */
1368 		goto out_exit_policy;
1371 	up_write(&policy->rwsem);
1373 	kobject_uevent(&policy->kobj, KOBJ_ADD);
1375 	/* Callback for handling stuff after policy is ready */
1376 	if (cpufreq_driver->ready)
1377 		cpufreq_driver->ready(policy);
1379 	pr_debug("initialization complete\n");
	/* Error unwind: release the rwsem, let the driver tear down, free policy. */
1384 	up_write(&policy->rwsem);
1386 	if (cpufreq_driver->exit)
1387 		cpufreq_driver->exit(policy);
1389 	cpufreq_policy_free(policy, !new_policy);
1394  * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device being added (dev->id is the CPU number)
1396  * @sif: Subsystem interface structure pointer (not used)
 *
 * subsys_interface add callback.  For an online CPU this delegates to
 * cpufreq_online().  For an offline CPU it only records the CPU in
 * policy->real_cpus and creates the sysfs symlink; the hotplug notifier
 * will do the real bring-up later.
1398 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1400 	unsigned cpu = dev->id;
1403 	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1405 	if (cpu_online(cpu)) {
1406 		ret = cpufreq_online(cpu);
1409 		 * A hotplug notifier will follow and we will handle it as CPU
1410 		 * online then. For now, just create the sysfs link, unless
1411 		 * there is no policy or the link is already present.
1413 		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
		/* test_and_set guards against creating the symlink twice */
1415 		ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1416 			? add_cpu_dev_symlink(policy, cpu) : 0;
/*
 * cpufreq_offline_prepare() - first phase of taking @cpu offline.
 *
 * Stops the governor, drops @cpu from policy->cpus, and either records
 * the last governor/policy (policy became inactive) or nominates a new
 * policy->cpu and restarts the governor for the remaining CPUs.
 */
1422 static void cpufreq_offline_prepare(unsigned int cpu)
1424 	struct cpufreq_policy *policy;
1426 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1428 	policy = cpufreq_cpu_get_raw(cpu);
1430 		pr_debug("%s: No cpu_data found\n", __func__);
1435 		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1437 			pr_err("%s: Failed to stop governor\n", __func__);
1440 	down_write(&policy->rwsem);
1441 	cpumask_clear_cpu(cpu, policy->cpus);
1443 	if (policy_is_inactive(policy)) {
		/*
		 * Remember the governor so it can be restored when the
		 * policy comes back online.
		 * NOTE(review): strncpy() does not guarantee NUL
		 * termination if the name fills the buffer — presumably
		 * the (not visible) length argument accounts for that;
		 * verify at the call site.
		 */
1445 			strncpy(policy->last_governor, policy->governor->name,
1448 		policy->last_policy = policy->policy;
1449 	} else if (cpu == policy->cpu) {
1450 		/* Nominate new CPU */
1451 		policy->cpu = cpumask_any(policy->cpus);
1453 	up_write(&policy->rwsem);
1455 	/* Start governor again for active policy */
1456 	if (!policy_is_inactive(policy)) {
1458 		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1460 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1463 			pr_err("%s: Failed to start governor\n", __func__);
1465 	} else if (cpufreq_driver->stop_cpu) {
1466 		cpufreq_driver->stop_cpu(policy);
/*
 * cpufreq_offline_finish() - second phase of taking @cpu offline.
 *
 * Runs only once the policy has become inactive (no CPUs left): exits
 * the governor and calls the driver's ->exit() so a later light-weight
 * ->init() can succeed.
 */
1470 static void cpufreq_offline_finish(unsigned int cpu)
1472 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1475 		pr_debug("%s: No cpu_data found\n", __func__);
1479 	/* Only proceed for inactive policies */
1480 	if (!policy_is_inactive(policy))
1483 	/* If cpu is last user of policy, free policy */
1485 		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1487 			pr_err("%s: Failed to exit governor\n", __func__);
1491 	 * Perform the ->exit() even during light-weight tear-down,
1492 	 * since this is a core component, and is essential for the
1493 	 * subsequent light-weight ->init() to succeed.
1495 	if (cpufreq_driver->exit) {
1496 		cpufreq_driver->exit(policy);
		/* freq_table belonged to the driver instance just exited */
1497 		policy->freq_table = NULL;
1502  * cpufreq_remove_dev - remove a CPU device
 * @dev: CPU device being removed
 * @sif: Subsystem interface structure pointer (not used)
 *
1504  * Removes the cpufreq interface for a CPU device.
 * Frees the policy once no present CPU references it any more.
1506 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1508 	unsigned int cpu = dev->id;
1509 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1514 	if (cpu_online(cpu)) {
1515 		cpufreq_offline_prepare(cpu);
1516 		cpufreq_offline_finish(cpu);
	/* Drop the device's bookkeeping regardless of its online state. */
1519 	cpumask_clear_cpu(cpu, policy->real_cpus);
1520 	remove_cpu_dev_symlink(policy, cpu);
1522 	if (cpumask_empty(policy->real_cpus))
1523 		cpufreq_policy_free(policy, true);
/*
 * handle_update() - deferred work that re-evaluates a policy.
 * Scheduled (via policy->update) when the policy needs refreshing
 * outside the current context; simply calls cpufreq_update_policy().
 */
1526 static void handle_update(struct work_struct *work)
1528 	struct cpufreq_policy *policy =
1529 		container_of(work, struct cpufreq_policy, update);
1530 	unsigned int cpu = policy->cpu;
1531 	pr_debug("handle_update for cpu %u called\n", cpu);
1532 	cpufreq_update_policy(cpu);
1536  * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
1538  * @policy: policy managing CPUs
1539  * @new_freq: CPU frequency the CPU actually runs at
1541  * We adjust to current frequency first, and need to clean up later.
1542  * So either call to cpufreq_update_policy() or schedule handle_update()).
 *
 * Emits a begin/end transition pair so notifier listeners (and
 * loops-per-jiffy scaling) observe the frequency the hardware is
 * actually running at.
1544 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1545 				unsigned int new_freq)
1547 	struct cpufreq_freqs freqs;
1549 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1550 		 policy->cur, new_freq);
1552 	freqs.old = policy->cur;
1553 	freqs.new = new_freq;
1555 	cpufreq_freq_transition_begin(policy, &freqs);
1556 	cpufreq_freq_transition_end(policy, &freqs, 0);
1560  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
1563  * This is the last known freq, without actually getting it from the driver.
1564  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 * Returns 0 if no policy exists for @cpu.
1566 unsigned int cpufreq_quick_get(unsigned int cpu)
1568 	struct cpufreq_policy *policy;
1569 	unsigned int ret_freq = 0;
	/* setpolicy drivers own the frequency; ask the hardware directly */
1571 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1572 		return cpufreq_driver->get(cpu);
1574 	policy = cpufreq_cpu_get(cpu);
1576 		ret_freq = policy->cur;
1577 		cpufreq_cpu_put(policy);
1582 EXPORT_SYMBOL(cpufreq_quick_get);
1585  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
1588  * Just return the max possible frequency for a given CPU.
 * Returns 0 if no policy exists for @cpu.
1590 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1592 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1593 	unsigned int ret_freq = 0;
1596 		ret_freq = policy->max;
1597 		cpufreq_cpu_put(policy);
1602 EXPORT_SYMBOL(cpufreq_quick_get_max);
/*
 * __cpufreq_get() - read the hardware frequency for @policy and resync.
 *
 * Caller must hold policy->rwsem.  If the driver-reported frequency
 * disagrees with policy->cur (and the driver doesn't declare
 * CPUFREQ_CONST_LOOPS), notify the out-of-sync path and schedule a
 * deferred policy update.  Returns the driver-reported frequency, or 0
 * if the driver has no ->get().
 */
1604 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1606 	unsigned int ret_freq = 0;
1608 	if (!cpufreq_driver->get)
1611 	ret_freq = cpufreq_driver->get(policy->cpu);
1613 	/* Updating inactive policies is invalid, so avoid doing that. */
1614 	if (unlikely(policy_is_inactive(policy)))
1617 	if (ret_freq && policy->cur &&
1618 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1619 		/* verify no discrepancy between actual and
1620 					saved value exists */
1621 		if (unlikely(ret_freq != policy->cur)) {
1622 			cpufreq_out_of_sync(policy, ret_freq);
1623 			schedule_work(&policy->update);
1631  * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
1634  * Get the CPU current (static) CPU frequency
 * Locked wrapper around __cpufreq_get(); returns 0 if @cpu has no policy.
1636 unsigned int cpufreq_get(unsigned int cpu)
1638 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1639 	unsigned int ret_freq = 0;
1642 		down_read(&policy->rwsem);
1643 		ret_freq = __cpufreq_get(policy);
1644 		up_read(&policy->rwsem);
1646 		cpufreq_cpu_put(policy);
1651 EXPORT_SYMBOL(cpufreq_get);
/* Hooks cpufreq into the CPU subsystem's device add/remove path. */
1653 static struct subsys_interface cpufreq_interface = {
1655 	.subsys		= &cpu_subsys,
1656 	.add_dev	= cpufreq_add_dev,
1657 	.remove_dev	= cpufreq_remove_dev,
1661  * In case platform wants some specific frequency to be configured
 * during suspend, set policy->suspend_freq and use this generic helper
 * as the driver's ->suspend() callback.
1664 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1668 	if (!policy->suspend_freq) {
1669 		pr_debug("%s: suspend_freq not defined\n", __func__);
1673 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1674 			policy->suspend_freq);
	/* RELATION_H: pick the highest frequency not above suspend_freq */
1676 	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1677 			CPUFREQ_RELATION_H);
1679 		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1680 				__func__, policy->suspend_freq, ret);
1684 EXPORT_SYMBOL(cpufreq_generic_suspend);
1687  * cpufreq_suspend() - Suspend CPUFreq governors
1689  * Called during system wide Suspend/Hibernate cycles for suspending governors
1690  * as some platforms can't change frequency after this point in suspend cycle.
1691  * Because some of the devices (like: i2c, regulators, etc) they use for
1692  * changing frequency are suspended quickly after this point.
1694 void cpufreq_suspend(void)
1696 	struct cpufreq_policy *policy;
1698 	if (!cpufreq_driver)
1704 	pr_debug("%s: Suspending Governors\n", __func__);
1706 	for_each_active_policy(policy) {
1707 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1708 			pr_err("%s: Failed to stop governor for policy: %p\n",
1710 		else if (cpufreq_driver->suspend
1711 		    && cpufreq_driver->suspend(policy))
1712 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
	/* From here on, __cpufreq_governor() refuses further operations. */
1717 	cpufreq_suspended = true;
1721  * cpufreq_resume() - Resume CPUFreq governors
1723  * Called during system wide Suspend/Hibernate cycle for resuming governors that
1724  * are suspended with cpufreq_suspend().
1726 void cpufreq_resume(void)
1728 	struct cpufreq_policy *policy;
1730 	if (!cpufreq_driver)
1733 	if (unlikely(!cpufreq_suspended))
	/* Clear the flag first so governor operations are allowed again. */
1736 	cpufreq_suspended = false;
1741 	pr_debug("%s: Resuming Governors\n", __func__);
1743 	for_each_active_policy(policy) {
1744 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1745 			pr_err("%s: Failed to resume driver: %p\n", __func__,
1747 		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1748 		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1749 			pr_err("%s: Failed to start governor for policy: %p\n",
1754 	 * schedule call cpufreq_update_policy() for first-online CPU, as that
1755 	 * wouldn't be hotplugged-out on suspend. It will verify that the
1756 	 * current freq is in sync with what we believe it to be.
1758 	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1759 	if (WARN_ON(!policy))
1762 	schedule_work(&policy->update);
1766  * cpufreq_get_current_driver - return current driver's name
1768  * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
1771 const char *cpufreq_get_current_driver(void)
1774 		return cpufreq_driver->name;
1778 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1781  * cpufreq_get_driver_data - return current driver data
1783  * Return the private data of the currently loaded cpufreq
1784  * driver, or NULL if no cpufreq driver is loaded.
1786 void *cpufreq_get_driver_data(void)
1789 		return cpufreq_driver->driver_data;
1793 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1795 /*********************************************************************
1796 * NOTIFIER LISTS INTERFACE *
1797 *********************************************************************/
1800  * cpufreq_register_notifier - register a driver with cpufreq
1801  * @nb: notifier function to register
1802  * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1804  * Add a driver to one of two lists: either a list of drivers that
1805  * are notified about clock rate changes (once before and once after
1806  * the transition), or a list of drivers that are notified about
1807  * changes in cpufreq policy.
1809  * This function may sleep, and has the same return conditions as
1810  * blocking_notifier_chain_register.
1812 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1816 	if (cpufreq_disabled())
1819 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
1822 	case CPUFREQ_TRANSITION_NOTIFIER:
		/* transition chain is SRCU so it can be called from atomic ctx */
1823 		ret = srcu_notifier_chain_register(
1824 				&cpufreq_transition_notifier_list, nb);
1826 	case CPUFREQ_POLICY_NOTIFIER:
1827 		ret = blocking_notifier_chain_register(
1828 				&cpufreq_policy_notifier_list, nb);
1836 EXPORT_SYMBOL(cpufreq_register_notifier);
1839  * cpufreq_unregister_notifier - unregister a driver with cpufreq
1840  * @nb: notifier block to be unregistered
1841  * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1843  * Remove a driver from the CPU frequency notifier list.
1845  * This function may sleep, and has the same return conditions as
1846  * blocking_notifier_chain_unregister.
1848 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1852 	if (cpufreq_disabled())
1856 	case CPUFREQ_TRANSITION_NOTIFIER:
1857 		ret = srcu_notifier_chain_unregister(
1858 				&cpufreq_transition_notifier_list, nb);
1860 	case CPUFREQ_POLICY_NOTIFIER:
1861 		ret = blocking_notifier_chain_unregister(
1862 				&cpufreq_policy_notifier_list, nb);
1870 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1873 /*********************************************************************
1875 *********************************************************************/
1877 /* Must set freqs->new to intermediate frequency */
/*
 * __target_intermediate() - switch to the driver's intermediate
 * frequency before the real target change.  Wraps the switch in a
 * transition begin/end pair and logs failures.  Returns the driver's
 * target_intermediate() result.
 */
1878 static int __target_intermediate(struct cpufreq_policy *policy,
1879 				 struct cpufreq_freqs *freqs, int index)
1883 	freqs->new = cpufreq_driver->get_intermediate(policy, index);
1885 	/* We don't need to switch to intermediate freq */
1889 	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1890 		 __func__, policy->cpu, freqs->old, freqs->new);
1892 	cpufreq_freq_transition_begin(policy, freqs);
1893 	ret = cpufreq_driver->target_intermediate(policy, index);
1894 	cpufreq_freq_transition_end(policy, freqs, ret);
1897 		pr_err("%s: Failed to change to intermediate frequency: %d\n",
/*
 * __target_index() - change frequency to freq_table[index] via the
 * driver's ->target_index(), optionally going through an intermediate
 * frequency first.  On failure after the intermediate switch, notifies
 * a transition back to policy->restore_freq.
 */
1903 static int __target_index(struct cpufreq_policy *policy,
1904 			  struct cpufreq_frequency_table *freq_table, int index)
1906 	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1907 	unsigned int intermediate_freq = 0;
1908 	int retval = -EINVAL;
	/* ASYNC_NOTIFICATION drivers send their own transition notifications */
1911 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1913 		/* Handle switching to intermediate frequency */
1914 		if (cpufreq_driver->get_intermediate) {
1915 			retval = __target_intermediate(policy, &freqs, index);
1919 			intermediate_freq = freqs.new;
1920 			/* Set old freq to intermediate */
1921 			if (intermediate_freq)
1922 				freqs.old = freqs.new;
1925 		freqs.new = freq_table[index].frequency;
1926 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1927 			 __func__, policy->cpu, freqs.old, freqs.new);
1929 		cpufreq_freq_transition_begin(policy, &freqs);
1932 	retval = cpufreq_driver->target_index(policy, index);
1934 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1938 		cpufreq_freq_transition_end(policy, &freqs, retval);
1941 		 * Failed after setting to intermediate freq? Driver should have
1942 		 * reverted back to initial frequency and so should we. Check
1943 		 * here for intermediate_freq instead of get_intermediate, in
1944 		 * case we haven't switched to intermediate freq at all.
1946 		if (unlikely(retval && intermediate_freq)) {
1947 			freqs.old = intermediate_freq;
1948 			freqs.new = policy->restore_freq;
1949 			cpufreq_freq_transition_begin(policy, &freqs);
1950 			cpufreq_freq_transition_end(policy, &freqs, 0);
/*
 * __cpufreq_driver_target() - set @policy's frequency to @target_freq.
 * @relation: CPUFREQ_RELATION_* rounding rule for table lookup.
 *
 * Caller must hold policy->rwsem.  Clamps @target_freq to the policy
 * min/max, short-circuits if already at the target, then dispatches to
 * the driver's ->target() or (via the frequency table) ->target_index().
 */
1957 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1958 			    unsigned int target_freq,
1959 			    unsigned int relation)
1961 	unsigned int old_target_freq = target_freq;
1962 	int retval = -EINVAL;
1964 	if (cpufreq_disabled())
1967 	/* Make sure that target_freq is within supported range */
1968 	if (target_freq > policy->max)
1969 		target_freq = policy->max;
1970 	if (target_freq < policy->min)
1971 		target_freq = policy->min;
1973 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1974 		 policy->cpu, target_freq, relation, old_target_freq);
1977 	 * This might look like a redundant call as we are checking it again
1978 	 * after finding index. But it is left intentionally for cases where
1979 	 * exactly same freq is called again and so we can save on few function
	 * calls.
1982 	if (target_freq == policy->cur)
1985 	/* Save last value to restore later on errors */
1986 	policy->restore_freq = policy->cur;
1988 	if (cpufreq_driver->target)
1989 		retval = cpufreq_driver->target(policy, target_freq, relation);
1990 	else if (cpufreq_driver->target_index) {
1991 		struct cpufreq_frequency_table *freq_table;
1994 		freq_table = cpufreq_frequency_get_table(policy->cpu);
1995 		if (unlikely(!freq_table)) {
1996 			pr_err("%s: Unable to find freq_table\n", __func__);
2000 		retval = cpufreq_frequency_table_target(policy, freq_table,
2001 				target_freq, relation, &index);
2002 		if (unlikely(retval)) {
2003 			pr_err("%s: Unable to find matching freq\n", __func__);
		/* table lookup may round back onto the current frequency */
2007 		if (freq_table[index].frequency == policy->cur) {
2012 		retval = __target_index(policy, freq_table, index);
2018 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
/*
 * cpufreq_driver_target() - locked wrapper around
 * __cpufreq_driver_target(); takes policy->rwsem for writing.
 */
2020 int cpufreq_driver_target(struct cpufreq_policy *policy,
2021 			  unsigned int target_freq,
2022 			  unsigned int relation)
2026 	down_write(&policy->rwsem);
2028 	ret = __cpufreq_driver_target(policy, target_freq, relation);
2030 	up_write(&policy->rwsem);
2034 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
/*
 * __cpufreq_governor() - dispatch a governor event (INIT/START/STOP/
 * LIMITS/POLICY_EXIT) to policy->governor->governor().
 *
 * Enforces governor state transitions via policy->governor_enabled,
 * refuses operations while suspended, falls back to the performance
 * governor if the current one cannot cope with the hardware's
 * transition latency, and manages the governor module refcount across
 * POLICY_INIT/POLICY_EXIT.
 */
2036 static int __cpufreq_governor(struct cpufreq_policy *policy,
2041 	/* Only must be defined when default governor is known to have latency
2042 	   restrictions, like e.g. conservative or ondemand.
2043 	   That this is the case is already ensured in Kconfig
2045 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2046 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
2048 	struct cpufreq_governor *gov = NULL;
2051 	/* Don't start any governor operations if we are entering suspend */
2052 	if (cpufreq_suspended)
2055 	 * Governor might not be initiated here if ACPI _PPC changed
2056 	 * notification happened, so check it.
2058 	if (!policy->governor)
2061 	if (policy->governor->max_transition_latency &&
2062 	    policy->cpuinfo.transition_latency >
2063 	    policy->governor->max_transition_latency) {
2067 			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2068 				policy->governor->name, gov->name);
2069 			policy->governor = gov;
	/* Pin the governor module for the lifetime of the policy binding. */
2073 	if (event == CPUFREQ_GOV_POLICY_INIT)
2074 		if (!try_module_get(policy->governor->owner))
2077 	pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
2079 	mutex_lock(&cpufreq_governor_lock);
	/* Reject START on a running governor and STOP/LIMITS on a stopped one. */
2080 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2081 	    || (!policy->governor_enabled
2082 	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2083 		mutex_unlock(&cpufreq_governor_lock);
2087 	if (event == CPUFREQ_GOV_STOP)
2088 		policy->governor_enabled = false;
2089 	else if (event == CPUFREQ_GOV_START)
2090 		policy->governor_enabled = true;
2092 	mutex_unlock(&cpufreq_governor_lock);
2094 	ret = policy->governor->governor(policy, event);
2097 		if (event == CPUFREQ_GOV_POLICY_INIT)
2098 			policy->governor->initialized++;
2099 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
2100 			policy->governor->initialized--;
2102 		/* Restore original values */
2103 		mutex_lock(&cpufreq_governor_lock);
2104 		if (event == CPUFREQ_GOV_STOP)
2105 			policy->governor_enabled = true;
2106 		else if (event == CPUFREQ_GOV_START)
2107 			policy->governor_enabled = false;
2108 		mutex_unlock(&cpufreq_governor_lock);
	/* Drop the module ref on failed INIT or successful EXIT. */
2111 	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2112 			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2113 		module_put(policy->governor->owner);
/*
 * cpufreq_register_governor() - add @governor to the global governor
 * list, unless one with the same name is already registered.
 */
2118 int cpufreq_register_governor(struct cpufreq_governor *governor)
2125 	if (cpufreq_disabled())
2128 	mutex_lock(&cpufreq_governor_mutex);
2130 	governor->initialized = 0;
2132 	if (!find_governor(governor->name)) {
2134 		list_add(&governor->governor_list, &cpufreq_governor_list);
2137 	mutex_unlock(&cpufreq_governor_mutex);
2140 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
/*
 * cpufreq_unregister_governor() - remove @governor from the global
 * list and forget it as "last_governor" on all inactive policies so
 * they don't try to restore an unloaded governor on re-activation.
 */
2142 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2144 	struct cpufreq_policy *policy;
2145 	unsigned long flags;
2150 	if (cpufreq_disabled())
2153 	/* clear last_governor for all inactive policies */
2154 	read_lock_irqsave(&cpufreq_driver_lock, flags);
2155 	for_each_inactive_policy(policy) {
2156 		if (!strcmp(policy->last_governor, governor->name)) {
2157 			policy->governor = NULL;
2158 			strcpy(policy->last_governor, "\0");
2161 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2163 	mutex_lock(&cpufreq_governor_mutex);
2164 	list_del(&governor->governor_list);
2165 	mutex_unlock(&cpufreq_governor_mutex);
2168 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2171 /*********************************************************************
2172 * POLICY INTERFACE *
2173 *********************************************************************/
2176  * cpufreq_get_policy - get the current cpufreq_policy
2177  * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
2180  * Reads the current cpufreq policy.
 * Copies the live policy into the caller's buffer; returns non-zero if
 * @cpu has no policy.
2182 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2184 	struct cpufreq_policy *cpu_policy;
2188 	cpu_policy = cpufreq_cpu_get(cpu);
2192 	memcpy(policy, cpu_policy, sizeof(*policy));
2194 	cpufreq_cpu_put(cpu_policy);
2197 EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * cpufreq_set_policy() - apply @new_policy to @policy.
2200  * policy : current policy.
2201  * new_policy: policy to be set.
 *
 * Validates the new limits with the driver, lets CPUFREQ_ADJUST
 * notifiers tweak them, applies min/max, and — for governor-based
 * drivers — performs the full stop/exit/init/start governor switch,
 * rolling back to the old governor if the new one fails to start.
 * Caller must hold policy->rwsem for writing.
 */
2203 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2204 				struct cpufreq_policy *new_policy)
2206 	struct cpufreq_governor *old_gov;
2209 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2210 		 new_policy->cpu, new_policy->min, new_policy->max);
2212 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2215 	 * This check works well when we store new min/max freq attributes,
2216 	 * because new_policy is a copy of policy with one field updated.
2218 	if (new_policy->min > new_policy->max)
2221 	/* verify the cpu speed can be set within this limit */
2222 	ret = cpufreq_driver->verify(new_policy);
2226 	/* adjust if necessary - all reasons */
2227 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2228 			CPUFREQ_ADJUST, new_policy);
2231 	 * verify the cpu speed can be set within this limit, which might be
2232 	 * different to the first one
2234 	ret = cpufreq_driver->verify(new_policy);
2238 	/* notification of the new policy */
2239 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2240 			CPUFREQ_NOTIFY, new_policy);
2242 	scale_freq_capacity(new_policy, NULL);
2244 	policy->min = new_policy->min;
2245 	policy->max = new_policy->max;
2246 	trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
2248 	pr_debug("new min and max freqs are %u - %u kHz\n",
2249 		 policy->min, policy->max);
2251 	if (cpufreq_driver->setpolicy) {
2252 		policy->policy = new_policy->policy;
2253 		pr_debug("setting range\n");
2254 		return cpufreq_driver->setpolicy(new_policy);
	/* Same governor: only the limits changed. */
2257 	if (new_policy->governor == policy->governor)
2260 	pr_debug("governor switch\n");
2262 	/* save old, working values */
2263 	old_gov = policy->governor;
2264 	/* end old governor */
2266 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2268 			/* This can happen due to race with other operations */
2269 			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
2270 				 __func__, old_gov->name, ret);
		/*
		 * rwsem dropped around POLICY_EXIT — presumably the
		 * governor's exit path takes locks that would deadlock
		 * against this rwsem; confirm before restructuring.
		 */
2274 		up_write(&policy->rwsem);
2275 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2276 		down_write(&policy->rwsem);
2279 			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
2280 			       __func__, old_gov->name, ret);
2285 	/* start new governor */
2286 	policy->governor = new_policy->governor;
2287 	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2289 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
2293 		up_write(&policy->rwsem);
2294 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2295 		down_write(&policy->rwsem);
2298 	/* new governor failed, so re-start old one */
2299 	pr_debug("starting governor %s failed\n", policy->governor->name);
2301 		policy->governor = old_gov;
2302 		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
2303 			policy->governor = NULL;
2305 			__cpufreq_governor(policy, CPUFREQ_GOV_START);
2311 	pr_debug("governor: change or update limits\n");
2312 	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2316  * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2317  * @cpu: CPU which shall be re-evaluated
2319  * Useful for policy notifiers which have different necessities
2320  * at different times.
 *
 * Rebuilds a policy from the stored user min/max, re-reads the
 * hardware frequency (BIOS may have changed it behind our back) and
 * re-applies the policy via cpufreq_set_policy().
2322 int cpufreq_update_policy(unsigned int cpu)
2324 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2325 	struct cpufreq_policy new_policy;
2331 	down_write(&policy->rwsem);
2333 	pr_debug("updating policy for CPU %u\n", cpu);
2334 	memcpy(&new_policy, policy, sizeof(*policy));
2335 	new_policy.min = policy->user_policy.min;
2336 	new_policy.max = policy->user_policy.max;
2339 	 * BIOS might change freq behind our back
2340 	 * -> ask driver for current freq and notify governors about a change
2342 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2343 		new_policy.cur = cpufreq_driver->get(cpu);
2344 		if (WARN_ON(!new_policy.cur)) {
2350 			pr_debug("Driver did not initialize current freq\n");
2351 			policy->cur = new_policy.cur;
2353 			if (policy->cur != new_policy.cur && has_target())
2354 				cpufreq_out_of_sync(policy, new_policy.cur);
2358 	ret = cpufreq_set_policy(policy, &new_policy);
2361 	up_write(&policy->rwsem);
2363 	cpufreq_cpu_put(policy);
2366 EXPORT_SYMBOL(cpufreq_update_policy);
/*
 * cpufreq_cpu_callback() - CPU hotplug notifier: routes online/
 * down-prepare/down-failed events to the cpufreq online/offline paths.
 */
2368 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2369 					unsigned long action, void *hcpu)
2371 	unsigned int cpu = (unsigned long)hcpu;
2373 	switch (action & ~CPU_TASKS_FROZEN) {
2375 		cpufreq_online(cpu);
2378 	case CPU_DOWN_PREPARE:
2379 		cpufreq_offline_prepare(cpu);
2383 		cpufreq_offline_finish(cpu);
	/* Aborted offline: bring the CPU's policy back up. */
2386 	case CPU_DOWN_FAILED:
2387 		cpufreq_online(cpu);
2393 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2394 	.notifier_call = cpufreq_cpu_callback,
2397 /*********************************************************************
2399 *********************************************************************/
/*
 * cpufreq_boost_set_sw() - default software boost toggle: recompute
 * each active policy's cpuinfo limits from its frequency table (which
 * includes/excludes boost frequencies per @state) and push the new
 * limits to the governor.
 */
2400 static int cpufreq_boost_set_sw(int state)
2402 	struct cpufreq_frequency_table *freq_table;
2403 	struct cpufreq_policy *policy;
2406 	for_each_active_policy(policy) {
2407 		freq_table = cpufreq_frequency_get_table(policy->cpu);
2409 			ret = cpufreq_frequency_table_cpuinfo(policy,
2412 				pr_err("%s: Policy frequency update failed\n",
2416 			policy->user_policy.max = policy->max;
2417 			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
/*
 * cpufreq_boost_trigger_state() - enable/disable frequency boost.
 * Flips cpufreq_driver->boost_enabled first, calls the driver's
 * ->set_boost(), and rolls the flag back if that fails.
 */
2424 int cpufreq_boost_trigger_state(int state)
2426 	unsigned long flags;
2429 	if (cpufreq_driver->boost_enabled == state)
2432 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2433 	cpufreq_driver->boost_enabled = state;
2434 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2436 	ret = cpufreq_driver->set_boost(state);
2438 		write_lock_irqsave(&cpufreq_driver_lock, flags);
2439 		cpufreq_driver->boost_enabled = !state;
2440 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2442 		pr_err("%s: Cannot %s BOOST\n",
2443 		       __func__, state ? "enable" : "disable");
/* Whether the loaded driver supports boost; 0 if no driver is loaded. */
2449 int cpufreq_boost_supported(void)
2451 	if (likely(cpufreq_driver))
2452 		return cpufreq_driver->boost_supported;
2456 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
/*
 * create_boost_sysfs_file() - expose the global "boost" attribute in
 * sysfs for boost-capable drivers, defaulting ->set_boost to the
 * software implementation when the driver provides none.
 */
2458 static int create_boost_sysfs_file(void)
2462 	if (!cpufreq_boost_supported())
2466 	 * Check if driver provides function to enable boost -
2467 	 * if not, use cpufreq_boost_set_sw as default
2469 	if (!cpufreq_driver->set_boost)
2470 		cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2472 	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2474 		pr_err("%s: cannot register global BOOST sysfs file\n",
/* Counterpart of create_boost_sysfs_file(): remove the "boost" attribute. */
2480 static void remove_boost_sysfs_file(void)
2482 	if (cpufreq_boost_supported())
2483 		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
/*
 * cpufreq_enable_boost_support() - let a driver opt into boost after
 * registration; marks boost supported and creates the sysfs file.
 */
2486 int cpufreq_enable_boost_support(void)
2488 	if (!cpufreq_driver)
2491 	if (cpufreq_boost_supported())
2494 	cpufreq_driver->boost_supported = true;
2496 	/* This will get removed on driver unregister */
2497 	return create_boost_sysfs_file();
2499 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
/* Current boost state of the loaded driver (assumes a driver is loaded). */
2501 int cpufreq_boost_enabled(void)
2503 	return cpufreq_driver->boost_enabled;
2505 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2507 /*********************************************************************
2508 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2509 *********************************************************************/
2512  * cpufreq_register_driver - register a CPU Frequency driver
2513  * @driver_data: A struct cpufreq_driver containing the values#
2514  * submitted by the CPU Frequency driver.
2516  * Registers a CPU Frequency driver to this core code. This code
2517  * returns zero on success, -EBUSY when another driver got here first
2518  * (and isn't unregistered in the meantime).
 *
 * Validates the driver's callback combination (exactly one of
 * setpolicy / target / target_index; get_intermediate and
 * target_intermediate must come as a pair), installs it as the global
 * cpufreq_driver, registers the subsystem interface and the hotplug
 * notifier, and unwinds everything on failure.
2521 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2523 	unsigned long flags;
2526 	if (cpufreq_disabled())
2529 	if (!driver_data || !driver_data->verify || !driver_data->init ||
2530 	    !(driver_data->setpolicy || driver_data->target_index ||
2531 		    driver_data->target) ||
2532 	     (driver_data->setpolicy && (driver_data->target_index ||
2533 		    driver_data->target)) ||
2534 	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2537 	pr_debug("trying to register driver %s\n", driver_data->name);
2539 	/* Protect against concurrent CPU online/offline. */
2542 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2543 	if (cpufreq_driver) {
2544 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2548 	cpufreq_driver = driver_data;
2549 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	/* setpolicy drivers change frequency without notifying the core */
2551 	if (driver_data->setpolicy)
2552 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2554 	ret = create_boost_sysfs_file();
2556 		goto err_null_driver;
2558 	ret = subsys_interface_register(&cpufreq_interface);
2560 		goto err_boost_unreg;
2562 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2563 	    list_empty(&cpufreq_policy_list)) {
2564 		/* if all ->init() calls failed, unregister */
2566 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2571 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
2572 	pr_debug("driver %s up and running\n", driver_data->name);
	/* Error unwind in reverse order of setup. */
2579 	subsys_interface_unregister(&cpufreq_interface);
2581 	remove_boost_sysfs_file();
2583 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2584 	cpufreq_driver = NULL;
2585 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2588 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2591  * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered (must match the loaded one)
2593  * Unregister the current CPUFreq driver. Only call this if you have
2594  * the right to do so, i.e. if you have succeeded in initialising before!
2595  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2596  * currently not initialised.
2598 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2600 	unsigned long flags;
2602 	if (!cpufreq_driver || (driver != cpufreq_driver))
2605 	pr_debug("unregistering driver %s\n", driver->name);
2607 	/* Protect against concurrent cpu hotplug */
2609 	subsys_interface_unregister(&cpufreq_interface);
2610 	remove_boost_sysfs_file();
2611 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2613 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2615 	cpufreq_driver = NULL;
2617 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2622 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/* Parent kobject for global cpufreq sysfs entries (/sys/devices/system/cpu/cpufreq). */
2624 struct kobject *cpufreq_global_kobject;
2625 EXPORT_SYMBOL(cpufreq_global_kobject);
/*
 * cpufreq_core_init() - core initcall; creates the global "cpufreq"
 * kobject under the CPU subsystem root.  BUG()s if creation fails,
 * since all of sysfs registration depends on it.
 */
2627 static int __init cpufreq_core_init(void)
2629 	if (cpufreq_disabled())
2632 	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2633 	BUG_ON(!cpufreq_global_kobject);
2637 core_initcall(cpufreq_core_init);