*/
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;
+struct atomic_notifier_head cpufreq_govinfo_notifier_list;
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
}
pure_initcall(init_cpufreq_transition_notifier_list);
+static bool init_cpufreq_govinfo_notifier_list_called;
+static int __init init_cpufreq_govinfo_notifier_list(void)
+{
+ ATOMIC_INIT_NOTIFIER_HEAD(&cpufreq_govinfo_notifier_list);
+ init_cpufreq_govinfo_notifier_list_called = true;
+ return 0;
+}
+pure_initcall(init_cpufreq_govinfo_notifier_list);
+
static int off __read_mostly;
static int cpufreq_disabled(void)
{
if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
- pr_err("%s: Failed to stop governor\n", __func__);
+ pr_err("%s: Failed to stop governor for CPU%u, policy CPU%u\n",
+ __func__, cpu, policy->cpu);
return ret;
}
}
ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
if (ret) {
- pr_err("%s: Failed to start governor\n", __func__);
+ pr_err("%s: Failed to start governor for CPU%u, policy CPU%u\n",
+ __func__, cpu, policy->cpu);
return ret;
}
}
if (has_target()) {
int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret)
- pr_err("%s: Failed to stop governor\n", __func__);
+ pr_err("%s: Failed to stop governor for CPU%u\n",
+ __func__, cpu);
}
down_write(&policy->rwsem);
if (has_target()) {
int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
if (ret)
- pr_err("%s: Failed to exit governor\n", __func__);
+ pr_err("%s: Failed to exit governor for CPU%u, policy CPU%u\n",
+ __func__, cpu, policy->cpu);
}
/*
if (cpufreq_disabled())
return -EINVAL;
- WARN_ON(!init_cpufreq_transition_notifier_list_called);
+ WARN_ON(!init_cpufreq_transition_notifier_list_called ||
+ !init_cpufreq_govinfo_notifier_list_called);
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
ret = blocking_notifier_chain_register(
&cpufreq_policy_notifier_list, nb);
break;
+ case CPUFREQ_GOVINFO_NOTIFIER:
+ ret = atomic_notifier_chain_register(
+ &cpufreq_govinfo_notifier_list, nb);
+ break;
default:
ret = -EINVAL;
}
ret = blocking_notifier_chain_unregister(
&cpufreq_policy_notifier_list, nb);
break;
+ case CPUFREQ_GOVINFO_NOTIFIER:
+ ret = atomic_notifier_chain_unregister(
+ &cpufreq_govinfo_notifier_list, nb);
+ break;
default:
ret = -EINVAL;
}
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
- /*
- * This might look like a redundant call as we are checking it again
- * after finding index. But it is left intentionally for cases where
- * exactly same freq is called again and so we can save on few function
- * calls.
- */
- if (target_freq == policy->cur)
- return 0;
-
/* Save last value to restore later on errors */
policy->restore_freq = policy->cur;
{
unsigned int cpu = (unsigned long)hcpu;
+ if (!cpufreq_driver)
+ return NOTIFY_OK;
+
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
cpufreq_online(cpu);
pr_debug("trying to register driver %s\n", driver_data->name);
+ /* Register for hotplug notifiers before blocking hotplug. */
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+
/* Protect against concurrent CPU online/offline. */
get_online_cpus();
goto err_if_unreg;
}
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- pr_debug("driver %s up and running\n", driver_data->name);
+ pr_info("driver %s up and running\n", driver_data->name);
out:
put_online_cpus();
if (!cpufreq_driver || (driver != cpufreq_driver))
return -EINVAL;
- pr_debug("unregistering driver %s\n", driver->name);
+ pr_info("unregistering driver %s\n", driver->name);
/* Protect against concurrent cpu hotplug */
get_online_cpus();