
cpuidle: psci: Support CPU hotplug for the hierarchical model
author		Ulf Hansson <ulf.hansson@linaro.org>	Thu, 10 Oct 2019 10:01:48 +0000 (12:01 +0200)
committer	Ulf Hansson <ulf.hansson@linaro.org>	Thu, 2 Jan 2020 15:52:18 +0000 (16:52 +0100)

When the hierarchical CPU topology is used and a CPU is put offline, that
CPU prevents its PM domain from being powered off, because genpd observes
the corresponding attached device as still being active from a runtime PM
point of view. Furthermore, any potential master PM domains are then also
prevented from being powered off.
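
For context, a minimal sketch of why genpd sees the device as active (the
function name below is hypothetical; the driver's real path is
psci_enter_domain_idle_state, visible in the hunks further down). The attached
device's runtime PM usage count is only dropped across idle entry, so a CPU
that goes offline simply stops entering idle while still holding its reference:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * Illustrative sketch, not the driver's verbatim code: the per-CPU
 * device attached to the genpd is only runtime suspended around idle
 * entry. An offline CPU never reaches this path, so its device stays
 * runtime active and genpd keeps the domain (and its masters) powered.
 */
static void cpu_domain_idle_enter_sketch(struct device *pd_dev)
{
	pm_runtime_put_sync_suspend(pd_dev);	/* domain may now be powered off */

	/* ... enter the selected (domain) idle state via PSCI ... */

	pm_runtime_get_sync(pd_dev);		/* domain is powered on again */
}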

To address this limitation, let's add a new CPU hotplug state
(CPUHP_AP_CPU_PM_STARTING) and register up/down callbacks for it, which
allows us to deal with runtime PM accordingly.
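
The registration pattern looks roughly as follows (hypothetical consumer
names; the actual callbacks added below are psci_idle_cpuhp_up() and
psci_idle_cpuhp_down()). Callbacks for a *_STARTING state run on the
hotplugged CPU itself, and the _nocalls() variant does not invoke them for
CPUs that are already online at registration time:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* Hypothetical names, for illustration only. */
static int my_cpu_up(unsigned int cpu)
{
	/* Take a runtime PM reference: the CPU is in use again. */
	return 0;
}

static int my_cpu_down(unsigned int cpu)
{
	/* Drop the reference so genpd may power off the CPU's domain. */
	return 0;
}

static int __init my_cpuhp_register(void)
{
	return cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					 "my-driver:starting",
					 my_cpu_up, my_cpu_down);
}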

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Rafael J. Wysocki <rafael@kernel.org>
drivers/cpuidle/cpuidle-psci.c
include/linux/cpuhotplug.h

diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 6e7804e..9d779be 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -8,6 +8,7 @@
 
 #define pr_fmt(fmt) "CPUidle PSCI: " fmt
 
+#include <linux/cpuhotplug.h>
 #include <linux/cpuidle.h>
 #include <linux/cpumask.h>
 #include <linux/cpu_pm.h>
@@ -31,6 +32,7 @@ struct psci_cpuidle_data {
 
 static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
 static DEFINE_PER_CPU(u32, domain_state);
+static bool psci_cpuidle_use_cpuhp __initdata;
 
 static inline void psci_set_domain_state(u32 state)
 {
@@ -72,6 +74,44 @@ static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
        return ret;
 }
 
+static int psci_idle_cpuhp_up(unsigned int cpu)
+{
+       struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+
+       if (pd_dev)
+               pm_runtime_get_sync(pd_dev);
+
+       return 0;
+}
+
+static int psci_idle_cpuhp_down(unsigned int cpu)
+{
+       struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+
+       if (pd_dev) {
+               pm_runtime_put_sync(pd_dev);
+               /* Clear domain state to start fresh at next online. */
+               psci_set_domain_state(0);
+       }
+
+       return 0;
+}
+
+static void __init psci_idle_init_cpuhp(void)
+{
+       int err;
+
+       if (!psci_cpuidle_use_cpuhp)
+               return;
+
+       err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+                                       "cpuidle/psci:online",
+                                       psci_idle_cpuhp_up,
+                                       psci_idle_cpuhp_down);
+       if (err)
+               pr_warn("Failed %d while setup cpuhp state\n", err);
+}
+
 static int psci_enter_idle_state(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int idx)
 {
@@ -166,9 +206,11 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
                 * selection of a shared state for the domain, assumes the
                 * domain states are all deeper states.
                 */
-               if (data->dev)
+               if (data->dev) {
                        drv->states[state_count - 1].enter =
                                psci_enter_domain_idle_state;
+                       psci_cpuidle_use_cpuhp = true;
+               }
        }
 
        /* Idle states parsed correctly, store them in the per-cpu struct. */
@@ -289,6 +331,7 @@ static int __init psci_idle_init(void)
                        goto out_fail;
        }
 
+       psci_idle_init_cpuhp();
        return 0;
 
 out_fail:
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e51ee77..01f04ed 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -95,6 +95,7 @@ enum cpuhp_state {
        CPUHP_AP_OFFLINE,
        CPUHP_AP_SCHED_STARTING,
        CPUHP_AP_RCUTREE_DYING,
+       CPUHP_AP_CPU_PM_STARTING,
        CPUHP_AP_IRQ_GIC_STARTING,
        CPUHP_AP_IRQ_HIP04_STARTING,
        CPUHP_AP_IRQ_ARMADA_XP_STARTING,