
cpuidle: lpm-levels: Fixes for clockevents_notify
author		Mahesh Sivasubramanian <msivasub@codeaurora.org>
		Mon, 1 Feb 2016 18:14:55 +0000 (11:14 -0700)
committer	David Keitel <dkeitel@codeaurora.org>
		Tue, 22 Mar 2016 18:07:21 +0000 (11:07 -0700)
The use of clockevents_notify() is deprecated; targeted APIs are provided
in its place. Switch the broadcast timer notifications over to
tick_broadcast_enter() and tick_broadcast_exit().

Change-Id: I3441873eb4009b105db04f4a18d28ae9ccd07e95
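
For context, the pattern this patch applies (a minimal sketch in kernel C, not
code from this driver; the helper name is hypothetical): the deprecated
interface passed a CLOCK_EVT_NOTIFY_BROADCAST_* reason plus a CPU id to
clockevents_notify(), while the replacement helpers implicitly act on the
calling CPU:

	#include <linux/tick.h>

	/*
	 * Hypothetical illustration of the replacement API. Both helpers
	 * act on the calling CPU, so no cpu argument is needed.
	 */
	static void example_idle_with_broadcast(void)
	{
		/*
		 * Hand timer duty to the broadcast device before the local
		 * tick device is powered down; a nonzero return means the
		 * broadcast device cannot take over.
		 */
		if (tick_broadcast_enter())
			return;

		/* ... enter a low-power state that stops the local timer ... */

		/* Reclaim the local tick device on wakeup. */
		tick_broadcast_exit();
	}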

drivers/cpuidle/lpm-levels.c
include/linux/tick.h
kernel/time/tick-sched.c

drivers/cpuidle/lpm-levels.c
index 2fbda5c..2070aa4 100644
@@ -164,17 +164,6 @@ static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
        spin_unlock(&debug_lock);
 }
 
-static void setup_broadcast_timer(void *arg)
-{
-       unsigned long reason = (unsigned long)arg;
-       int cpu = raw_smp_processor_id();
-
-       reason = reason ?
-               CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
-
-       clockevents_notify(reason, &cpu);
-}
-
 static int lpm_cpu_callback(struct notifier_block *cpu_nb,
        unsigned long action, void *hcpu)
 {
@@ -190,10 +179,6 @@ static int lpm_cpu_callback(struct notifier_block *cpu_nb,
                cluster_unprepare(cluster, get_cpu_mask((unsigned int) cpu),
                                        NR_LPM_LEVELS, false);
                break;
-       case CPU_ONLINE:
-               smp_call_function_single(cpu, setup_broadcast_timer,
-                                       (void *)true, 1);
-               break;
        default:
                break;
        }
@@ -391,7 +376,6 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
        int cpu;
        int next_cpu = raw_smp_processor_id();
        ktime_t next_event;
-       struct tick_device *td;
        struct cpumask online_cpus_in_cluster;
 
        next_event.tv64 = KTIME_MAX;
@@ -410,9 +394,11 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
                        &cluster->num_children_in_sync, cpu_online_mask);
 
        for_each_cpu(cpu, &online_cpus_in_cluster) {
-               td = &per_cpu(tick_cpu_device, cpu);
-               if (td->evtdev->next_event.tv64 < next_event.tv64) {
-                       next_event.tv64 = td->evtdev->next_event.tv64;
+               ktime_t *next_event_c;
+
+               next_event_c = get_next_event_cpu(cpu);
+               if (next_event_c->tv64 < next_event.tv64) {
+                       next_event.tv64 = next_event_c->tv64;
                        next_cpu = cpu;
                }
        }
@@ -558,8 +544,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
        /* Notify cluster enter event after successfully config completion */
        cluster_notify(cluster, level, true);
 
-       sched_set_cluster_dstate(&cluster->child_cpus, idx, 0, 0);
-
        cluster->last_level = idx;
        return 0;
 
@@ -686,7 +670,6 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
                BUG_ON(ret);
 
        }
-       sched_set_cluster_dstate(&cluster->child_cpus, 0, 0, 0);
 
        cluster_notify(cluster, &cluster->levels[last_level], false);
        cluster_unprepare(cluster->parent, &cluster->child_cpus,
@@ -699,7 +682,6 @@ static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
                                bool from_idle)
 {
        struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
-       unsigned int cpu = raw_smp_processor_id();
        bool jtag_save_restore =
                        cluster->cpu->levels[cpu_index].jtag_save_restore;
 
@@ -715,7 +697,7 @@ static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
         */
        if (from_idle && (cpu_level->use_bc_timer ||
                        (cpu_index >= cluster->min_child_level)))
-               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+               tick_broadcast_enter();
 
        if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
                || (cpu_level->mode ==
@@ -734,13 +716,12 @@ static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
                                bool from_idle)
 {
        struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
-       unsigned int cpu = raw_smp_processor_id();
        bool jtag_save_restore =
                        cluster->cpu->levels[cpu_index].jtag_save_restore;
 
        if (from_idle && (cpu_level->use_bc_timer ||
                        (cpu_index >= cluster->min_child_level)))
-               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+               tick_broadcast_exit();
 
        if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
                || (cpu_level->mode ==
@@ -784,6 +765,7 @@ unlock_and_return:
 }
 
 #if !defined(CONFIG_CPU_V7)
+#include <asm/cpuidle.h>
 bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
 {
        /*
@@ -808,7 +790,7 @@ bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
                update_debug_pc_event(CPU_ENTER, state_id,
                                                0xdeaffeed, 0xdeaffeed, true);
                stop_critical_timings();
-               success = !cpu_suspend(state_id);
+               success = !arm_cpuidle_suspend(state_id);
                start_critical_timings();
                update_debug_pc_event(CPU_EXIT, state_id,
                                                success, 0xdeaffeed, true);
@@ -859,8 +841,6 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
        trace_cpu_idle_rcuidle(idx, dev->cpu);
 
        pwr_params = &cluster->cpu->levels[idx].pwr;
-       sched_set_cpu_cstate(smp_processor_id(), idx + 1,
-               pwr_params->energy_overhead, pwr_params->latency_us);
 
        trace_cpu_idle_enter(idx);
 
@@ -891,7 +871,6 @@ exit:
        cluster_unprepare(cluster, cpumask, idx, true);
        cpu_unprepare(cluster, idx, true);
 
-       sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
 
        time = ktime_to_ns(ktime_get()) - time;
        do_div(time, 1000);
@@ -1152,9 +1131,6 @@ static int lpm_probe(struct platform_device *pdev)
         * how late lpm_levels gets initialized.
         */
        register_hotcpu_notifier(&lpm_cpu_nblk);
-       get_cpu();
-       on_each_cpu(setup_broadcast_timer, (void *)true, 1);
-       put_cpu();
        suspend_set_ops(&lpm_suspend_ops);
        hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 
include/linux/tick.h
index e312219..37dc6fd 100644
@@ -203,4 +203,5 @@ static inline void tick_nohz_task_switch(void)
                __tick_nohz_task_switch();
 }
 
+ktime_t *get_next_event_cpu(unsigned int cpu);
 #endif
kernel/time/tick-sched.c
index 7c7ec45..0e456b0 100644
@@ -1181,3 +1181,8 @@ int tick_check_oneshot_change(int allow_nohz)
        tick_nohz_switch_to_nohz();
        return 0;
 }
+
+ktime_t *get_next_event_cpu(unsigned int cpu)
+{
+       return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event);
+}