
Merge Linux 4.4.202 into 10
author sbwml <984419930@qq.com>
Wed, 20 Nov 2019 17:55:05 +0000 (01:55 +0800)
committer sbwml <984419930@qq.com>
Wed, 20 Nov 2019 17:55:05 +0000 (01:55 +0800)
39 files changed:
Documentation/kernel-parameters.txt
Makefile
arch/arm/include/asm/system_misc.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/setup.c
arch/arm/kernel/signal.c
arch/arm/kernel/smp.c
arch/arm/mm/fault.c
arch/arm/mm/proc-macros.S
arch/arm/vfp/vfpmodule.c
arch/arm64/Kconfig
arch/arm64/kernel/arm64ksyms.c
arch/arm64/kernel/asm-offsets.c
arch/x86/Kconfig
arch/x86/kernel/cpu/Makefile
drivers/base/cpu.c
drivers/cpuidle/lpm-levels.c
drivers/firmware/psci.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/nfc/fdp/i2c.c
drivers/power/qcom/apm.c
drivers/regulator/spm-regulator.c
drivers/usb/core/config.c
fs/cifs/file.c
fs/dcache.c
fs/fs-writeback.c
include/linux/cpu.h
include/linux/gfp.h
include/linux/psci.h
include/net/sock.h
kernel/time/alarmtimer.c
mm/filemap.c
mm/vmstat.c
net/core/flow_dissector.c
net/ipv4/tcp_ipv4.c
net/netfilter/ipvs/ip_vs_ctl.c

Simple merge
diff --cc Makefile
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -111,17 -111,13 +111,22 @@@ unsigned int elf_hwcap2 __read_mostly
  EXPORT_SYMBOL(elf_hwcap2);
  
  
 +char* (*arch_read_hardware_id)(void);
 +EXPORT_SYMBOL(arch_read_hardware_id);
 +
 +unsigned int boot_reason;
 +EXPORT_SYMBOL(boot_reason);
 +
 +unsigned int cold_boot;
 +EXPORT_SYMBOL(cold_boot);
 +
  #ifdef MULTI_CPU
  struct processor processor __read_mostly;
+ #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+ struct processor *cpu_vtable[NR_CPUS] = {
+       [0] = &processor,
+ };
+ #endif
  #endif
  #ifdef MULTI_TLB
  struct cpu_tlb_fns cpu_tlb __read_mostly;
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -104,8 -93,6 +104,7 @@@ config ARM64
        select SYSCTL_EXCEPTION_TRACE
        select HAVE_CONTEXT_TRACKING
        select HAVE_ARM_SMCCC
-       select HAVE_ARM_SMCCC
 +      select THREAD_INFO_IN_TASK
        help
          ARM 64-bit (AArch64) Linux support.
  
@@@ -70,13 -68,8 +70,14 @@@ EXPORT_SYMBOL(test_and_change_bit)
  
  #ifdef CONFIG_FUNCTION_TRACER
  EXPORT_SYMBOL(_mcount);
 +NOKPROBE_SYMBOL(_mcount);
  #endif
 +      /* caching functions */
 +EXPORT_SYMBOL(__dma_inv_range);
 +EXPORT_SYMBOL(__dma_clean_range);
 +EXPORT_SYMBOL(__dma_flush_range);
 +
        /* arm-smccc */
  EXPORT_SYMBOL(arm_smccc_smc);
  EXPORT_SYMBOL(arm_smccc_hvc);
@@@ -130,18 -159,11 +130,20 @@@ int main(void)
    DEFINE(CPU_CTX_SP,          offsetof(struct cpu_suspend_ctx, sp));
    DEFINE(MPIDR_HASH_MASK,     offsetof(struct mpidr_hash, mask));
    DEFINE(MPIDR_HASH_SHIFTS,   offsetof(struct mpidr_hash, shift_aff));
 -  DEFINE(SLEEP_SAVE_SP_SZ,    sizeof(struct sleep_save_sp));
 -  DEFINE(SLEEP_SAVE_SP_PHYS,  offsetof(struct sleep_save_sp, save_ptr_stash_phys));
 -  DEFINE(SLEEP_SAVE_SP_VIRT,  offsetof(struct sleep_save_sp, save_ptr_stash));
 +  DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS,        offsetof(struct sleep_stack_data, system_regs));
 +  DEFINE(SLEEP_STACK_DATA_CALLEE_REGS,        offsetof(struct sleep_stack_data, callee_saved_regs));
 +#endif
 +  DEFINE(ARM_SMCCC_RES_X0_OFFS,       offsetof(struct arm_smccc_res, a0));
 +  DEFINE(ARM_SMCCC_RES_X2_OFFS,       offsetof(struct arm_smccc_res, a2));
 +  BLANK();
 +  DEFINE(HIBERN_PBE_ORIG,     offsetof(struct pbe, orig_address));
 +  DEFINE(HIBERN_PBE_ADDR,     offsetof(struct pbe, address));
 +  DEFINE(HIBERN_PBE_NEXT,     offsetof(struct pbe, next));
 +  BLANK();
 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 +  DEFINE(TRAMP_VALIAS,                TRAMP_VALIAS);
  #endif
+   DEFINE(ARM_SMCCC_RES_X0_OFFS,       offsetof(struct arm_smccc_res, a0));
+   DEFINE(ARM_SMCCC_RES_X2_OFFS,       offsetof(struct arm_smccc_res, a2));
    return 0;
  }
Simple merge
Simple merge
Simple merge
diff --cc drivers/cpuidle/lpm-levels.c
index 1eaef20,0000000..9aa1400
mode 100644,000000..100644
--- /dev/null
@@@ -1,2044 -1,0 +1,2045 @@@
 +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 + * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 + * Copyright (C) 2009 Intel Corporation
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License version 2 and
 + * only version 2 as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + */
 +
 +#include <linux/module.h>
 +#include <linux/kernel.h>
 +#include <linux/init.h>
 +#include <linux/slab.h>
 +#include <linux/platform_device.h>
 +#include <linux/mutex.h>
 +#include <linux/cpu.h>
 +#include <linux/of.h>
 +#include <linux/irqchip/msm-mpm-irq.h>
 +#include <linux/hrtimer.h>
 +#include <linux/ktime.h>
 +#include <linux/tick.h>
 +#include <linux/suspend.h>
 +#include <linux/pm_qos.h>
 +#include <linux/of_platform.h>
 +#include <linux/smp.h>
 +#include <linux/remote_spinlock.h>
 +#include <linux/msm_remote_spinlock.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/coresight-cti.h>
 +#include <linux/moduleparam.h>
 +#include <linux/sched.h>
 +#include <linux/cpu_pm.h>
 +#include <linux/arm-smccc.h>
++#include <linux/psci.h>
 +#include <soc/qcom/spm.h>
 +#include <soc/qcom/pm.h>
 +#include <soc/qcom/rpm-notifier.h>
 +#include <soc/qcom/event_timer.h>
 +#include <soc/qcom/lpm-stats.h>
 +#include <soc/qcom/jtag.h>
 +#include <soc/qcom/minidump.h>
 +#include <asm/cputype.h>
 +#include <asm/arch_timer.h>
 +#include <asm/cacheflush.h>
 +#include <asm/suspend.h>
 +#include <asm/cpuidle.h>
 +#include "lpm-levels.h"
 +#include "lpm-workarounds.h"
 +#include <trace/events/power.h>
 +#define CREATE_TRACE_POINTS
 +#include <trace/events/trace_msm_low_power.h>
 +#include "../../drivers/clk/msm/clock.h"
 +
 +#define SCLK_HZ (32768)
 +#define SCM_HANDOFF_LOCK_ID "S:7"
 +#define PSCI_POWER_STATE(reset) (reset << 30)
 +#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
 +static remote_spinlock_t scm_handoff_lock;
 +
 +enum {
 +      MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
 +      MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
 +};
 +
 +enum debug_event {
 +      CPU_ENTER,
 +      CPU_EXIT,
 +      CLUSTER_ENTER,
 +      CLUSTER_EXIT,
 +      PRE_PC_CB,
 +      CPU_HP_STARTING,
 +      CPU_HP_DYING,
 +};
 +
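 +/*
 + * One entry of the low power mode debug ring buffer.  update_debug_pc_event()
 + * below fills num_dbg_elements (0x100) of these, indexed by a static counter
 + * masked to a power of two, recording the event type, timestamp, CPU and up
 + * to four arguments for each CPU/cluster enter, exit and hotplug event.
 + */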
 +struct lpm_debug {
 +      cycle_t time;
 +      enum debug_event evt;
 +      int cpu;
 +      uint32_t arg1;
 +      uint32_t arg2;
 +      uint32_t arg3;
 +      uint32_t arg4;
 +};
 +
 +struct lpm_cluster *lpm_root_node;
 +
 +#define MAXSAMPLES 5
 +
 +static bool lpm_prediction = true;
 +module_param_named(lpm_prediction,
 +      lpm_prediction, bool, S_IRUGO | S_IWUSR | S_IWGRP);
 +
 +static uint32_t ref_stddev = 100;
 +module_param_named(
 +      ref_stddev, ref_stddev, uint, S_IRUGO | S_IWUSR | S_IWGRP
 +);
 +
 +static uint32_t tmr_add = 100;
 +module_param_named(
 +      tmr_add, tmr_add, uint, S_IRUGO | S_IWUSR | S_IWGRP
 +);
 +
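 +/*
 + * Per-CPU idle history used for sleep-length prediction: a ring of the last
 + * MAXSAMPLES residencies and the low power mode chosen for each, plus flags
 + * marking timer wakeups that invalidate the samples.  Filled by
 + * update_history() and consumed by lpm_cpuidle_predict().
 + */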
 +struct lpm_history {
 +      uint32_t resi[MAXSAMPLES];
 +      int mode[MAXSAMPLES];
 +      int nsamp;
 +      uint32_t hptr;
 +      uint32_t hinvalid;
 +      uint32_t htmr_wkup;
 +      int64_t stime;
 +};
 +
 +static DEFINE_PER_CPU(struct lpm_history, hist);
 +
 +static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
 +static bool suspend_in_progress;
 +static struct hrtimer lpm_hrtimer;
 +static struct hrtimer histtimer;
 +static struct lpm_debug *lpm_debug;
 +static phys_addr_t lpm_debug_phys;
 +static const int num_dbg_elements = 0x100;
 +static int lpm_cpu_callback(struct notifier_block *cpu_nb,
 +                              unsigned long action, void *hcpu);
 +
 +static void cluster_unprepare(struct lpm_cluster *cluster,
 +              const struct cpumask *cpu, int child_idx, bool from_idle,
 +              int64_t time);
 +static void cluster_prepare(struct lpm_cluster *cluster,
 +              const struct cpumask *cpu, int child_idx, bool from_idle,
 +              int64_t time);
 +
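 +/*
 + * CPU hotplug notifier: a dying CPU adds its vote to the cluster low power
 + * state via cluster_prepare(), and a starting CPU removes it again via
 + * cluster_unprepare(), keeping cluster-wide mode votes consistent across
 + * hotplug.  See lpm_cpu_callback() below.
 + */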
 +static struct notifier_block __refdata lpm_cpu_nblk = {
 +      .notifier_call = lpm_cpu_callback,
 +};
 +
 +static bool menu_select;
 +module_param_named(
 +      menu_select, menu_select, bool, S_IRUGO | S_IWUSR | S_IWGRP
 +);
 +
 +static int msm_pm_sleep_time_override;
 +module_param_named(sleep_time_override,
 +      msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
 +static uint64_t suspend_wake_time;
 +
 +static bool print_parsed_dt;
 +module_param_named(
 +      print_parsed_dt, print_parsed_dt, bool, S_IRUGO | S_IWUSR | S_IWGRP
 +);
 +
 +static bool sleep_disabled;
 +module_param_named(sleep_disabled,
 +      sleep_disabled, bool, S_IRUGO | S_IWUSR | S_IWGRP);
 +
 +s32 msm_cpuidle_get_deep_idle_latency(void)
 +{
 +      return 10;
 +}
 +
 +void lpm_suspend_wake_time(uint64_t wakeup_time)
 +{
 +      if (wakeup_time <= 0) {
 +              suspend_wake_time = msm_pm_sleep_time_override * MSEC_PER_SEC;
 +              return;
 +      }
 +
 +      if (msm_pm_sleep_time_override &&
 +              (msm_pm_sleep_time_override < wakeup_time))
 +              suspend_wake_time = msm_pm_sleep_time_override * MSEC_PER_SEC;
 +      else
 +              suspend_wake_time = wakeup_time;
 +}
 +EXPORT_SYMBOL(lpm_suspend_wake_time);
 +
 +static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
 +                                      struct latency_level *lat_level)
 +{
 +      struct list_head *list;
 +      struct lpm_cluster_level *level;
 +      struct lpm_cluster *n;
 +      struct power_params *pwr_params;
 +      uint32_t latency = 0;
 +      int i;
 +
 +      if (!cluster->list.next) {
 +              for (i = 0; i < cluster->nlevels; i++) {
 +                      level = &cluster->levels[i];
 +                      pwr_params = &level->pwr;
 +                      if (lat_level->reset_level == level->reset_level) {
 +                              if ((latency > pwr_params->latency_us)
 +                                              || (!latency))
 +                                      latency = pwr_params->latency_us;
 +                              break;
 +                      }
 +              }
 +      } else {
 +              list_for_each(list, &cluster->parent->child) {
 +                      n = list_entry(list, typeof(*n), list);
 +                      if (lat_level->level_name) {
 +                              if (strcmp(lat_level->level_name,
 +                                               n->cluster_name))
 +                                      continue;
 +                      }
 +                      for (i = 0; i < n->nlevels; i++) {
 +                              level = &n->levels[i];
 +                              pwr_params = &level->pwr;
 +                              if (lat_level->reset_level ==
 +                                              level->reset_level) {
 +                                      if ((latency > pwr_params->latency_us)
 +                                                              || (!latency))
 +                                              latency =
 +                                              pwr_params->latency_us;
 +                                      break;
 +                              }
 +                      }
 +              }
 +      }
 +      return latency;
 +}
 +
 +static uint32_t least_cpu_latency(struct list_head *child,
 +                              struct latency_level *lat_level)
 +{
 +      struct list_head *list;
 +      struct lpm_cpu_level *level;
 +      struct power_params *pwr_params;
 +      struct lpm_cpu *cpu;
 +      struct lpm_cluster *n;
 +      uint32_t latency = 0;
 +      int i;
 +
 +      list_for_each(list, child) {
 +              n = list_entry(list, typeof(*n), list);
 +              if (lat_level->level_name) {
 +                      if (strcmp(lat_level->level_name, n->cluster_name))
 +                              continue;
 +              }
 +              cpu = n->cpu;
 +              for (i = 0; i < cpu->nlevels; i++) {
 +                      level = &cpu->levels[i];
 +                      pwr_params = &level->pwr;
 +                      if (lat_level->reset_level == level->reset_level) {
 +                              if ((latency > pwr_params->latency_us)
 +                                                      || (!latency))
 +                                      latency = pwr_params->latency_us;
 +                              break;
 +                      }
 +              }
 +      }
 +      return latency;
 +}
 +
 +static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
 +                                                      int affinity_level)
 +{
 +      struct lpm_cluster *n;
 +
 +      if ((cluster->aff_level == affinity_level)
 +              || ((cluster->cpu) && (affinity_level == 0)))
 +              return cluster;
 +      else if (!cluster->cpu) {
 +              n =  list_entry(cluster->child.next, typeof(*n), list);
 +              return cluster_aff_match(n, affinity_level);
 +      } else
 +              return NULL;
 +}
 +
 +int lpm_get_latency(struct latency_level *level, uint32_t *latency)
 +{
 +      struct lpm_cluster *cluster;
 +      uint32_t val;
 +
 +      if (!lpm_root_node) {
 +              pr_err("%s: lpm_probe not completed\n", __func__);
 +              return -EAGAIN;
 +      }
 +
 +      if ((level->affinity_level < 0)
 +              || (level->affinity_level > lpm_root_node->aff_level)
 +              || (level->reset_level < LPM_RESET_LVL_RET)
 +              || (level->reset_level > LPM_RESET_LVL_PC)
 +              || !latency)
 +              return -EINVAL;
 +
 +      cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
 +      if (!cluster) {
 +              pr_err("%s:No matching cluster found for affinity_level:%d\n",
 +                                      __func__, level->affinity_level);
 +              return -EINVAL;
 +      }
 +
 +      if (level->affinity_level == 0)
 +              val = least_cpu_latency(&cluster->parent->child, level);
 +      else
 +              val = least_cluster_latency(cluster, level);
 +
 +      if (!val) {
 +              pr_err("%s:No mode with affinity_level:%d reset_level:%d\n",
 +                      __func__, level->affinity_level, level->reset_level);
 +              return -EINVAL;
 +      }
 +
 +      *latency = val;
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL(lpm_get_latency);
 +
 +static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
 +              uint32_t arg2, uint32_t arg3, uint32_t arg4)
 +{
 +      struct lpm_debug *dbg;
 +      int idx;
 +      static DEFINE_SPINLOCK(debug_lock);
 +      static int pc_event_index;
 +
 +      if (!lpm_debug)
 +              return;
 +
 +      spin_lock(&debug_lock);
 +      idx = pc_event_index++;
 +      dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
 +
 +      dbg->evt = event;
 +      dbg->time = arch_counter_get_cntvct();
 +      dbg->cpu = raw_smp_processor_id();
 +      dbg->arg1 = arg1;
 +      dbg->arg2 = arg2;
 +      dbg->arg3 = arg3;
 +      dbg->arg4 = arg4;
 +      spin_unlock(&debug_lock);
 +}
 +
 +static int lpm_cpu_callback(struct notifier_block *cpu_nb,
 +      unsigned long action, void *hcpu)
 +{
 +      unsigned long cpu = (unsigned long) hcpu;
 +      struct lpm_cluster *cluster = per_cpu(cpu_cluster, (unsigned int) cpu);
 +
 +      switch (action & ~CPU_TASKS_FROZEN) {
 +      case CPU_DYING:
 +              update_debug_pc_event(CPU_HP_DYING, cpu,
 +                              cluster->num_children_in_sync.bits[0],
 +                              cluster->child_cpus.bits[0], false);
 +              cluster_prepare(cluster, get_cpu_mask((unsigned int) cpu),
 +                                      NR_LPM_LEVELS, false, 0);
 +              break;
 +      case CPU_STARTING:
 +              update_debug_pc_event(CPU_HP_STARTING, cpu,
 +                              cluster->num_children_in_sync.bits[0],
 +                              cluster->child_cpus.bits[0], false);
 +              cluster_unprepare(cluster, get_cpu_mask((unsigned int) cpu),
 +                                      NR_LPM_LEVELS, false, 0);
 +              break;
 +      default:
 +              break;
 +      }
 +      return NOTIFY_OK;
 +}
 +
 +#ifdef CONFIG_ARM_PSCI
 +
 +static int __init set_cpuidle_ops(void)
 +{
 +      int ret = 0, cpu;
 +
 +      for_each_possible_cpu(cpu) {
 +              ret = arm_cpuidle_init(cpu);
 +              if (ret)
 +                      goto exit;
 +      }
 +
 +exit:
 +      return ret;
 +}
 +
 +#endif
 +
 +static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
 +{
 +      return HRTIMER_NORESTART;
 +}
 +
 +static void histtimer_cancel(void)
 +{
 +      hrtimer_try_to_cancel(&histtimer);
 +}
 +
 +static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
 +{
 +      int cpu = raw_smp_processor_id();
 +      struct lpm_history *history = &per_cpu(hist, cpu);
 +
 +      history->hinvalid = 1;
 +      return HRTIMER_NORESTART;
 +}
 +
 +static void histtimer_start(uint32_t time_us)
 +{
 +      uint64_t time_ns = time_us * NSEC_PER_USEC;
 +      ktime_t hist_ktime = ns_to_ktime(time_ns);
 +
 +      histtimer.function = histtimer_fn;
 +      hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
 +}
 +
 +static void cluster_timer_init(struct lpm_cluster *cluster)
 +{
 +      struct list_head *list;
 +
 +      if (!cluster)
 +              return;
 +
 +      hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 +
 +      list_for_each(list, &cluster->child) {
 +              struct lpm_cluster *n;
 +
 +              n = list_entry(list, typeof(*n), list);
 +              cluster_timer_init(n);
 +      }
 +}
 +
 +static void clusttimer_cancel(void)
 +{
 +      int cpu = raw_smp_processor_id();
 +      struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
 +
 +      hrtimer_try_to_cancel(&cluster->histtimer);
 +      hrtimer_try_to_cancel(&cluster->parent->histtimer);
 +}
 +
 +static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
 +{
 +      struct lpm_cluster *cluster = container_of(h,
 +                              struct lpm_cluster, histtimer);
 +
 +      cluster->history.hinvalid = 1;
 +      return HRTIMER_NORESTART;
 +}
 +
 +static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
 +{
 +      uint64_t time_ns = time_us * NSEC_PER_USEC;
 +      ktime_t clust_ktime = ns_to_ktime(time_ns);
 +
 +      cluster->histtimer.function = clusttimer_fn;
 +      hrtimer_start(&cluster->histtimer, clust_ktime,
 +                              HRTIMER_MODE_REL_PINNED);
 +}
 +
 +static void msm_pm_set_timer(uint32_t modified_time_us)
 +{
 +      u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
 +      ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
 +
 +      lpm_hrtimer.function = lpm_hrtimer_cb;
 +      hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
 +}
 +
 +int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
 +{
 +      int lpm = mode;
 +      int rc = 0;
 +      struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
 +                      smp_processor_id())->lpm_dev;
 +
 +      if (cpu_ops->tz_flag & MSM_SCM_L2_OFF ||
 +                      cpu_ops->tz_flag & MSM_SCM_L2_GDHS)
 +              coresight_cti_ctx_restore();
 +
 +      switch (mode) {
 +      case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
 +      case MSM_SPM_MODE_POWER_COLLAPSE:
 +      case MSM_SPM_MODE_FASTPC:
 +              cpu_ops->tz_flag = MSM_SCM_L2_OFF;
 +              coresight_cti_ctx_save();
 +              break;
 +      case MSM_SPM_MODE_GDHS:
 +              cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
 +              coresight_cti_ctx_save();
 +              break;
 +      case MSM_SPM_MODE_CLOCK_GATING:
 +      case MSM_SPM_MODE_RETENTION:
 +      case MSM_SPM_MODE_DISABLED:
 +              cpu_ops->tz_flag = MSM_SCM_L2_ON;
 +              break;
 +      default:
 +              cpu_ops->tz_flag = MSM_SCM_L2_ON;
 +              lpm = MSM_SPM_MODE_DISABLED;
 +              break;
 +      }
 +      rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm);
 +
 +      if (rc)
 +              pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
 +                              __func__, lpm, rc);
 +
 +      return rc;
 +}
 +
 +int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
 +{
 +      struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
 +                      smp_processor_id())->lpm_dev;
 +
 +      switch (mode) {
 +      case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
 +      case MSM_SPM_MODE_POWER_COLLAPSE:
 +      case MSM_SPM_MODE_FASTPC:
 +              cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF;
 +              break;
 +      default:
 +              break;
 +      }
 +      return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
 +}
 +
 +
 +int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
 +{
 +      return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
 +}
 +
 +static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
 +              struct lpm_cluster_level *level)
 +{
 +      struct low_power_ops *ops;
 +
 +      if (use_psci)
 +              return 0;
 +
 +      ops = &cluster->lpm_dev[ndevice];
 +      if (ops && ops->set_mode)
 +              return ops->set_mode(ops, level->mode[ndevice],
 +                              level->notify_rpm);
 +      else
 +              return -EINVAL;
 +}
 +
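 +/*
 + * Predict the upcoming idle residency from the last MAXSAMPLES samples:
 + * if the samples cluster tightly (standard deviation within ref_stddev, or
 + * small relative to the average), return their average; otherwise ignore
 + * the maximum sample and retry.  Failing that, if more than half of the
 + * samples for a given mode were premature exits, report that mode through
 + * *idx_restrict so cpu_power_select() avoids it and anything deeper.
 + */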
 +static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
 +              struct lpm_cpu *cpu, int *idx_restrict,
 +              uint32_t *idx_restrict_time)
 +{
 +      int i, j, divisor;
 +      uint64_t max, avg, stddev;
 +      int64_t thresh = LLONG_MAX;
 +      struct lpm_history *history = &per_cpu(hist, dev->cpu);
 +      uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
 +
 +      if (!lpm_prediction)
 +              return 0;
 +
 +      /*
 +       * Samples are marked invalid when woken up due to a timer,
 +       * so do not predict.
 +       */
 +      if (history->hinvalid) {
 +              history->hinvalid = 0;
 +              history->htmr_wkup = 1;
 +              history->stime = 0;
 +              return 0;
 +      }
 +
 +      /*
 +       * Predict only when all the samples are collected.
 +       */
 +      if (history->nsamp < MAXSAMPLES) {
 +              history->stime = 0;
 +              return 0;
 +      }
 +
 +      /*
 +       * Check whether the samples deviate much; if not, use their
 +       * average as the predicted sleep time. Otherwise, if any
 +       * specific mode has more premature exits, return the index of
 +       * that mode.
 +       */
 +
 +again:
 +      max = avg = divisor = stddev = 0;
 +      for (i = 0; i < MAXSAMPLES; i++) {
 +              int64_t value = history->resi[i];
 +
 +              if (value <= thresh) {
 +                      avg += value;
 +                      divisor++;
 +                      if (value > max)
 +                              max = value;
 +              }
 +      }
 +      do_div(avg, divisor);
 +
 +      for (i = 0; i < MAXSAMPLES; i++) {
 +              int64_t value = history->resi[i];
 +
 +              if (value <= thresh) {
 +                      int64_t diff = value - avg;
 +
 +                      stddev += diff * diff;
 +              }
 +      }
 +      do_div(stddev, divisor);
 +      stddev = int_sqrt(stddev);
 +
 +      /*
 +       * If the deviation is small, return the average; otherwise
 +       * ignore the maximum sample and retry.
 +       */
 +      if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
 +                                      || stddev <= ref_stddev) {
 +              history->stime = ktime_to_us(ktime_get()) + avg;
 +              return avg;
 +      } else if (divisor  > (MAXSAMPLES - 1)) {
 +              thresh = max - 1;
 +              goto again;
 +      }
 +
 +      /*
 +       * Find the number of premature exits for each of the modes,
 +       * excluding the clock-gating mode; if they are more than fifty
 +       * percent, restrict that mode and deeper ones.
 +       */
 +      if (history->htmr_wkup != 1) {
 +              for (j = 1; j < cpu->nlevels; j++) {
 +                      uint32_t failed = 0;
 +                      uint64_t total = 0;
 +
 +                      for (i = 0; i < MAXSAMPLES; i++) {
 +                              if ((history->mode[i] == j) &&
 +                                      (history->resi[i] < min_residency[j])) {
 +                                      failed++;
 +                                      total += history->resi[i];
 +                              }
 +                      }
 +                      if (failed > (MAXSAMPLES/2)) {
 +                              *idx_restrict = j;
 +                              do_div(total, failed);
 +                              *idx_restrict_time = total;
 +                              history->stime = ktime_to_us(ktime_get())
 +                                              + *idx_restrict_time;
 +                              break;
 +                      }
 +              }
 +      }
 +      return 0;
 +}
 +
 +static inline void invalidate_predict_history(struct cpuidle_device *dev)
 +{
 +      struct lpm_history *history = &per_cpu(hist, dev->cpu);
 +
 +      if (!lpm_prediction)
 +              return;
 +
 +      if (history->hinvalid) {
 +              history->hinvalid = 0;
 +              history->htmr_wkup = 1;
 +              history->stime = 0;
 +      }
 +}
 +
 +static void clear_predict_history(void)
 +{
 +      struct lpm_history *history;
 +      int i;
 +      unsigned int cpu;
 +
 +      if (!lpm_prediction)
 +              return;
 +
 +      for_each_possible_cpu(cpu) {
 +              history = &per_cpu(hist, cpu);
 +              for (i = 0; i < MAXSAMPLES; i++) {
 +                      history->resi[i]  = 0;
 +                      history->mode[i] = -1;
 +                      history->hptr = 0;
 +                      history->nsamp = 0;
 +                      history->stime = 0;
 +              }
 +      }
 +}
 +
 +static void update_history(struct cpuidle_device *dev, int idx);
 +
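 +/*
 + * Select the deepest allowed CPU low power level: walk the levels in order,
 + * stop once the PM QoS latency budget or the expected sleep time no longer
 + * covers a level, and optionally arm a history timer so a mispredicted
 + * shallow choice does not persist beyond the predicted residency.
 + */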
 +static int cpu_power_select(struct cpuidle_device *dev,
 +              struct lpm_cpu *cpu)
 +{
 +      int best_level = -1;
 +      uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
 +                                                      dev->cpu);
 +      s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
 +      uint32_t modified_time_us = 0;
 +      uint32_t next_event_us = 0;
 +      int i, idx_restrict;
 +      uint32_t lvl_latency_us = 0;
 +      uint64_t predicted = 0;
 +      uint32_t htime = 0, idx_restrict_time = 0;
 +      uint32_t next_wakeup_us = (uint32_t)sleep_us;
 +      uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
 +      uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);
 +
 +      if (!cpu)
 +              return -EINVAL;
 +
 +      if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us  < 0)
 +              return 0;
 +
 +      idx_restrict = cpu->nlevels + 1;
 +
 +      next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
 +
 +      for (i = 0; i < cpu->nlevels; i++) {
 +              struct lpm_cpu_level *level = &cpu->levels[i];
 +              struct power_params *pwr_params = &level->pwr;
 +              enum msm_pm_sleep_mode mode = level->mode;
 +              bool allow;
 +
 +              allow = lpm_cpu_mode_allow(dev->cpu, i, true);
 +
 +              if (!allow)
 +                      continue;
 +
 +              lvl_latency_us = pwr_params->latency_us;
 +
 +              if (latency_us < lvl_latency_us)
 +                      break;
 +
 +              if (next_event_us) {
 +                      if (next_event_us < lvl_latency_us)
 +                              break;
 +
 +                      if (((next_event_us - lvl_latency_us) < sleep_us) ||
 +                                      (next_event_us < sleep_us))
 +                              next_wakeup_us = next_event_us - lvl_latency_us;
 +              }
 +
 +              if (!i && !cpu_isolated(dev->cpu)) {
 +                      /*
 +                       * If next_wakeup_us itself is not sufficient for
 +                       * low power modes deeper than clock gating, do not
 +                       * call prediction.
 +                       */
 +                      if (next_wakeup_us > max_residency[i]) {
 +                              predicted = lpm_cpuidle_predict(dev, cpu,
 +                                      &idx_restrict, &idx_restrict_time);
 +                              if (predicted && (predicted < min_residency[i]))
 +                                      predicted = min_residency[i];
 +                      } else
 +                              invalidate_predict_history(dev);
 +              }
 +
 +              if (i >= idx_restrict)
 +                      break;
 +
 +              best_level = i;
 +
 +              if (next_event_us && next_event_us < sleep_us &&
 +                      (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
 +                      modified_time_us
 +                              = next_event_us - lvl_latency_us;
 +              else
 +                      modified_time_us = 0;
 +
 +              if (predicted ? (predicted <= max_residency[i])
 +                      : (next_wakeup_us <= max_residency[i]))
 +                      break;
 +      }
 +
 +      if (modified_time_us)
 +              msm_pm_set_timer(modified_time_us);
 +
 +      /*
 +       * Start a timer to avoid staying in a shallower mode forever
 +       * in case of misprediction
 +       */
 +      if ((predicted || (idx_restrict != (cpu->nlevels + 1)))
 +                      && ((best_level >= 0)
 +                      && (best_level < (cpu->nlevels-1)))) {
 +              htime = predicted + tmr_add;
 +              if (htime == tmr_add)
 +                      htime = idx_restrict_time;
 +              else if (htime > max_residency[best_level])
 +                      htime = max_residency[best_level];
 +
 +              if ((next_wakeup_us > htime) &&
 +                      ((next_wakeup_us - htime) > max_residency[best_level]))
 +                      histtimer_start(htime);
 +      }
 +
 +      trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
 +
 +      trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
 +                      predicted, htime);
 +
 +      return best_level;
 +}
 +
 +static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
 +              struct cpumask *mask, bool from_idle, uint32_t *pred_time)
 +{
 +      int cpu;
 +      int next_cpu = raw_smp_processor_id();
 +      ktime_t next_event;
 +      struct cpumask online_cpus_in_cluster;
 +      struct lpm_history *history;
 +      int64_t prediction = LONG_MAX;
 +
 +      next_event.tv64 = KTIME_MAX;
 +      if (!suspend_wake_time)
 +              suspend_wake_time =  msm_pm_sleep_time_override;
 +      if (!from_idle) {
 +              if (mask)
 +                      cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
 +              if (!suspend_wake_time)
 +                      return ~0ULL;
 +              else
 +                      return USEC_PER_MSEC * suspend_wake_time;
 +      }
 +
 +      cpumask_and(&online_cpus_in_cluster,
 +                      &cluster->num_children_in_sync, cpu_online_mask);
 +
 +      for_each_cpu(cpu, &online_cpus_in_cluster) {
 +              ktime_t *next_event_c;
 +
 +              next_event_c = get_next_event_cpu(cpu);
 +              if (next_event_c->tv64 < next_event.tv64) {
 +                      next_event.tv64 = next_event_c->tv64;
 +                      next_cpu = cpu;
 +              }
 +
 +              if (from_idle && lpm_prediction) {
 +                      history = &per_cpu(hist, cpu);
 +                      if (history->stime && (history->stime < prediction))
 +                              prediction = history->stime;
 +              }
 +      }
 +
 +      if (mask)
 +              cpumask_copy(mask, cpumask_of(next_cpu));
 +
 +      if (from_idle && lpm_prediction) {
 +              if (prediction > ktime_to_us(ktime_get()))
 +                      *pred_time = prediction - ktime_to_us(ktime_get());
 +      }
 +
 +      if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
 +              return ktime_to_us(ktime_sub(next_event, ktime_get()));
 +      else
 +              return 0;
 +}
 +
 +static int cluster_predict(struct lpm_cluster *cluster,
 +                              uint32_t *pred_us)
 +{
 +      int i, j;
 +      int ret = 0;
 +      struct cluster_history *history = &cluster->history;
 +      int64_t cur_time = ktime_to_us(ktime_get());
 +
 +      if (!lpm_prediction)
 +              return 0;
 +
 +      if (history->hinvalid) {
 +              history->hinvalid = 0;
 +              history->htmr_wkup = 1;
 +              history->flag = 0;
 +              return ret;
 +      }
 +
 +      if (history->nsamp == MAXSAMPLES) {
 +              for (i = 0; i < MAXSAMPLES; i++) {
 +                      if ((cur_time - history->stime[i])
 +                                      > CLUST_SMPL_INVLD_TIME)
 +                              history->nsamp--;
 +              }
 +      }
 +
 +      if (history->nsamp < MAXSAMPLES) {
 +              history->flag = 0;
 +              return ret;
 +      }
 +
 +      if (history->flag == 2)
 +              history->flag = 0;
 +
 +      if (history->htmr_wkup != 1) {
 +              uint64_t total = 0;
 +
 +              if (history->flag == 1) {
 +                      for (i = 0; i < MAXSAMPLES; i++)
 +                              total += history->resi[i];
 +                      do_div(total, MAXSAMPLES);
 +                      *pred_us = total;
 +                      return 2;
 +              }
 +
 +              for (j = 1; j < cluster->nlevels; j++) {
 +                      uint32_t failed = 0;
 +
 +                      total = 0;
 +                      for (i = 0; i < MAXSAMPLES; i++) {
 +                              if ((history->mode[i] == j) && (history->resi[i]
 +                              < cluster->levels[j].pwr.min_residency)) {
 +                                      failed++;
 +                                      total += history->resi[i];
 +                              }
 +                      }
 +
 +                      if (failed > (MAXSAMPLES-2)) {
 +                              do_div(total, failed);
 +                              *pred_us = total;
 +                              history->flag = 1;
 +                              return 1;
 +                      }
 +              }
 +      }
 +
 +      return ret;
 +}
 +
 +static void update_cluster_history_time(struct cluster_history *history,
 +                                              int idx, uint64_t start)
 +{
 +      history->entry_idx = idx;
 +      history->entry_time = start;
 +}
 +
 +static void update_cluster_history(struct cluster_history *history, int idx)
 +{
 +      uint32_t tmr = 0;
 +      uint32_t residency = 0;
 +      struct lpm_cluster *cluster =
 +                      container_of(history, struct lpm_cluster, history);
 +
 +      if (!lpm_prediction)
 +              return;
 +
 +      if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
 +              residency = ktime_to_us(ktime_get()) - history->entry_time;
 +              history->stime[history->hptr] = history->entry_time;
 +      } else
 +              return;
 +
 +      if (history->htmr_wkup) {
 +              if (!history->hptr)
 +                      history->hptr = MAXSAMPLES-1;
 +              else
 +                      history->hptr--;
 +
 +              history->resi[history->hptr] += residency;
 +
 +              history->htmr_wkup = 0;
 +              tmr = 1;
 +      } else {
 +              history->resi[history->hptr] = residency;
 +      }
 +
 +      history->mode[history->hptr] = idx;
 +
 +      history->entry_idx = INT_MIN;
 +      history->entry_time = 0;
 +
 +      if (history->nsamp < MAXSAMPLES)
 +              history->nsamp++;
 +
 +      trace_cluster_pred_hist(cluster->cluster_name,
 +              history->mode[history->hptr], history->resi[history->hptr],
 +              history->hptr, tmr);
 +
 +      (history->hptr)++;
 +
 +      if (history->hptr >= MAXSAMPLES)
 +              history->hptr = 0;
 +}
 +
 +static void clear_cl_history_each(struct cluster_history *history)
 +{
 +      int i;
 +
 +      for (i = 0; i < MAXSAMPLES; i++) {
 +              history->resi[i]  = 0;
 +              history->mode[i] = -1;
 +              history->stime[i] = 0;
 +      }
 +      history->hptr = 0;
 +      history->nsamp = 0;
 +      history->flag = 0;
 +      history->hinvalid = 0;
 +      history->htmr_wkup = 0;
 +}
 +
 +static void clear_cl_predict_history(void)
 +{
 +      struct lpm_cluster *cluster = lpm_root_node;
 +      struct list_head *list;
 +
 +      if (!lpm_prediction)
 +              return;
 +
 +      clear_cl_history_each(&cluster->history);
 +
 +      list_for_each(list, &cluster->child) {
 +              struct lpm_cluster *n;
 +
 +              n = list_entry(list, typeof(*n), list);
 +              clear_cl_history_each(&n->history);
 +      }
 +}
 +
 +static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
 +                                                      int *ispred)
 +{
 +      int best_level = -1;
 +      int i;
 +      struct cpumask mask;
 +      uint32_t latency_us = ~0U;
 +      uint32_t sleep_us;
 +      uint32_t cpupred_us = 0, pred_us = 0;
 +      int pred_mode = 0, predicted = 0;
 +
 +      if (!cluster)
 +              return -EINVAL;
 +
 +      sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL,
 +                                              from_idle, &cpupred_us);
 +
 +      if (from_idle) {
 +              pred_mode = cluster_predict(cluster, &pred_us);
 +
 +              if (cpupred_us && pred_mode && (cpupred_us < pred_us))
 +                      pred_us = cpupred_us;
 +
 +              if (pred_us && pred_mode && (pred_us < sleep_us))
 +                      predicted = 1;
 +
 +              if (predicted && (pred_us == cpupred_us))
 +                      predicted = 2;
 +      }
 +
 +      if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
 +              latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
 +                                                      &mask);
 +
 +      /*
 +       * If at least one of the cores in the cluster is online, the cluster
 +       * low power modes should be determined by the idle characteristics
 +       * even if the last core enters the low power mode as a part of
 +       * hotplug.
 +       */
 +
 +      if (!from_idle && num_online_cpus() > 1 &&
 +              cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
 +              from_idle = true;
 +
 +      for (i = 0; i < cluster->nlevels; i++) {
 +              struct lpm_cluster_level *level = &cluster->levels[i];
 +              struct power_params *pwr_params = &level->pwr;
 +
 +              if (!lpm_cluster_mode_allow(cluster, i, from_idle))
 +                      continue;
 +
 +              if (level->last_core_only &&
 +                      cpumask_weight(cpu_online_mask) > 1)
 +                      continue;
 +
 +              if (!cpumask_equal(&cluster->num_children_in_sync,
 +                                      &level->num_cpu_votes))
 +                      continue;
 +
 +              if (from_idle && latency_us < pwr_params->latency_us)
 +                      break;
 +
 +              if (sleep_us < pwr_params->time_overhead_us)
 +                      break;
 +
 +              if (suspend_in_progress && from_idle && level->notify_rpm)
 +                      continue;
 +
 +              if (level->notify_rpm && msm_rpm_waiting_for_ack())
 +                      continue;
 +
 +              best_level = i;
 +
 +              if (from_idle &&
 +                      (predicted ? (pred_us <= pwr_params->max_residency)
 +                      : (sleep_us <= pwr_params->max_residency)))
 +                      break;
 +      }
 +
 +      if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
 +              cluster->history.flag = 2;
 +
 +      *ispred = predicted;
 +
 +      trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
 +                                              latency_us, predicted, pred_us);
 +
 +      return best_level;
 +}
 +
 +static void cluster_notify(struct lpm_cluster *cluster,
 +              struct lpm_cluster_level *level, bool enter)
 +{
 +      if (level->is_reset && enter)
 +              cpu_cluster_pm_enter(cluster->aff_level);
 +      else if (level->is_reset && !enter)
 +              cpu_cluster_pm_exit(cluster->aff_level);
 +}
 +
 +static int cluster_configure(struct lpm_cluster *cluster, int idx,
 +              bool from_idle, int predicted)
 +{
 +      struct lpm_cluster_level *level = &cluster->levels[idx];
 +      struct cpumask online_cpus;
 +      int ret, i;
 +
 +      cpumask_and(&online_cpus, &cluster->num_children_in_sync,
 +                                      cpu_online_mask);
 +
 +      if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
 +                      || is_IPI_pending(&online_cpus)) {
 +              return -EPERM;
 +      }
 +
 +      if (idx != cluster->default_level) {
 +              update_debug_pc_event(CLUSTER_ENTER, idx,
 +                      cluster->num_children_in_sync.bits[0],
 +                      cluster->child_cpus.bits[0], from_idle);
 +              trace_cluster_enter(cluster->cluster_name, idx,
 +                      cluster->num_children_in_sync.bits[0],
 +                      cluster->child_cpus.bits[0], from_idle);
 +              lpm_stats_cluster_enter(cluster->stats, idx);
 +
 +              if (from_idle && lpm_prediction)
 +                      update_cluster_history_time(&cluster->history, idx,
 +                                              ktime_to_us(ktime_get()));
 +      }
 +
 +      for (i = 0; i < cluster->ndevices; i++) {
 +              ret = set_device_mode(cluster, i, level);
 +              if (ret)
 +                      goto failed_set_mode;
 +      }
 +
 +      if (level->notify_rpm) {
 +              struct cpumask nextcpu, *cpumask;
 +              uint64_t us;
 +              uint32_t pred_us;
 +              uint64_t sec;
 +              uint64_t nsec;
 +
 +              us = get_cluster_sleep_time(cluster, &nextcpu,
 +                                              from_idle, &pred_us);
 +              cpumask = level->disable_dynamic_routing ? NULL : &nextcpu;
 +
 +              ret = msm_rpm_enter_sleep(0, cpumask);
 +              if (ret) {
 +                      pr_info("Failed msm_rpm_enter_sleep() rc = %d\n", ret);
 +                      goto failed_set_mode;
 +              }
 +
 +              clear_predict_history();
 +              clear_cl_predict_history();
 +
 +              us = us + 1;
 +              sec = us;
 +              do_div(sec, USEC_PER_SEC);
 +              nsec = us - sec * USEC_PER_SEC;
 +
 +              sec = sec * SCLK_HZ;
 +              if (nsec > 0) {
 +                      nsec = nsec * NSEC_PER_USEC;
 +                      do_div(nsec, NSEC_PER_SEC/SCLK_HZ);
 +              }
 +              us = sec + nsec;
 +              msm_mpm_enter_sleep(us, from_idle, cpumask);
 +      }
 +
 +      /* Notify cluster enter event after successful config completion */
 +      cluster_notify(cluster, level, true);
 +
 +      sched_set_cluster_dstate(&cluster->child_cpus, idx, 0, 0);
 +
 +      cluster->last_level = idx;
 +
 +      if (predicted && (idx < (cluster->nlevels - 1))) {
 +              struct power_params *pwr_params = &cluster->levels[idx].pwr;
 +
 +              tick_broadcast_exit();
 +              clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
 +              tick_broadcast_enter();
 +      }
 +
 +      return 0;
 +
 +failed_set_mode:
 +
 +      for (i = 0; i < cluster->ndevices; i++) {
 +              int rc = 0;
 +              level = &cluster->levels[cluster->default_level];
 +              rc = set_device_mode(cluster, i, level);
 +              BUG_ON(rc);
 +      }
 +      return ret;
 +}
 +
 +static void cluster_prepare(struct lpm_cluster *cluster,
 +              const struct cpumask *cpu, int child_idx, bool from_idle,
 +              int64_t start_time)
 +{
 +      int i;
 +      int predicted = 0;
 +
 +      if (!cluster)
 +              return;
 +
 +      if (cluster->min_child_level > child_idx)
 +              return;
 +
 +      spin_lock(&cluster->sync_lock);
 +      cpumask_or(&cluster->num_children_in_sync, cpu,
 +                      &cluster->num_children_in_sync);
 +
 +      for (i = 0; i < cluster->nlevels; i++) {
 +              struct lpm_cluster_level *lvl = &cluster->levels[i];
 +
 +              if (child_idx >= lvl->min_child_level)
 +                      cpumask_or(&lvl->num_cpu_votes, cpu,
 +                                      &lvl->num_cpu_votes);
 +      }
 +
 +      /*
 +       * cluster_select() does not make any configuration changes. So it's OK
 +       * to release the lock here. If a core wakes up for a rude request,
 +       * it need not wait for another to finish its cluster selection and
 +       * configuration process.
 +       */
 +
 +      if (!cpumask_equal(&cluster->num_children_in_sync,
 +                              &cluster->child_cpus))
 +              goto failed;
 +
 +      i = cluster_select(cluster, from_idle, &predicted);
 +
 +      if (((i < 0) || (i == cluster->default_level))
 +                              && predicted && from_idle) {
 +              update_cluster_history_time(&cluster->history,
 +                                      -1, ktime_to_us(ktime_get()));
 +
 +              if (i < 0) {
 +                      struct power_params *pwr_params =
 +                                              &cluster->levels[0].pwr;
 +
 +                      tick_broadcast_exit();
 +                      clusttimer_start(cluster,
 +                                      pwr_params->max_residency + tmr_add);
 +                      tick_broadcast_enter();
 +              }
 +      }
 +
 +      if (i < 0)
 +              goto failed;
 +
 +      if (cluster_configure(cluster, i, from_idle, predicted))
 +              goto failed;
 +
 +      cluster->stats->sleep_time = start_time;
 +      cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
 +                      from_idle, start_time);
 +
 +      spin_unlock(&cluster->sync_lock);
 +      return;
 +failed:
 +      spin_unlock(&cluster->sync_lock);
 +      cluster->stats->sleep_time = 0;
 +      return;
 +}
 +
 +static void cluster_unprepare(struct lpm_cluster *cluster,
 +              const struct cpumask *cpu, int child_idx, bool from_idle,
 +              int64_t end_time)
 +{
 +      struct lpm_cluster_level *level;
 +      bool first_cpu;
 +      int last_level, i, ret;
 +
 +      if (!cluster)
 +              return;
 +
 +      if (cluster->min_child_level > child_idx)
 +              return;
 +
 +      spin_lock(&cluster->sync_lock);
 +      last_level = cluster->default_level;
 +      first_cpu = cpumask_equal(&cluster->num_children_in_sync,
 +                              &cluster->child_cpus);
 +      cpumask_andnot(&cluster->num_children_in_sync,
 +                      &cluster->num_children_in_sync, cpu);
 +
 +      for (i = 0; i < cluster->nlevels; i++) {
 +              struct lpm_cluster_level *lvl = &cluster->levels[i];
 +
 +              if (child_idx >= lvl->min_child_level)
 +                      cpumask_andnot(&lvl->num_cpu_votes,
 +                                      &lvl->num_cpu_votes, cpu);
 +      }
 +
 +      if (from_idle && first_cpu &&
 +              (cluster->last_level == cluster->default_level))
 +              update_cluster_history(&cluster->history, cluster->last_level);
 +
 +      if (!first_cpu || cluster->last_level == cluster->default_level)
 +              goto unlock_return;
 +
 +      if (cluster->stats->sleep_time)
 +              cluster->stats->sleep_time = end_time -
 +                      cluster->stats->sleep_time;
 +      lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
 +
 +      level = &cluster->levels[cluster->last_level];
 +      if (level->notify_rpm) {
 +              msm_rpm_exit_sleep();
 +
 +              /* If RPM bumps up CX to turbo, unvote CX turbo vote
 +               * during exit of rpm assisted power collapse to
 +               * reduce the power impact
 +               */
 +
 +              lpm_wa_cx_unvote_send();
 +              msm_mpm_exit_sleep(from_idle);
 +
 +              if (!from_idle)
 +                      suspend_wake_time = 0;
 +      }
 +
 +      update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
 +                      cluster->num_children_in_sync.bits[0],
 +                      cluster->child_cpus.bits[0], from_idle);
 +      trace_cluster_exit(cluster->cluster_name, cluster->last_level,
 +                      cluster->num_children_in_sync.bits[0],
 +                      cluster->child_cpus.bits[0], from_idle);
 +
 +      last_level = cluster->last_level;
 +      cluster->last_level = cluster->default_level;
 +
 +      for (i = 0; i < cluster->ndevices; i++) {
 +              level = &cluster->levels[cluster->default_level];
 +              ret = set_device_mode(cluster, i, level);
 +
 +              BUG_ON(ret);
 +
 +      }
 +      sched_set_cluster_dstate(&cluster->child_cpus, 0, 0, 0);
 +
 +      cluster_notify(cluster, &cluster->levels[last_level], false);
 +
 +      if (from_idle)
 +              update_cluster_history(&cluster->history, last_level);
 +
 +      cluster_unprepare(cluster->parent, &cluster->child_cpus,
 +                      last_level, from_idle, end_time);
 +unlock_return:
 +      spin_unlock(&cluster->sync_lock);
 +}
 +
 +static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
 +                              bool from_idle)
 +{
 +      struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
 +      bool jtag_save_restore =
 +                      cluster->cpu->levels[cpu_index].jtag_save_restore;
 +
 +      /* Use broadcast timer for aggregating sleep mode within a cluster.
 +       * A broadcast timer could be used in the following scenarios:
 +       * 1) The architected timer HW gets reset during certain low power
 +       * modes and the core relies on an external (broadcast) timer to wake
 +       * up from sleep. This information is passed through the device tree.
 +       * 2) The CPU low power mode could trigger a system low power mode.
 +       * The low power module relies on the broadcast timer to aggregate the
 +       * next wakeup within a cluster, in which case the CPU switches over
 +       * to using the broadcast timer.
 +       */
 +      if (from_idle && (cpu_level->use_bc_timer ||
 +                      (cpu_index >= cluster->min_child_level)))
 +              tick_broadcast_enter();
 +
 +      if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
 +              || (cpu_level->mode ==
 +                      MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
 +                      || (cpu_level->is_reset)))
 +              cpu_pm_enter();
 +
 +      /*
 +       * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
 +       */
 +      if (jtag_save_restore)
 +              msm_jtag_save_state();
 +}
 +
 +static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
 +                              bool from_idle)
 +{
 +      struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
 +      bool jtag_save_restore =
 +                      cluster->cpu->levels[cpu_index].jtag_save_restore;
 +
 +      if (from_idle && (cpu_level->use_bc_timer ||
 +                      (cpu_index >= cluster->min_child_level)))
 +              tick_broadcast_exit();
 +
 +      if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
 +              || (cpu_level->mode ==
 +                      MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
 +              || cpu_level->is_reset))
 +              cpu_pm_exit();
 +
 +      /*
 +       * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
 +       */
 +      if (jtag_save_restore)
 +              msm_jtag_restore_state();
 +}
 +
 +int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
 +{
 +      int state_id = 0;
 +
 +      if (!cluster)
 +              return 0;
 +
 +      spin_lock(&cluster->sync_lock);
 +
 +      if (!cpumask_equal(&cluster->num_children_in_sync,
 +                              &cluster->child_cpus))
 +              goto unlock_and_return;
 +
 +      state_id |= get_cluster_id(cluster->parent, aff_lvl);
 +
 +      if (cluster->last_level != cluster->default_level) {
 +              struct lpm_cluster_level *level
 +                      = &cluster->levels[cluster->last_level];
 +
 +              state_id |= (level->psci_id & cluster->psci_mode_mask)
 +                                      << cluster->psci_mode_shift;
 +              (*aff_lvl)++;
 +      }
 +unlock_and_return:
 +      spin_unlock(&cluster->sync_lock);
 +      return state_id;
 +}
 +
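 +/*
 + * Enter the chosen low power mode via PSCI.  idx 0 is plain WFI; deeper
 + * levels build a composite state id from get_cluster_id() above, the CPU
 + * level's psci_id and the power-state/affinity-level bits, then suspend
 + * through arm_cpuidle_suspend().  The CONFIG_CPU_V7-without-PSCI variant
 + * only warns, since PSCI cpu_suspend is not supported there.
 + */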
 +#if !defined(CONFIG_CPU_V7)
 +bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
 +{
 +      /*
 +       * idx = 0 is the default LPM state
 +       */
 +      if (!idx) {
 +              stop_critical_timings();
 +              wfi();
 +              start_critical_timings();
 +              return 1;
 +      } else {
 +              int affinity_level = 0;
 +              int state_id = get_cluster_id(cluster, &affinity_level);
 +              int power_state =
 +                      PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
 +              bool success = false;
 +
 +              if (cluster->cpu->levels[idx].hyp_psci) {
 +                      stop_critical_timings();
 +                      __invoke_psci_fn_smc(0xC4000021, 0, 0, 0);
 +                      start_critical_timings();
 +                      return 1;
 +              }
 +
 +              affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
 +              state_id |= (power_state | affinity_level
 +                      | cluster->cpu->levels[idx].psci_id);
 +
 +              update_debug_pc_event(CPU_ENTER, state_id,
 +                                              0xdeaffeed, 0xdeaffeed, true);
 +              stop_critical_timings();
 +              success = !arm_cpuidle_suspend(state_id);
 +              start_critical_timings();
 +              update_debug_pc_event(CPU_EXIT, state_id,
 +                                              success, 0xdeaffeed, true);
 +              return success;
 +      }
 +}
 +#elif defined(CONFIG_ARM_PSCI)
 +bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
 +{
 +      if (!idx) {
 +              stop_critical_timings();
 +              wfi();
 +              start_critical_timings();
 +              return 1;
 +      } else {
 +              int affinity_level = 0;
 +              int state_id = get_cluster_id(cluster, &affinity_level);
 +              int power_state =
 +                      PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
 +              bool success = false;
 +
 +              affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
 +              state_id |= (power_state | affinity_level
 +                      | cluster->cpu->levels[idx].psci_id);
 +
 +              update_debug_pc_event(CPU_ENTER, state_id,
 +                                              0xdeaffeed, 0xdeaffeed, true);
 +              stop_critical_timings();
 +              success = !arm_cpuidle_suspend(state_id);
 +              start_critical_timings();
 +              update_debug_pc_event(CPU_EXIT, state_id,
 +                                              success, 0xdeaffeed, true);
 +              return success;
 +      }
 +}
 +#else
 +bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
 +{
 +      WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
 +      return false;
 +}
 +#endif
 +
 +static int lpm_cpuidle_select(struct cpuidle_driver *drv,
 +              struct cpuidle_device *dev)
 +{
 +      struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
 +      int idx;
 +
 +      if (!cluster)
 +              return 0;
 +
 +      idx = cpu_power_select(dev, cluster->cpu);
 +
 +      if (idx < 0)
 +              return -EPERM;
 +
 +      return idx;
 +}
 +
 +static void update_history(struct cpuidle_device *dev, int idx)
 +{
 +      struct lpm_history *history = &per_cpu(hist, dev->cpu);
 +      uint32_t tmr = 0;
 +
 +      if (!lpm_prediction)
 +              return;
 +
 +      if (history->htmr_wkup) {
 +              if (!history->hptr)
 +                      history->hptr = MAXSAMPLES-1;
 +              else
 +                      history->hptr--;
 +
 +              history->resi[history->hptr] += dev->last_residency;
 +              history->htmr_wkup = 0;
 +              tmr = 1;
 +      } else {
 +              history->resi[history->hptr] = dev->last_residency;
 +      }
 +
 +      history->mode[history->hptr] = idx;
 +
 +      trace_cpu_pred_hist(history->mode[history->hptr],
 +              history->resi[history->hptr], history->hptr, tmr);
 +
 +      if (history->nsamp < MAXSAMPLES)
 +              history->nsamp++;
 +
 +      (history->hptr)++;
 +      if (history->hptr >= MAXSAMPLES)
 +              history->hptr = 0;
 +}
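
update_history() keeps a fixed-size ring of the last MAXSAMPLES residencies and chosen state indices for the idle-prediction logic; when the wakeup came from the history timer (htmr_wkup), the residency is accumulated into the previous slot instead of starting a new one. A minimal, self-contained sketch of the plain ring update, using a hypothetical 8-entry buffer in place of MAXSAMPLES (illustration only, not the driver's types):

/* Illustration only: the circular-history pattern used above. RING_SIZE is
 * a made-up stand-in for MAXSAMPLES.
 */
#define RING_SIZE 8

struct idle_hist {
        unsigned int resi[RING_SIZE];   /* residency samples, in us */
        int mode[RING_SIZE];            /* state index chosen each cycle */
        unsigned int hptr;              /* next slot to write */
        unsigned int nsamp;             /* valid samples, capped at RING_SIZE */
};

static void idle_hist_push(struct idle_hist *h, unsigned int residency, int idx)
{
        h->resi[h->hptr] = residency;
        h->mode[h->hptr] = idx;

        if (h->nsamp < RING_SIZE)
                h->nsamp++;

        h->hptr++;
        if (h->hptr >= RING_SIZE)
                h->hptr = 0;
}
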
 +
 +static int lpm_cpuidle_enter(struct cpuidle_device *dev,
 +              struct cpuidle_driver *drv, int idx)
 +{
 +      struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
 +      bool success = false;
 +      const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
 +      int64_t start_time = ktime_to_ns(ktime_get()), end_time;
 +      struct power_params *pwr_params;
 +
 +      if (idx < 0)
 +              return -EINVAL;
 +
 +      pwr_params = &cluster->cpu->levels[idx].pwr;
 +      sched_set_cpu_cstate(smp_processor_id(), idx + 1,
 +              pwr_params->energy_overhead, pwr_params->latency_us);
 +
 +      cpu_prepare(cluster, idx, true);
 +      cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));
 +
 +      trace_cpu_idle_enter(idx);
 +      lpm_stats_cpu_enter(idx, start_time);
 +
 +      if (need_resched())
 +              goto exit;
 +
 +      BUG_ON(!use_psci);
 +      success = psci_enter_sleep(cluster, idx, true);
 +
 +exit:
 +      end_time = ktime_to_ns(ktime_get());
 +      lpm_stats_cpu_exit(idx, end_time, success);
 +
 +      cluster_unprepare(cluster, cpumask, idx, true, end_time);
 +      cpu_unprepare(cluster, idx, true);
 +      sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
 +      end_time = ktime_to_ns(ktime_get()) - start_time;
 +      do_div(end_time, 1000);
 +      dev->last_residency = end_time;
 +      update_history(dev, idx);
 +      trace_cpu_idle_exit(idx, success);
 +      local_irq_enable();
 +      if (lpm_prediction) {
 +              histtimer_cancel();
 +              clusttimer_cancel();
 +      }
 +      return idx;
 +}
 +
 +#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
 +static int cpuidle_register_cpu(struct cpuidle_driver *drv,
 +              struct cpumask *mask)
 +{
 +      struct cpuidle_device *device;
 +      int cpu, ret;
 +
 +      if (!mask || !drv)
 +              return -EINVAL;
 +
 +      drv->cpumask = mask;
 +      ret = cpuidle_register_driver(drv);
 +      if (ret) {
 +              pr_err("Failed to register cpuidle driver, ret: %d\n", ret);
 +              goto failed_driver_register;
 +      }
 +
 +      for_each_cpu(cpu, mask) {
 +              device = &per_cpu(cpuidle_dev, cpu);
 +              device->cpu = cpu;
 +
 +              ret = cpuidle_register_device(device);
 +              if (ret) {
 +                      pr_err("Failed to register cpuidle device for cpu:%u\n",
 +                                      cpu);
 +                      goto failed_driver_register;
 +              }
 +      }
 +      return ret;
 +failed_driver_register:
 +      for_each_cpu(cpu, mask)
 +              cpuidle_unregister_driver(drv);
 +      return ret;
 +}
 +#else
 +static int cpuidle_register_cpu(struct cpuidle_driver *drv,
 +              struct  cpumask *mask)
 +{
 +      return cpuidle_register(drv, NULL);
 +}
 +#endif
 +
 +static struct cpuidle_governor lpm_governor = {
 +      .name =         "qcom",
 +      .rating =       30,
 +      .select =       lpm_cpuidle_select,
 +      .owner =        THIS_MODULE,
 +};
 +
 +static int cluster_cpuidle_register(struct lpm_cluster *cl)
 +{
 +      int i = 0, ret = 0;
 +      unsigned cpu;
 +      struct lpm_cluster *p = NULL;
 +
 +      if (!cl->cpu) {
 +              struct lpm_cluster *n;
 +
 +              list_for_each_entry(n, &cl->child, list) {
 +                      ret = cluster_cpuidle_register(n);
 +                      if (ret)
 +                              break;
 +              }
 +              return ret;
 +      }
 +
 +      cl->drv = kzalloc(sizeof(*cl->drv), GFP_KERNEL);
 +      if (!cl->drv)
 +              return -ENOMEM;
 +
 +      cl->drv->name = "msm_idle";
 +
 +      for (i = 0; i < cl->cpu->nlevels; i++) {
 +              struct cpuidle_state *st = &cl->drv->states[i];
 +              struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
 +              snprintf(st->name, CPUIDLE_NAME_LEN, "C%u", i);
 +              snprintf(st->desc, CPUIDLE_DESC_LEN, "%s",
 +                      cpu_level->name);
 +              st->flags = 0;
 +              st->exit_latency = cpu_level->pwr.latency_us;
 +              st->power_usage = cpu_level->pwr.ss_power;
 +              st->target_residency = 0;
 +              st->enter = lpm_cpuidle_enter;
 +      }
 +
 +      cl->drv->state_count = cl->cpu->nlevels;
 +      cl->drv->safe_state_index = 0;
 +      for_each_cpu(cpu, &cl->child_cpus)
 +              per_cpu(cpu_cluster, cpu) = cl;
 +
 +      for_each_possible_cpu(cpu) {
 +              if (cpu_online(cpu))
 +                      continue;
 +              p = per_cpu(cpu_cluster, cpu);
 +              while (p) {
 +                      int j;
 +                      spin_lock(&p->sync_lock);
 +                      cpumask_set_cpu(cpu, &p->num_children_in_sync);
 +                      for (j = 0; j < p->nlevels; j++)
 +                              cpumask_copy(&p->levels[j].num_cpu_votes,
 +                                              &p->num_children_in_sync);
 +                      spin_unlock(&p->sync_lock);
 +                      p = p->parent;
 +              }
 +      }
 +      ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);
 +
 +      if (ret) {
 +              kfree(cl->drv);
 +              return -ENOMEM;
 +      }
 +      return 0;
 +}
 +
 +/**
 + * init_lpm - initializes the governor
 + */
 +static int __init init_lpm(void)
 +{
 +      return cpuidle_register_governor(&lpm_governor);
 +}
 +
 +postcore_initcall(init_lpm);
 +
 +static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
 +              struct lpm_cluster *parent)
 +{
 +      const char **level_name;
 +      int i;
 +
 +      level_name = kzalloc(cpu->nlevels * sizeof(*level_name), GFP_KERNEL);
 +
 +      if (!level_name)
 +              return;
 +
 +      for (i = 0; i < cpu->nlevels; i++)
 +              level_name[i] = cpu->levels[i].name;
 +
 +      lpm_stats_config_level("cpu", level_name, cpu->nlevels,
 +                      parent->stats, &parent->child_cpus);
 +
 +      kfree(level_name);
 +}
 +
 +static void register_cluster_lpm_stats(struct lpm_cluster *cl,
 +              struct lpm_cluster *parent)
 +{
 +      const char **level_name;
 +      int i;
 +      struct lpm_cluster *child;
 +
 +      if (!cl)
 +              return;
 +
 +      level_name = kzalloc(cl->nlevels * sizeof(*level_name), GFP_KERNEL);
 +
 +      if (!level_name)
 +              return;
 +
 +      for (i = 0; i < cl->nlevels; i++)
 +              level_name[i] = cl->levels[i].level_name;
 +
 +      cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
 +                      cl->nlevels, parent ? parent->stats : NULL, NULL);
 +
 +      kfree(level_name);
 +
 +      if (cl->cpu) {
 +              register_cpu_lpm_stats(cl->cpu, cl);
 +              return;
 +      }
 +
 +      list_for_each_entry(child, &cl->child, list)
 +              register_cluster_lpm_stats(child, cl);
 +}
 +
 +static int lpm_suspend_prepare(void)
 +{
 +      suspend_in_progress = true;
 +      msm_mpm_suspend_prepare();
 +      lpm_stats_suspend_enter();
 +
 +      return 0;
 +}
 +
 +static void lpm_suspend_wake(void)
 +{
 +      suspend_in_progress = false;
 +      msm_mpm_suspend_wake();
 +      lpm_stats_suspend_exit();
 +}
 +
 +static int lpm_suspend_enter(suspend_state_t state)
 +{
 +      int cpu = raw_smp_processor_id();
 +      struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
 +      struct lpm_cpu *lpm_cpu = cluster->cpu;
 +      const struct cpumask *cpumask = get_cpu_mask(cpu);
 +      int idx;
 +
 +      for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
 +
 +              if (lpm_cpu_mode_allow(cpu, idx, false))
 +                      break;
 +      }
 +      if (idx < 0) {
 +              pr_err("Failed to find an enabled low power mode for suspend\n");
 +              return 0;
 +      }
 +      cpu_prepare(cluster, idx, false);
 +      cluster_prepare(cluster, cpumask, idx, false, 0);
 +      if (idx > 0)
 +              update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
 +                                      0xdeaffeed, false);
 +
 +      /*
 +       * Print the clocks that are still enabled during system suspend.
 +       * This debug information helps identify which clocks are enabled
 +       * and preventing the system-level LPMs (XO and Vmin).
 +       */
 +      clock_debug_print_enabled();
 +
 +      BUG_ON(!use_psci);
 +      psci_enter_sleep(cluster, idx, true);
 +
 +      if (idx > 0)
 +              update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
 +                                      false);
 +
 +      cluster_unprepare(cluster, cpumask, idx, false, 0);
 +      cpu_unprepare(cluster, idx, false);
 +      return 0;
 +}
 +
 +static const struct platform_suspend_ops lpm_suspend_ops = {
 +      .enter = lpm_suspend_enter,
 +      .valid = suspend_valid_only_mem,
 +      .prepare_late = lpm_suspend_prepare,
 +      .wake = lpm_suspend_wake,
 +};
 +
 +static int lpm_probe(struct platform_device *pdev)
 +{
 +      int ret;
 +      int size;
 +      struct kobject *module_kobj = NULL;
 +      struct md_region md_entry;
 +
 +      get_online_cpus();
 +      lpm_root_node = lpm_of_parse_cluster(pdev);
 +
 +      if (IS_ERR_OR_NULL(lpm_root_node)) {
 +              pr_err("%s(): Failed to probe low power modes\n", __func__);
 +              put_online_cpus();
 +              return lpm_root_node ? PTR_ERR(lpm_root_node) : -ENODEV;
 +      }
 +
 +      if (print_parsed_dt)
 +              cluster_dt_walkthrough(lpm_root_node);
 +
 +      /*
 +       * Register the hotplug notifier before broadcast time to prevent a
 +       * race where a broadcast timer might not be set up for a core.
 +       * This is a latent bug in the existing code, but no issues have been
 +       * observed, most likely because lpm_levels is initialized late.
 +       */
 +      suspend_set_ops(&lpm_suspend_ops);
 +      hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 +      hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 +      cluster_timer_init(lpm_root_node);
 +
 +      ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
 +      if (ret) {
 +              pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
 +                      __func__, ret);
 +              put_online_cpus();
 +              return ret;
 +      }
 +
 +      size = num_dbg_elements * sizeof(struct lpm_debug);
 +      lpm_debug = dma_alloc_coherent(&pdev->dev, size,
 +                      &lpm_debug_phys, GFP_KERNEL);
 +      register_cluster_lpm_stats(lpm_root_node, NULL);
 +
 +      ret = cluster_cpuidle_register(lpm_root_node);
 +      put_online_cpus();
 +      if (ret) {
 +              pr_err("%s(): Failed to register with cpuidle framework\n",
 +                              __func__);
 +              goto failed;
 +      }
 +      register_hotcpu_notifier(&lpm_cpu_nblk);
 +      module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
 +      if (!module_kobj) {
 +              pr_err("%s: cannot find kobject for module %s\n",
 +                      __func__, KBUILD_MODNAME);
 +              ret = -ENOENT;
 +              goto failed;
 +      }
 +
 +      ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
 +      if (ret) {
 +              pr_err("%s(): Failed to create cluster level nodes\n",
 +                              __func__);
 +              goto failed;
 +      }
 +
 +      /* Add lpm_debug to Minidump */
 +      strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
 +      md_entry.virt_addr = (uintptr_t)lpm_debug;
 +      md_entry.phys_addr = lpm_debug_phys;
 +      md_entry.size = size;
 +      if (msm_minidump_add_region(&md_entry))
 +              pr_info("Failed to add lpm_debug in Minidump\n");
 +
 +      return 0;
 +failed:
 +      free_cluster_node(lpm_root_node);
 +      lpm_root_node = NULL;
 +      return ret;
 +}
 +
 +static const struct of_device_id lpm_mtch_tbl[] = {
 +      {.compatible = "qcom,lpm-levels"},
 +      {},
 +};
 +
 +static struct platform_driver lpm_driver = {
 +      .probe = lpm_probe,
 +      .driver = {
 +              .name = "lpm-levels",
 +              .owner = THIS_MODULE,
 +              .of_match_table = lpm_mtch_tbl,
 +      },
 +};
 +
 +static int __init lpm_levels_module_init(void)
 +{
 +      int rc;
 +
 +      rc = platform_driver_register(&lpm_driver);
 +      if (rc) {
 +              pr_err("Error registering %s\n", lpm_driver.driver.name);
 +              goto fail;
 +      }
 +
 +#ifdef CONFIG_ARM_PSCI
 +      rc = set_cpuidle_ops();
 +      if (rc) {
 +              pr_err("%s(): Failed to set cpuidle ops\n", __func__);
 +              goto fail;
 +      }
 +#endif
 +
 +fail:
 +      return rc;
 +}
 +late_initcall(lpm_levels_module_init);
 +
 +enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
 +{
 +      struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
 +      enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;
 +
 +      /*
 +       * No need to acquire the lock if the probe hasn't completed yet.
 +       * If a hotplug happens before the lpm probe, we want to flush the
 +       * cache to make sure that L2 is flushed; otherwise a cluster
 +       * architecture could see incoherencies. This doesn't affect the
 +       * idle case, as the idle driver isn't registered before the probe
 +       * function runs.
 +       */
 +      if (!cluster)
 +              return MSM_SCM_L2_OFF;
 +
 +      /*
 +       * Assumes L2 only. What/how parameters get passed into TZ
 +       * determines how this function reports this info back in msm-pm.c.
 +       */
 +      spin_lock(&cluster->sync_lock);
 +
 +      if (!cluster->lpm_dev) {
 +              retflag = MSM_SCM_L2_OFF;
 +              goto unlock_and_return;
 +      }
 +
 +      if (!cpumask_equal(&cluster->num_children_in_sync,
 +                                              &cluster->child_cpus))
 +              goto unlock_and_return;
 +
 +      if (cluster->lpm_dev)
 +              retflag = cluster->lpm_dev->tz_flag;
 +      /*
 +       * The scm_handoff_lock will be released by the secure monitor.
 +       * It is used to serialize power-collapses from this point on,
 +       * so that both Linux and the secure context have a consistent
 +       * view regarding the number of running cpus (cpu_count).
 +       *
 +       * It must be acquired before releasing the cluster lock.
 +       */
 +unlock_and_return:
 +      update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
 +                      0xdeadbeef);
 +      trace_pre_pc_cb(retflag);
 +      remote_spin_lock_rlock_id(&scm_handoff_lock,
 +                                REMOTE_SPINLOCK_TID_START + cpu);
 +      spin_unlock(&cluster->sync_lock);
 +      return retflag;
 +}
@@@ -109,6 -109,26 +112,26 @@@ bool psci_power_state_is_valid(u32 stat
        return !(state & ~valid_mask);
  }
  
 -static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+ static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
+                       unsigned long arg0, unsigned long arg1,
+                       unsigned long arg2)
+ {
+       struct arm_smccc_res res;
+       arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+       return res.a0;
+ }
++unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+                       unsigned long arg0, unsigned long arg1,
+                       unsigned long arg2)
+ {
+       struct arm_smccc_res res;
+       arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+       return res.a0;
+ }
  static int psci_to_linux_errno(int errno)
  {
        switch (errno) {
Simple merge
Simple merge
index 9455468,0000000..5fc54b9
mode 100644,000000..100644
--- /dev/null
@@@ -1,1059 -1,0 +1,1060 @@@
 +/*
 + * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License version 2 and
 + * only version 2 as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + */
 +
 +#define pr_fmt(fmt) "%s: " fmt, __func__
 +
 +#include <linux/debugfs.h>
 +#include <linux/delay.h>
 +#include <linux/of_device.h>
 +#include <linux/init.h>
 +#include <linux/io.h>
 +#include <linux/kernel.h>
 +#include <linux/list.h>
 +#include <linux/module.h>
 +#include <linux/of.h>
 +#include <linux/platform_device.h>
 +#include <linux/slab.h>
 +#include <linux/string.h>
 +#include <linux/power/qcom/apm.h>
 +#include <soc/qcom/scm.h>
 +#include <linux/arm-smccc.h>
++#include <linux/psci.h>
 +
 +/*
 + *        VDD_APCC
 + * =============================================================
 + *       |      VDD_MX                  |                    |
 + *       |    ==========================|=============       |
 + *    ___|___   ___|___    ___|___   ___|___    ___|___   ___|___
 + *   |       | |       |  |       | |       |  |       | |       |
 + *   | APCC  | | MX HS |  | MX HS | | APCC  |  | MX HS | | APCC  |
 + *   |  HS   | |       |  |       | |  HS   |  |       | |  HS   |
 + *   |_______| |_______|  |_______| |_______|  |_______| |_______|
 + *       |_________|          |_________|         |__________|
 + *            |                    |                    |
 + *      ______|_____         ______|_____        _______|_____
 + *     |            |       |            |      |             |
 + *     |            |       |            |      |             |
 + *     |  CPU MEM   |       |   L2 MEM   |      |    L3 MEM   |
 + *     |   Arrays   |       |   Arrays   |      |    Arrays   |
 + *     |            |       |            |      |             |
 + *     |____________|       |____________|      |_____________|
 + *
 + */
 +
 +/* Register value definitions */
 +#define APCS_GFMUXA_SEL_VAL            0x13
 +#define APCS_GFMUXA_DESEL_VAL          0x03
 +#define MSM_APM_MX_MODE_VAL            0x00
 +#define MSM_APM_APCC_MODE_VAL          0x10
 +#define MSM_APM_MX_DONE_VAL            0x00
 +#define MSM_APM_APCC_DONE_VAL          0x03
 +#define MSM_APM_OVERRIDE_SEL_VAL       0xb0
 +#define MSM_APM_SEC_CLK_SEL_VAL        0x30
 +#define SPM_EVENT_SET_VAL              0x01
 +#define SPM_EVENT_CLEAR_VAL            0x00
 +
 +/* Register bit mask definitions */
 +#define MSM_APM_CTL_STS_MASK            0x0f
 +
 +/* Register offset definitions */
 +#define APCC_APM_MODE              0x00000098
 +#define APCC_APM_CTL_STS           0x000000a8
 +#define APCS_SPARE                 0x00000068
 +#define APCS_VERSION               0x00000fd0
 +
 +#define HMSS_VERSION_1P2           0x10020000
 +
 +#define MSM_APM_SWITCH_TIMEOUT_US  10
 +#define SPM_WAKEUP_DELAY_US        2
 +#define SPM_EVENT_NUM              6
 +
 +#define MSM_APM_DRIVER_NAME        "qcom,msm-apm"
 +
 +
 +enum {
 +      CLOCK_ASSERT_ENABLE,
 +      CLOCK_ASSERT_DISABLE,
 +      CLOCK_ASSERT_TOGGLE,
 +};
 +
 +enum {
 +      MSM8996_ID,
 +      MSM8996PRO_ID,
 +      MSM8953_ID,
 +};
 +
 +struct msm_apm_ctrl_dev {
 +      struct list_head        list;
 +      struct device           *dev;
 +      enum msm_apm_supply     supply;
 +      spinlock_t              lock;
 +      void __iomem            *reg_base;
 +      void __iomem            *apcs_csr_base;
 +      void __iomem            **apcs_spm_events_addr;
 +      void __iomem            *apc0_pll_ctl_addr;
 +      void __iomem            *apc1_pll_ctl_addr;
 +      bool                    clk_src_override;
 +      u32                     version;
 +      struct dentry           *debugfs;
 +      u32                     msm_id;
 +};
 +
 +#if defined(CONFIG_DEBUG_FS)
 +static struct dentry *apm_debugfs_base;
 +#endif
 +
 +static DEFINE_MUTEX(apm_ctrl_list_mutex);
 +static LIST_HEAD(apm_ctrl_list);
 +
 +/*
 + * Get the resources associated with the APM controller from device tree
 + * and remap all I/O addresses that are relevant to this HW revision.
 + */
 +static int msm_apm_ctrl_devm_ioremap(struct platform_device *pdev,
 +                                   struct msm_apm_ctrl_dev *ctrl)
 +{
 +      struct device *dev = &pdev->dev;
 +      struct resource *res;
 +      static const char *res_name[SPM_EVENT_NUM] = {
 +              "apc0-l2-spm",
 +              "apc1-l2-spm",
 +              "apc0-cpu0-spm",
 +              "apc0-cpu1-spm",
 +              "apc1-cpu0-spm",
 +              "apc1-cpu1-spm"
 +      };
 +      int i, ret = 0;
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc-glb");
 +      if (!res) {
 +              dev_err(dev, "Missing PM APCC Global register physical address\n");
 +              return -EINVAL;
 +      }
 +      ctrl->reg_base = devm_ioremap(dev, res->start, resource_size(res));
 +      if (!ctrl->reg_base) {
 +              dev_err(dev, "Failed to map PM APCC Global registers\n");
 +              return -ENOMEM;
 +      }
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs-csr");
 +      if (!res) {
 +              dev_err(dev, "Missing APCS CSR physical base address\n");
 +              return -EINVAL;
 +      }
 +      ctrl->apcs_csr_base = devm_ioremap(dev, res->start, resource_size(res));
 +      if (!ctrl->apcs_csr_base) {
 +              dev_err(dev, "Failed to map APCS CSR registers\n");
 +              return -ENOMEM;
 +      }
 +
 +      ctrl->clk_src_override = of_property_read_bool(dev->of_node,
 +                                             "qcom,clock-source-override");
 +
 +      if (ctrl->clk_src_override)
 +              dev_info(dev, "overriding clock sources across APM switch\n");
 +
 +      ctrl->version = readl_relaxed(ctrl->apcs_csr_base + APCS_VERSION);
 +
 +      if (ctrl->version >= HMSS_VERSION_1P2)
 +              return ret;
 +
 +      ctrl->apcs_spm_events_addr = devm_kzalloc(&pdev->dev,
 +                                                SPM_EVENT_NUM
 +                                                * sizeof(void __iomem *),
 +                                                GFP_KERNEL);
 +      if (!ctrl->apcs_spm_events_addr) {
 +              dev_err(dev, "Failed to allocate memory for APCS SPM event registers\n");
 +              return -ENOMEM;
 +      }
 +
 +      for (i = 0; i < SPM_EVENT_NUM; i++) {
 +              res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 +                                                 res_name[i]);
 +              if (!res) {
 +                      dev_err(dev, "Missing address for %s\n", res_name[i]);
 +                      ret = -EINVAL;
 +                      goto free_events;
 +              }
 +
 +              ctrl->apcs_spm_events_addr[i] = devm_ioremap(dev, res->start,
 +                                              resource_size(res));
 +              if (!ctrl->apcs_spm_events_addr[i]) {
 +                      dev_err(dev, "Failed to map %s\n", res_name[i]);
 +                      ret = -ENOMEM;
 +                      goto free_events;
 +              }
 +
 +              dev_dbg(dev, "%s event phys: %pa virt:0x%p\n", res_name[i],
 +                      &res->start, ctrl->apcs_spm_events_addr[i]);
 +      }
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 +                                         "apc0-pll-ctl");
 +      if (!res) {
 +              dev_err(dev, "Missing APC0 PLL CTL physical address\n");
 +              ret = -EINVAL;
 +              goto free_events;
 +      }
 +
 +      ctrl->apc0_pll_ctl_addr = devm_ioremap(dev,
 +                                         res->start,
 +                                         resource_size(res));
 +      if (!ctrl->apc0_pll_ctl_addr) {
 +              dev_err(dev, "Failed to map APC0 PLL CTL register\n");
 +              ret = -ENOMEM;
 +              goto free_events;
 +      }
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 +                                         "apc1-pll-ctl");
 +      if (!res) {
 +              dev_err(dev, "Missing APC1 PLL CTL physical address\n");
 +              ret = -EINVAL;
 +              goto free_events;
 +      }
 +
 +      ctrl->apc1_pll_ctl_addr = devm_ioremap(dev,
 +                                         res->start,
 +                                         resource_size(res));
 +      if (!ctrl->apc1_pll_ctl_addr) {
 +              dev_err(dev, "Failed to map APC1 PLL CTL register\n");
 +              ret = -ENOMEM;
 +              goto free_events;
 +      }
 +
 +      return ret;
 +
 +free_events:
 +      devm_kfree(dev, ctrl->apcs_spm_events_addr);
 +      return ret;
 +}
 +
 +/* MSM8953 register offset definition */
 +#define MSM8953_APM_DLY_CNTR          0x2ac
 +
 +/* Register field shift definitions */
 +#define APM_CTL_SEL_SWITCH_DLY_SHIFT  0
 +#define APM_CTL_RESUME_CLK_DLY_SHIFT  8
 +#define APM_CTL_HALT_CLK_DLY_SHIFT    16
 +#define APM_CTL_POST_HALT_DLY_SHIFT   24
 +
 +/* Register field mask definitions */
 +#define APM_CTL_SEL_SWITCH_DLY_MASK   GENMASK(7, 0)
 +#define APM_CTL_RESUME_CLK_DLY_MASK   GENMASK(15, 8)
 +#define APM_CTL_HALT_CLK_DLY_MASK     GENMASK(23, 16)
 +#define APM_CTL_POST_HALT_DLY_MASK    GENMASK(31, 24)
 +
 +/*
 + * Get the resources associated with the MSM8953 APM controller from
 + * device tree, remap all I/O addresses, and program the initial
 + * register configuration required for the MSM8953 APM controller device.
 + */
 +static int msm8953_apm_ctrl_init(struct platform_device *pdev,
 +                               struct msm_apm_ctrl_dev *ctrl)
 +{
 +      struct device *dev = &pdev->dev;
 +      struct resource *res;
 +      u32 delay_counter, val = 0, regval = 0;
 +      int rc = 0;
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc-glb");
 +      if (!res) {
 +              dev_err(dev, "Missing PM APCC Global register physical address\n");
 +              return -ENODEV;
 +      }
 +      ctrl->reg_base = devm_ioremap(dev, res->start, resource_size(res));
 +      if (!ctrl->reg_base) {
 +              dev_err(dev, "Failed to map PM APCC Global registers\n");
 +              return -ENOMEM;
 +      }
 +
 +      /*
 +       * Initial APM register configuration required before starting
 +       * APM HW controller.
 +       */
 +      regval = readl_relaxed(ctrl->reg_base + MSM8953_APM_DLY_CNTR);
 +      val = regval;
 +
 +      if (of_find_property(dev->of_node, "qcom,apm-post-halt-delay", NULL)) {
 +              rc = of_property_read_u32(dev->of_node,
 +                              "qcom,apm-post-halt-delay", &delay_counter);
 +              if (rc < 0) {
 +                      dev_err(dev, "apm-post-halt-delay read failed, rc = %d",
 +                              rc);
 +                      return rc;
 +              }
 +
 +              val &= ~APM_CTL_POST_HALT_DLY_MASK;
 +              val |= (delay_counter << APM_CTL_POST_HALT_DLY_SHIFT)
 +                      & APM_CTL_POST_HALT_DLY_MASK;
 +      }
 +
 +      if (of_find_property(dev->of_node, "qcom,apm-halt-clk-delay", NULL)) {
 +              rc = of_property_read_u32(dev->of_node,
 +                              "qcom,apm-halt-clk-delay", &delay_counter);
 +              if (rc < 0) {
 +                      dev_err(dev, "apm-halt-clk-delay read failed, rc = %d",
 +                              rc);
 +                      return rc;
 +              }
 +
 +              val &= ~APM_CTL_HALT_CLK_DLY_MASK;
 +              val |= (delay_counter << APM_CTL_HALT_CLK_DLY_SHIFT)
 +                      & APM_CTL_HALT_CLK_DLY_MASK;
 +      }
 +
 +      if (of_find_property(dev->of_node, "qcom,apm-resume-clk-delay", NULL)) {
 +              rc = of_property_read_u32(dev->of_node,
 +                              "qcom,apm-resume-clk-delay", &delay_counter);
 +              if (rc < 0) {
 +                      dev_err(dev, "apm-resume-clk-delay read failed, rc = %d",
 +                              rc);
 +                      return rc;
 +              }
 +
 +              val &= ~APM_CTL_RESUME_CLK_DLY_MASK;
 +              val |= (delay_counter << APM_CTL_RESUME_CLK_DLY_SHIFT)
 +                      & APM_CTL_RESUME_CLK_DLY_MASK;
 +      }
 +
 +      if (of_find_property(dev->of_node, "qcom,apm-sel-switch-delay", NULL)) {
 +              rc = of_property_read_u32(dev->of_node,
 +                              "qcom,apm-sel-switch-delay", &delay_counter);
 +              if (rc < 0) {
 +                      dev_err(dev, "apm-sel-switch-delay read failed, rc = %d",
 +                              rc);
 +                      return rc;
 +              }
 +
 +              val &= ~APM_CTL_SEL_SWITCH_DLY_MASK;
 +              val |= (delay_counter << APM_CTL_SEL_SWITCH_DLY_SHIFT)
 +                      & APM_CTL_SEL_SWITCH_DLY_MASK;
 +      }
 +
 +      if (val != regval) {
 +              writel_relaxed(val, ctrl->reg_base + MSM8953_APM_DLY_CNTR);
 +              /* make sure write completes before return */
 +              mb();
 +      }
 +
 +      return rc;
 +}
 +
 +static int msm_apm_secure_clock_source_override(
 +                      struct msm_apm_ctrl_dev *ctrl_dev, bool enable)
 +{
 +      int ret;
 +
 +      if (ctrl_dev->clk_src_override) {
 +              ret = __invoke_psci_fn_smc(0xC4000020, 3, enable ?
 +                                         CLOCK_ASSERT_ENABLE :
 +                                         CLOCK_ASSERT_DISABLE, 0);
 +              if (ret)
 +                      dev_err(ctrl_dev->dev, "PSCI request to switch to %s clock source failed\n",
 +                              enable ? "GPLL0" : "original");
 +      }
 +
 +      return 0;
 +}
 +
 +static int msm8996_apm_wait_for_switch(struct msm_apm_ctrl_dev *ctrl_dev,
 +                                      u32 done_val)
 +{
 +      int timeout = MSM_APM_SWITCH_TIMEOUT_US;
 +      u32 regval;
 +
 +      while (timeout > 0) {
 +              regval = readl_relaxed(ctrl_dev->reg_base + APCC_APM_CTL_STS);
 +              if ((regval & MSM_APM_CTL_STS_MASK) == done_val)
 +                      break;
 +
 +              udelay(1);
 +              timeout--;
 +      }
 +
 +      if (timeout == 0) {
 +              dev_err(ctrl_dev->dev, "%s switch timed out. APCC_APM_CTL_STS=0x%x\n",
 +                      done_val == MSM_APM_MX_DONE_VAL
 +                              ? "APCC to MX" : "MX to APCC",
 +                      regval);
 +              return -ETIMEDOUT;
 +      }
 +
 +      return 0;
 +}
 +
 +static int msm8996_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      unsigned long flags;
 +      int i, ret;
 +
 +      mutex_lock(&scm_lmh_lock);
 +      spin_lock_irqsave(&ctrl_dev->lock, flags);
 +
 +      ret = msm_apm_secure_clock_source_override(ctrl_dev, true);
 +      if (ret)
 +              goto done;
 +
 +      /* Perform revision-specific programming steps */
 +      if (ctrl_dev->version < HMSS_VERSION_1P2) {
 +              /* Clear SPM events */
 +              for (i = 0; i < SPM_EVENT_NUM; i++)
 +                      writel_relaxed(SPM_EVENT_CLEAR_VAL,
 +                                     ctrl_dev->apcs_spm_events_addr[i]);
 +
 +              udelay(SPM_WAKEUP_DELAY_US);
 +
 +              /* Switch APC/CBF to GPLL0 clock */
 +              writel_relaxed(APCS_GFMUXA_SEL_VAL,
 +                             ctrl_dev->apcs_csr_base + APCS_SPARE);
 +              ndelay(200);
 +              writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
 +                             ctrl_dev->apc0_pll_ctl_addr);
 +              ndelay(200);
 +              writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
 +                             ctrl_dev->apc1_pll_ctl_addr);
 +
 +              /* Ensure writes complete before proceeding */
 +              mb();
 +      }
 +
 +      /* Switch arrays to MX supply and wait for its completion */
 +      writel_relaxed(MSM_APM_MX_MODE_VAL, ctrl_dev->reg_base +
 +                     APCC_APM_MODE);
 +
 +      /* Ensure write above completes before delaying */
 +      mb();
 +
 +      ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_MX_DONE_VAL);
 +
 +      /* Perform revision-specific programming steps */
 +      if (ctrl_dev->version < HMSS_VERSION_1P2) {
 +              /* Switch APC/CBF clocks to original source */
 +              writel_relaxed(APCS_GFMUXA_DESEL_VAL,
 +                             ctrl_dev->apcs_csr_base + APCS_SPARE);
 +              ndelay(200);
 +              writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
 +                             ctrl_dev->apc0_pll_ctl_addr);
 +              ndelay(200);
 +              writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
 +                             ctrl_dev->apc1_pll_ctl_addr);
 +
 +              /* Complete clock source switch before SPM event sequence */
 +              mb();
 +
 +              /* Set SPM events */
 +              for (i = 0; i < SPM_EVENT_NUM; i++)
 +                      writel_relaxed(SPM_EVENT_SET_VAL,
 +                                     ctrl_dev->apcs_spm_events_addr[i]);
 +      }
 +
 +      /*
 +       * Ensure that HMSS v1.0/v1.1 register writes are completed before
 +       * bailing out in the case of a switching time out.
 +       */
 +      if (ret)
 +              goto done;
 +
 +      ret = msm_apm_secure_clock_source_override(ctrl_dev, false);
 +      if (ret)
 +              goto done;
 +
 +      ctrl_dev->supply = MSM_APM_SUPPLY_MX;
 +      dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
 +
 +done:
 +      spin_unlock_irqrestore(&ctrl_dev->lock, flags);
 +      mutex_unlock(&scm_lmh_lock);
 +
 +      return ret;
 +}
 +
 +static int msm8996_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      unsigned long flags;
 +      int i, ret;
 +
 +      mutex_lock(&scm_lmh_lock);
 +      spin_lock_irqsave(&ctrl_dev->lock, flags);
 +
 +      ret = msm_apm_secure_clock_source_override(ctrl_dev, true);
 +      if (ret)
 +              goto done;
 +
 +      /* Perform revision-specific programming steps */
 +      if (ctrl_dev->version < HMSS_VERSION_1P2) {
 +              /* Clear SPM events */
 +              for (i = 0; i < SPM_EVENT_NUM; i++)
 +                      writel_relaxed(SPM_EVENT_CLEAR_VAL,
 +                                     ctrl_dev->apcs_spm_events_addr[i]);
 +
 +              udelay(SPM_WAKEUP_DELAY_US);
 +
 +              /* Switch APC/CBF to GPLL0 clock */
 +              writel_relaxed(APCS_GFMUXA_SEL_VAL,
 +                             ctrl_dev->apcs_csr_base + APCS_SPARE);
 +              ndelay(200);
 +              writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
 +                             ctrl_dev->apc0_pll_ctl_addr);
 +              ndelay(200);
 +              writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
 +                             ctrl_dev->apc1_pll_ctl_addr);
 +
 +              /* Ensure previous writes complete before proceeding */
 +              mb();
 +      }
 +
 +      /* Switch arrays to APCC supply and wait for its completion */
 +      writel_relaxed(MSM_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
 +                     APCC_APM_MODE);
 +
 +      /* Ensure write above completes before delaying */
 +      mb();
 +
 +      ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_APCC_DONE_VAL);
 +
 +      /* Perform revision-specific programming steps */
 +      if (ctrl_dev->version < HMSS_VERSION_1P2) {
 +              /* Set SPM events */
 +              for (i = 0; i < SPM_EVENT_NUM; i++)
 +                      writel_relaxed(SPM_EVENT_SET_VAL,
 +                                     ctrl_dev->apcs_spm_events_addr[i]);
 +
 +              /* Complete SPM event sequence before clock source switch */
 +              mb();
 +
 +              /* Switch APC/CBF clocks to original source */
 +              writel_relaxed(APCS_GFMUXA_DESEL_VAL,
 +                             ctrl_dev->apcs_csr_base + APCS_SPARE);
 +              ndelay(200);
 +              writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
 +                             ctrl_dev->apc0_pll_ctl_addr);
 +              ndelay(200);
 +              writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
 +                             ctrl_dev->apc1_pll_ctl_addr);
 +      }
 +
 +      /*
 +       * Ensure that HMSS v1.0/v1.1 register writes are completed before
 +       * bailing out in the case of a switching time out.
 +       */
 +      if (ret)
 +              goto done;
 +
 +      ret = msm_apm_secure_clock_source_override(ctrl_dev, false);
 +      if (ret)
 +              goto done;
 +
 +      ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
 +      dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
 +
 +done:
 +      spin_unlock_irqrestore(&ctrl_dev->lock, flags);
 +      mutex_unlock(&scm_lmh_lock);
 +
 +      return ret;
 +}
 +
 +static int msm8996pro_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      unsigned long flags;
 +      int ret;
 +
 +      spin_lock_irqsave(&ctrl_dev->lock, flags);
 +
 +      /* Switch arrays to MX supply and wait for its completion */
 +      writel_relaxed(MSM_APM_MX_MODE_VAL, ctrl_dev->reg_base +
 +                     APCC_APM_MODE);
 +
 +      /* Ensure write above completes before delaying */
 +      mb();
 +
 +      ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_MX_DONE_VAL);
 +      if (ret)
 +              goto done;
 +
 +      ctrl_dev->supply = MSM_APM_SUPPLY_MX;
 +      dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
 +
 +done:
 +      spin_unlock_irqrestore(&ctrl_dev->lock, flags);
 +
 +      return ret;
 +}
 +
 +static int msm8996pro_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      unsigned long flags;
 +      int ret;
 +
 +      spin_lock_irqsave(&ctrl_dev->lock, flags);
 +
 +      /* Switch arrays to APCC supply and wait for its completion */
 +      writel_relaxed(MSM_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
 +                     APCC_APM_MODE);
 +
 +      /* Ensure write above completes before delaying */
 +      mb();
 +
 +      ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_APCC_DONE_VAL);
 +      if (ret)
 +              goto done;
 +
 +      ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
 +      dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
 +
 +done:
 +      spin_unlock_irqrestore(&ctrl_dev->lock, flags);
 +
 +      return ret;
 +}
 +
 +/* MSM8953 register value definitions */
 +#define MSM8953_APM_MX_MODE_VAL            0x00
 +#define MSM8953_APM_APCC_MODE_VAL          0x02
 +#define MSM8953_APM_MX_DONE_VAL            0x00
 +#define MSM8953_APM_APCC_DONE_VAL          0x03
 +
 +/* MSM8953 register offset definitions */
 +#define MSM8953_APCC_APM_MODE              0x000002a8
 +#define MSM8953_APCC_APM_CTL_STS           0x000002b0
 +
 +/* 8953 constants */
 +#define MSM8953_APM_SWITCH_TIMEOUT_US      500
 +
 +/* Register bit mask definitions */
 +#define MSM8953_APM_CTL_STS_MASK           0x1f
 +
 +static int msm8953_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      int timeout = MSM8953_APM_SWITCH_TIMEOUT_US;
 +      u32 regval;
 +      int ret = 0;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ctrl_dev->lock, flags);
 +
 +      /* Switch arrays to MX supply and wait for its completion */
 +      writel_relaxed(MSM8953_APM_MX_MODE_VAL, ctrl_dev->reg_base +
 +                     MSM8953_APCC_APM_MODE);
 +
 +      /* Ensure write above completes before delaying */
 +      mb();
 +
 +      while (timeout > 0) {
 +              regval = readl_relaxed(ctrl_dev->reg_base +
 +                                      MSM8953_APCC_APM_CTL_STS);
 +              if ((regval & MSM8953_APM_CTL_STS_MASK) ==
 +                              MSM8953_APM_MX_DONE_VAL)
 +                      break;
 +
 +              udelay(1);
 +              timeout--;
 +      }
 +
 +      if (timeout == 0) {
 +              ret = -ETIMEDOUT;
 +              dev_err(ctrl_dev->dev, "APCC to MX APM switch timed out. APCC_APM_CTL_STS=0x%x\n",
 +                      regval);
 +      } else {
 +              ctrl_dev->supply = MSM_APM_SUPPLY_MX;
 +              dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
 +      }
 +
 +      spin_unlock_irqrestore(&ctrl_dev->lock, flags);
 +
 +      return ret;
 +}
 +
 +static int msm8953_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      int timeout = MSM8953_APM_SWITCH_TIMEOUT_US;
 +      u32 regval;
 +      int ret = 0;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ctrl_dev->lock, flags);
 +
 +      /* Switch arrays to APCC supply and wait for its completion */
 +      writel_relaxed(MSM8953_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
 +                     MSM8953_APCC_APM_MODE);
 +
 +      /* Ensure write above completes before delaying */
 +      mb();
 +
 +      while (timeout > 0) {
 +              regval = readl_relaxed(ctrl_dev->reg_base +
 +                                      MSM8953_APCC_APM_CTL_STS);
 +              if ((regval & MSM8953_APM_CTL_STS_MASK) ==
 +                              MSM8953_APM_APCC_DONE_VAL)
 +                      break;
 +
 +              udelay(1);
 +              timeout--;
 +      }
 +
 +      if (timeout == 0) {
 +              ret = -ETIMEDOUT;
 +              dev_err(ctrl_dev->dev, "MX to APCC APM switch timed out. APCC_APM_CTL_STS=0x%x\n",
 +                      regval);
 +      } else {
 +              ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
 +              dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
 +      }
 +
 +      spin_unlock_irqrestore(&ctrl_dev->lock, flags);
 +
 +      return ret;
 +}
 +
 +static int msm_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      int ret = 0;
 +
 +      switch (ctrl_dev->msm_id) {
 +      case MSM8996_ID:
 +              ret = msm8996_apm_switch_to_mx(ctrl_dev);
 +              break;
 +      case MSM8996PRO_ID:
 +              ret = msm8996pro_apm_switch_to_mx(ctrl_dev);
 +              break;
 +      case MSM8953_ID:
 +              ret = msm8953_apm_switch_to_mx(ctrl_dev);
 +              break;
 +      }
 +
 +      return ret;
 +}
 +
 +static int msm_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      int ret = 0;
 +
 +      switch (ctrl_dev->msm_id) {
 +      case MSM8996_ID:
 +              ret = msm8996_apm_switch_to_apcc(ctrl_dev);
 +              break;
 +      case MSM8996PRO_ID:
 +              ret = msm8996pro_apm_switch_to_apcc(ctrl_dev);
 +              break;
 +      case MSM8953_ID:
 +              ret = msm8953_apm_switch_to_apcc(ctrl_dev);
 +              break;
 +      }
 +
 +      return ret;
 +}
 +
 +/**
 + * msm_apm_get_supply() - Returns the supply that is currently
 + *                    powering the memory arrays
 + * @ctrl_dev:                   Pointer to an MSM APM controller device
 + *
 + * Returns the supply currently selected by the APM.
 + */
 +int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      return ctrl_dev->supply;
 +}
 +EXPORT_SYMBOL(msm_apm_get_supply);
 +
 +/**
 + * msm_apm_set_supply() - Perform the necessary steps to switch the voltage
 + *                        source of the memory arrays to a given supply
 + * @ctrl_dev:                   Pointer to an MSM APM controller device
 + * @supply:                     Power rail to use as supply for the memory
 + *                              arrays
 + *
 + * Returns 0 on success, -ETIMEDOUT on APM switch timeout, or -EPERM if
 + * the supply is not supported.
 + */
 +int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev,
 +                     enum msm_apm_supply supply)
 +{
 +      int ret;
 +
 +      switch (supply) {
 +      case MSM_APM_SUPPLY_APCC:
 +              ret = msm_apm_switch_to_apcc(ctrl_dev);
 +              break;
 +      case MSM_APM_SUPPLY_MX:
 +              ret = msm_apm_switch_to_mx(ctrl_dev);
 +              break;
 +      default:
 +              ret = -EPERM;
 +              break;
 +      }
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL(msm_apm_set_supply);
 +
 +/**
 + * msm_apm_ctrl_dev_get() - get a handle to the MSM APM controller linked to
 + *                          the device in device tree
 + * @dev:                    Pointer to the device
 + *
 + * The device must specify "qcom,apm-ctrl" property in its device tree
 + * node which points to an MSM APM controller device node.
 + *
 + * Returns an MSM APM controller handle if successful or ERR_PTR on any error.
 + * If the APM controller device hasn't probed yet, ERR_PTR(-EPROBE_DEFER) is
 + * returned.
 + */
 +struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev)
 +{
 +      struct msm_apm_ctrl_dev *ctrl_dev = NULL;
 +      struct msm_apm_ctrl_dev *dev_found = ERR_PTR(-EPROBE_DEFER);
 +      struct device_node *ctrl_node;
 +
 +      if (!dev || !dev->of_node) {
 +              pr_err("Invalid device node\n");
 +              return ERR_PTR(-EINVAL);
 +      }
 +
 +      ctrl_node = of_parse_phandle(dev->of_node, "qcom,apm-ctrl", 0);
 +      if (!ctrl_node) {
 +              pr_err("Could not find qcom,apm-ctrl property in %s\n",
 +                     dev->of_node->full_name);
 +              return ERR_PTR(-ENXIO);
 +      }
 +
 +      mutex_lock(&apm_ctrl_list_mutex);
 +      list_for_each_entry(ctrl_dev, &apm_ctrl_list, list) {
 +              if (ctrl_dev->dev && ctrl_dev->dev->of_node == ctrl_node) {
 +                      dev_found = ctrl_dev;
 +                      break;
 +              }
 +      }
 +      mutex_unlock(&apm_ctrl_list_mutex);
 +
 +      of_node_put(ctrl_node);
 +      return dev_found;
 +}
 +EXPORT_SYMBOL(msm_apm_ctrl_dev_get);
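
msm_apm_ctrl_dev_get() and msm_apm_set_supply() together form the consumer-facing API of this driver. A minimal sketch of a hypothetical client follows; only the msm_apm_* calls and the MSM_APM_SUPPLY_* values come from this file, and the probe flow and names are made up for illustration:

/* Hypothetical consumer: everything except the msm_apm_* calls is made up. */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/power/qcom/apm.h>

static int example_client_probe(struct platform_device *pdev)
{
        struct msm_apm_ctrl_dev *apm;
        int ret;

        /* Resolves the "qcom,apm-ctrl" phandle in this device's DT node;
         * returns ERR_PTR(-EPROBE_DEFER) until the APM controller probes.
         */
        apm = msm_apm_ctrl_dev_get(&pdev->dev);
        if (IS_ERR(apm))
                return PTR_ERR(apm);

        /* Ask the APM to power the memory arrays from VDD_MX. */
        ret = msm_apm_set_supply(apm, MSM_APM_SUPPLY_MX);
        if (ret)
                return ret;

        dev_dbg(&pdev->dev, "arrays now powered from supply %d\n",
                msm_apm_get_supply(apm));
        return 0;
}
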
 +
 +#if defined(CONFIG_DEBUG_FS)
 +
 +static int apm_supply_dbg_open(struct inode *inode, struct file *filep)
 +{
 +      filep->private_data = inode->i_private;
 +
 +      return 0;
 +}
 +
 +static ssize_t apm_supply_dbg_read(struct file *filep, char __user *ubuf,
 +                                 size_t count, loff_t *ppos)
 +{
 +      struct msm_apm_ctrl_dev *ctrl_dev = filep->private_data;
 +      char buf[10];
 +      int len;
 +
 +      if (!ctrl_dev) {
 +              pr_err("invalid apm ctrl handle\n");
 +              return -ENODEV;
 +      }
 +
 +      if (ctrl_dev->supply == MSM_APM_SUPPLY_APCC)
 +              len = snprintf(buf, sizeof(buf), "APCC\n");
 +      else if (ctrl_dev->supply == MSM_APM_SUPPLY_MX)
 +              len = snprintf(buf, sizeof(buf), "MX\n");
 +      else
 +              len = snprintf(buf, sizeof(buf), "ERR\n");
 +
 +      return simple_read_from_buffer(ubuf, count, ppos, buf, len);
 +}
 +
 +static const struct file_operations apm_supply_fops = {
 +      .open = apm_supply_dbg_open,
 +      .read = apm_supply_dbg_read,
 +};
 +
 +static void apm_debugfs_base_init(void)
 +{
 +      apm_debugfs_base = debugfs_create_dir("msm-apm", NULL);
 +
 +      if (IS_ERR_OR_NULL(apm_debugfs_base))
 +              pr_err("msm-apm debugfs base directory creation failed\n");
 +}
 +
 +static void apm_debugfs_init(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      struct dentry *temp;
 +
 +      if (IS_ERR_OR_NULL(apm_debugfs_base)) {
 +              pr_err("Base directory missing, cannot create apm debugfs nodes\n");
 +              return;
 +      }
 +
 +      ctrl_dev->debugfs = debugfs_create_dir(dev_name(ctrl_dev->dev),
 +                                             apm_debugfs_base);
 +      if (IS_ERR_OR_NULL(ctrl_dev->debugfs)) {
 +              pr_err("%s debugfs directory creation failed\n",
 +                     dev_name(ctrl_dev->dev));
 +              return;
 +      }
 +
 +      temp = debugfs_create_file("supply", 0444, ctrl_dev->debugfs,
 +                                 ctrl_dev, &apm_supply_fops);
 +      if (IS_ERR_OR_NULL(temp)) {
 +              pr_err("supply mode creation failed\n");
 +              return;
 +      }
 +}
 +
 +static void apm_debugfs_deinit(struct msm_apm_ctrl_dev *ctrl_dev)
 +{
 +      if (!IS_ERR_OR_NULL(ctrl_dev->debugfs))
 +              debugfs_remove_recursive(ctrl_dev->debugfs);
 +}
 +
 +static void apm_debugfs_base_remove(void)
 +{
 +      debugfs_remove_recursive(apm_debugfs_base);
 +}
 +#else
 +
 +static void apm_debugfs_base_init(void)
 +{}
 +
 +static void apm_debugfs_init(struct msm_apm_ctrl_dev *ctrl_dev)
 +{}
 +
 +static void apm_debugfs_deinit(struct msm_apm_ctrl_dev *ctrl_dev)
 +{}
 +
 +static void apm_debugfs_base_remove(void)
 +{}
 +
 +#endif
 +
 +static const struct of_device_id msm_apm_match_table[] = {
 +      {
 +              .compatible = "qcom,msm-apm",
 +              .data = (void *)(uintptr_t)MSM8996_ID,
 +      },
 +      {
 +              .compatible = "qcom,msm8996pro-apm",
 +              .data = (void *)(uintptr_t)MSM8996PRO_ID,
 +      },
 +      {
 +              .compatible = "qcom,msm8953-apm",
 +              .data = (void *)(uintptr_t)MSM8953_ID,
 +      },
 +      {}
 +};
 +
 +static int msm_apm_probe(struct platform_device *pdev)
 +{
 +      struct device *dev = &pdev->dev;
 +      struct msm_apm_ctrl_dev *ctrl;
 +      const struct of_device_id *match;
 +      int ret = 0;
 +
 +      dev_dbg(dev, "probing MSM Array Power Mux driver\n");
 +
 +      if (!dev->of_node) {
 +              dev_err(dev, "Device tree node is missing\n");
 +              return -ENODEV;
 +      }
 +
 +      match = of_match_device(msm_apm_match_table, dev);
 +      if (!match)
 +              return -ENODEV;
 +
 +      ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
 +      if (!ctrl)
 +              return -ENOMEM;
 +
 +      INIT_LIST_HEAD(&ctrl->list);
 +      spin_lock_init(&ctrl->lock);
 +      ctrl->dev = dev;
 +      ctrl->msm_id = (uintptr_t)match->data;
 +      platform_set_drvdata(pdev, ctrl);
 +
 +      switch (ctrl->msm_id) {
 +      case MSM8996_ID:
 +      case MSM8996PRO_ID:
 +              ret = msm_apm_ctrl_devm_ioremap(pdev, ctrl);
 +              if (ret) {
 +                      dev_err(dev, "Failed to add APM controller device\n");
 +                      return ret;
 +              }
 +              break;
 +      case MSM8953_ID:
 +              ret = msm8953_apm_ctrl_init(pdev, ctrl);
 +              if (ret) {
 +                      dev_err(dev, "Failed to initialize APM controller device: ret=%d\n",
 +                              ret);
 +                      return ret;
 +              }
 +              break;
 +      default:
 +              dev_err(dev, "unable to add APM controller device for msm_id:%d\n",
 +                      ctrl->msm_id);
 +              return -ENODEV;
 +      }
 +
 +      apm_debugfs_init(ctrl);
 +      mutex_lock(&apm_ctrl_list_mutex);
 +      list_add_tail(&ctrl->list, &apm_ctrl_list);
 +      mutex_unlock(&apm_ctrl_list_mutex);
 +
 +      dev_dbg(dev, "MSM Array Power Mux driver probe successful");
 +
 +      return ret;
 +}
 +
 +static int msm_apm_remove(struct platform_device *pdev)
 +{
 +      struct msm_apm_ctrl_dev *ctrl_dev;
 +
 +      ctrl_dev = platform_get_drvdata(pdev);
 +      if (ctrl_dev) {
 +              mutex_lock(&apm_ctrl_list_mutex);
 +              list_del(&ctrl_dev->list);
 +              mutex_unlock(&apm_ctrl_list_mutex);
 +              apm_debugfs_deinit(ctrl_dev);
 +      }
 +
 +      return 0;
 +}
 +
 +static struct platform_driver msm_apm_driver = {
 +      .driver         = {
 +              .name           = MSM_APM_DRIVER_NAME,
 +              .of_match_table = msm_apm_match_table,
 +              .owner          = THIS_MODULE,
 +      },
 +      .probe          = msm_apm_probe,
 +      .remove         = msm_apm_remove,
 +};
 +
 +static int __init msm_apm_init(void)
 +{
 +      apm_debugfs_base_init();
 +      return platform_driver_register(&msm_apm_driver);
 +}
 +
 +static void __exit msm_apm_exit(void)
 +{
 +      platform_driver_unregister(&msm_apm_driver);
 +      apm_debugfs_base_remove();
 +}
 +
 +arch_initcall(msm_apm_init);
 +module_exit(msm_apm_exit);
 +
 +MODULE_DESCRIPTION("MSM Array Power Mux driver");
 +MODULE_LICENSE("GPL v2");
index d469463,0000000..f98db3c
mode 100644,000000..100644
--- /dev/null
@@@ -1,1329 -1,0 +1,1330 @@@
 +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License version 2 and
 + * only version 2 as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + */
 +
 +#define pr_fmt(fmt) "%s: " fmt, __func__
 +
 +#include <linux/delay.h>
 +#include <linux/err.h>
 +#include <linux/init.h>
 +#include <linux/kernel.h>
 +#include <linux/regmap.h>
 +#include <linux/module.h>
 +#include <linux/of.h>
 +#include <linux/of_device.h>
 +#include <linux/slab.h>
 +#include <linux/spmi.h>
 +#include <linux/platform_device.h>
 +#include <linux/string.h>
 +#include <linux/regulator/driver.h>
 +#include <linux/regulator/machine.h>
 +#include <linux/regulator/of_regulator.h>
 +#include <linux/regulator/spm-regulator.h>
 +#include <soc/qcom/spm.h>
 +#include <linux/arm-smccc.h>
++#include <linux/psci.h>
 +
 +#if !defined(CONFIG_ARM64) && !(defined(CONFIG_ARM) && defined(CONFIG_ARM_PSCI))
 +#define __invoke_psci_fn_smc(a, b, c, d) 0
 +#endif
 +
 +#define SPM_REGULATOR_DRIVER_NAME "qcom,spm-regulator"
 +
 +struct voltage_range {
 +      int min_uV;
 +      int set_point_min_uV;
 +      int max_uV;
 +      int step_uV;
 +};
 +
 +enum qpnp_regulator_uniq_type {
 +      QPNP_TYPE_HF,
 +      QPNP_TYPE_FTS2,
 +      QPNP_TYPE_FTS2p5,
 +      QPNP_TYPE_FTS426,
 +      QPNP_TYPE_ULT_HF,
 +};
 +
 +enum qpnp_regulator_type {
 +      QPNP_HF_TYPE            = 0x03,
 +      QPNP_FTS2_TYPE          = 0x1C,
 +      QPNP_FTS2p5_TYPE        = 0x1C,
 +      QPNP_FTS426_TYPE        = 0x1C,
 +      QPNP_ULT_HF_TYPE        = 0x22,
 +};
 +
 +enum qpnp_regulator_subtype {
 +      QPNP_FTS2_SUBTYPE       = 0x08,
 +      QPNP_HF_SUBTYPE         = 0x08,
 +      QPNP_FTS2p5_SUBTYPE     = 0x09,
 +      QPNP_FTS426_SUBTYPE     = 0x0A,
 +      QPNP_ULT_HF_SUBTYPE     = 0x0D,
 +};
 +
 +enum qpnp_logical_mode {
 +      QPNP_LOGICAL_MODE_AUTO,
 +      QPNP_LOGICAL_MODE_PWM,
 +};
 +
 +static const struct voltage_range fts2_range0 = {0, 350000, 1275000,  5000};
 +static const struct voltage_range fts2_range1 = {0, 700000, 2040000, 10000};
 +static const struct voltage_range fts2p5_range0
 +                                       = { 80000, 350000, 1355000,  5000};
 +static const struct voltage_range fts2p5_range1
 +                                       = {160000, 700000, 2200000, 10000};
 +static const struct voltage_range fts426_range = {0, 320000, 1352000, 4000};
 +static const struct voltage_range ult_hf_range0 = {375000, 375000, 1562500,
 +                                                              12500};
 +static const struct voltage_range ult_hf_range1 = {750000, 750000, 1525000,
 +                                                              25000};
 +static const struct voltage_range hf_range0 = {375000, 375000, 1562500, 12500};
 +static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000,
 +                                                              25000};
 +
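
These tables give, for each regulator range, the minimum programmable voltage, the lowest usable set point, the maximum voltage, and the step size, all in microvolts. A hedged sketch of how such a range is typically consumed — rounding a requested voltage up to the next set point — purely for illustration; the driver's real selection logic is not shown in this hunk:

/* Illustration only: map a requested microvolt value onto a range by
 * rounding up to the next set point. Not the driver's real code path.
 */
static int example_uv_to_vlevel(const struct voltage_range *r, int uV)
{
        if (uV < r->set_point_min_uV || uV > r->max_uV)
                return -EINVAL;

        /* Round up so the programmed voltage is never below the request. */
        return DIV_ROUND_UP(uV - r->min_uV, r->step_uV);
}
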
 +#define QPNP_SMPS_REG_TYPE            0x04
 +#define QPNP_SMPS_REG_SUBTYPE         0x05
 +#define QPNP_SMPS_REG_VOLTAGE_RANGE   0x40
 +#define QPNP_SMPS_REG_VOLTAGE_SETPOINT        0x41
 +#define QPNP_SMPS_REG_MODE            0x45
 +#define QPNP_SMPS_REG_STEP_CTRL               0x61
 +#define QPNP_SMPS_REG_UL_LL_CTRL      0x68
 +
 +/* FTS426 voltage control registers */
 +#define QPNP_FTS426_REG_VOLTAGE_LB            0x40
 +#define QPNP_FTS426_REG_VOLTAGE_UB            0x41
 +#define QPNP_FTS426_REG_VOLTAGE_VALID_LB      0x42
 +#define QPNP_FTS426_REG_VOLTAGE_VALID_UB      0x43
 +
 +/* HF voltage limit registers */
 +#define QPNP_HF_REG_VOLTAGE_ULS               0x69
 +#define QPNP_HF_REG_VOLTAGE_LLS               0x6B
 +
 +/* FTS voltage limit registers */
 +#define QPNP_FTS_REG_VOLTAGE_ULS_VALID        0x6A
 +#define QPNP_FTS_REG_VOLTAGE_LLS_VALID        0x6C
 +
 +/* FTS426 voltage limit registers */
 +#define QPNP_FTS426_REG_VOLTAGE_ULS_LB        0x68
 +#define QPNP_FTS426_REG_VOLTAGE_ULS_UB        0x69
 +
 +/* Common regulator UL & LL limits control register layout */
 +#define QPNP_COMMON_UL_EN_MASK                0x80
 +#define QPNP_COMMON_LL_EN_MASK                0x40
 +
 +#define QPNP_SMPS_MODE_PWM            0x80
 +#define QPNP_SMPS_MODE_AUTO           0x40
 +#define QPNP_FTS426_MODE_PWM          0x07
 +#define QPNP_FTS426_MODE_AUTO         0x06
 +
 +#define QPNP_SMPS_STEP_CTRL_STEP_MASK 0x18
 +#define QPNP_SMPS_STEP_CTRL_STEP_SHIFT        3
 +#define QPNP_SMPS_STEP_CTRL_DELAY_MASK        0x07
 +#define QPNP_SMPS_STEP_CTRL_DELAY_SHIFT       0
 +#define QPNP_FTS426_STEP_CTRL_DELAY_MASK      0x03
 +#define QPNP_FTS426_STEP_CTRL_DELAY_SHIFT     0
 +
 +/* Clock rate in kHz of the FTS2 regulator reference clock. */
 +#define QPNP_SMPS_CLOCK_RATE          19200
 +#define QPNP_FTS426_CLOCK_RATE                4800
 +
 +/* Time to delay in us to ensure that a mode change has completed. */
 +#define QPNP_FTS2_MODE_CHANGE_DELAY   50
 +
 +/* Minimum time in us that it takes to complete a single SPMI write. */
 +#define QPNP_SPMI_WRITE_MIN_DELAY     8
 +
 +/* Minimum voltage stepper delay for each step. */
 +#define QPNP_FTS2_STEP_DELAY          8
 +#define QPNP_HF_STEP_DELAY            20
 +#define QPNP_FTS426_STEP_DELAY                2
 +
 +/* Arbitrarily large max step size used to avoid possible numerical overflow */
 +#define SPM_REGULATOR_MAX_STEP_UV     10000000
 +
 +/*
 + * The ratio QPNP_FTS2_STEP_MARGIN_NUM/QPNP_FTS2_STEP_MARGIN_DEN is used to
 + * adjust the step rate in order to account for oscillator variance.  An
 + * illustrative calculation follows the margin constants below.
 + */
 +#define QPNP_FTS2_STEP_MARGIN_NUM     4
 +#define QPNP_FTS2_STEP_MARGIN_DEN     5
 +#define QPNP_FTS426_STEP_MARGIN_NUM   10
 +#define QPNP_FTS426_STEP_MARGIN_DEN   11
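 +
 +/*
 + * Illustrative numbers only (not taken from any datasheet): an FTS2 buck
 + * with a 19200 kHz reference clock, 5000 uV steps, and step/delay register
 + * fields of zero has a nominal rate of 19200 * 5000 / (1000 * 8) = 12000
 + * uV/us, which the 4/5 margin above derates to 9600 uV/us.  Raising the
 + * output by 100000 uV at that rate then costs DIV_ROUND_UP(100000, 9600)
 + * = 11 us of udelay() in spm_regulator_write_voltage().
 + */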
 +
 +/*
 + * Settling delay for FTS2.5
 + * Warm-up = 20 us; 0-10% and 90-100% non-linear V-ramp delay = 50 us
 + */
 +#define FTS2P5_SETTLING_DELAY_US      70
 +
 +/* VSET value used to decide the voltage range of an ULT SMPS */
 +#define ULT_SMPS_RANGE_SPLIT 0x60
 +
 +struct spm_vreg {
 +      struct regulator_desc           rdesc;
 +      struct regulator_dev            *rdev;
 +      struct platform_device          *pdev;
 +      struct regmap                   *regmap;
 +      const struct voltage_range      *range;
 +      int                             uV;
 +      int                             last_set_uV;
 +      unsigned                        vlevel;
 +      unsigned                        last_set_vlevel;
 +      u32                             max_step_uV;
 +      bool                            online;
 +      u16                             spmi_base_addr;
 +      enum qpnp_logical_mode          init_mode;
 +      enum qpnp_logical_mode          mode;
 +      int                             step_rate;
 +      enum qpnp_regulator_uniq_type   regulator_type;
 +      u32                             cpu_num;
 +      bool                            bypass_spm;
 +      struct regulator_desc           avs_rdesc;
 +      struct regulator_dev            *avs_rdev;
 +      int                             avs_min_uV;
 +      int                             avs_max_uV;
 +      bool                            avs_enabled;
 +      u32                             recal_cluster_mask;
 +};
 +
 +static inline bool spm_regulator_using_avs(struct spm_vreg *vreg)
 +{
 +      return vreg->avs_rdev && !vreg->bypass_spm;
 +}
 +
 +static int spm_regulator_uv_to_vlevel(struct spm_vreg *vreg, int uV)
 +{
 +      int vlevel;
 +
 +      if (vreg->regulator_type == QPNP_TYPE_FTS426)
 +              return roundup(uV, vreg->range->step_uV) / 1000;
 +
 +      vlevel = DIV_ROUND_UP(uV - vreg->range->min_uV, vreg->range->step_uV);
 +
 +      /* Fix VSET for ULT HF Buck */
 +      if (vreg->regulator_type == QPNP_TYPE_ULT_HF
 +          && vreg->range == &ult_hf_range1) {
 +              vlevel &= 0x1F;
 +              vlevel |= ULT_SMPS_RANGE_SPLIT;
 +      }
 +
 +      return vlevel;
 +}
 +
 +static int spm_regulator_vlevel_to_uv(struct spm_vreg *vreg, int vlevel)
 +{
 +      if (vreg->regulator_type == QPNP_TYPE_FTS426)
 +              return vlevel * 1000;
 +      /*
 +       * Calculate ULT HF buck VSET based on range:
 +       * In case of range 0: VSET is a 7-bit value.
 +       * In case of range 1: VSET is a 5-bit value.
 +       * A worked example follows this function.
 +       */
 +      if (vreg->regulator_type == QPNP_TYPE_ULT_HF
 +          && vreg->range == &ult_hf_range1)
 +              vlevel &= ~ULT_SMPS_RANGE_SPLIT;
 +
 +      return vlevel * vreg->range->step_uV + vreg->range->min_uV;
 +}
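 +
 +/*
 + * Worked example (illustrative values): on ult_hf_range1, 1000000 uV encodes
 + * as DIV_ROUND_UP(1000000 - 750000, 25000) = 0x0A, which
 + * spm_regulator_uv_to_vlevel() masks to 5 bits and ORs with
 + * ULT_SMPS_RANGE_SPLIT to give VSET = 0x6A; spm_regulator_vlevel_to_uv()
 + * strips the range bits again and recovers 0x0A * 25000 + 750000 = 1000000 uV.
 + */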
 +
 +static unsigned spm_regulator_vlevel_to_selector(struct spm_vreg *vreg,
 +                                               unsigned vlevel)
 +{
 +      /* Fix VSET for ULT HF Buck */
 +      if (vreg->regulator_type == QPNP_TYPE_ULT_HF
 +          && vreg->range == &ult_hf_range1)
 +              vlevel &= ~ULT_SMPS_RANGE_SPLIT;
 +
 +      return vlevel - (vreg->range->set_point_min_uV - vreg->range->min_uV)
 +                              / vreg->range->step_uV;
 +}
 +
 +static int qpnp_smps_read_voltage(struct spm_vreg *vreg)
 +{
 +      int rc;
 +      u8 val[2] = {0};
 +
 +      if (vreg->regulator_type == QPNP_TYPE_FTS426) {
 +              rc = regmap_bulk_read(vreg->regmap,
 +                      vreg->spmi_base_addr + QPNP_FTS426_REG_VOLTAGE_VALID_LB,
 +                               val, 2);
 +              if (rc) {
 +                      dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint registers, rc=%d\n",
 +                              __func__, rc);
 +                      return rc;
 +              }
 +
 +              vreg->last_set_vlevel = ((unsigned)val[1] << 8) | val[0];
 +      } else {
 +              rc = regmap_bulk_read(vreg->regmap,
 +                      vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
 +                              val, 1);
 +              if (rc) {
 +                      dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint register, rc=%d\n",
 +                              __func__, rc);
 +                      return rc;
 +              }
 +              vreg->last_set_vlevel = val[0];
 +      }
 +
 +      vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg,
 +                                              vreg->last_set_vlevel);
 +      return rc;
 +}
 +
 +static int qpnp_smps_write_voltage(struct spm_vreg *vreg, unsigned vlevel)
 +{
 +      int rc = 0;
 +      u8 reg[2];
 +
 +      /* Set voltage control registers via SPMI. */
 +      reg[0] = vlevel & 0xFF;
 +      reg[1] = (vlevel >> 8) & 0xFF;
 +
 +      if (vreg->regulator_type == QPNP_TYPE_FTS426) {
 +              rc = regmap_bulk_write(vreg->regmap,
 +                        vreg->spmi_base_addr + QPNP_FTS426_REG_VOLTAGE_LB,
 +                        reg, 2);
 +      } else {
 +              rc = regmap_write(vreg->regmap,
 +                        vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
 +                        reg[0]);
 +      }
 +
 +      if (rc)
 +              pr_err("%s: regmap_write failed, rc=%d\n",
 +                      vreg->rdesc.name, rc);
 +
 +      return rc;
 +}
 +
 +static inline enum qpnp_logical_mode qpnp_regval_to_mode(struct spm_vreg *vreg,
 +                                                      u8 regval)
 +{
 +      if (vreg->regulator_type == QPNP_TYPE_FTS426)
 +              return (regval == QPNP_FTS426_MODE_PWM)
 +                      ? QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO;
 +      else
 +              return (regval & QPNP_SMPS_MODE_PWM)
 +                      ? QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO;
 +}
 +
 +static inline u8 qpnp_mode_to_regval(struct spm_vreg *vreg,
 +                                      enum qpnp_logical_mode mode)
 +{
 +      if (vreg->regulator_type == QPNP_TYPE_FTS426)
 +              return (mode == QPNP_LOGICAL_MODE_PWM)
 +                      ? QPNP_FTS426_MODE_PWM : QPNP_FTS426_MODE_AUTO;
 +      else
 +              return (mode == QPNP_LOGICAL_MODE_PWM)
 +                      ? QPNP_SMPS_MODE_PWM : QPNP_SMPS_MODE_AUTO;
 +}
 +
 +static int qpnp_smps_set_mode(struct spm_vreg *vreg, u8 mode)
 +{
 +      int rc;
 +
 +      rc = regmap_write(vreg->regmap,
 +                        vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
 +                        qpnp_mode_to_regval(vreg, mode));
 +      if (rc)
 +              dev_err(&vreg->pdev->dev,
 +                      "%s: could not write to mode register, rc=%d\n",
 +                      __func__, rc);
 +
 +      return rc;
 +}
 +
 +static int spm_regulator_get_voltage(struct regulator_dev *rdev)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +      int vlevel, rc;
 +
 +      if (spm_regulator_using_avs(vreg)) {
 +              vlevel = msm_spm_get_vdd(vreg->cpu_num);
 +
 +              if (IS_ERR_VALUE(vlevel)) {
 +                      pr_debug("%s: msm_spm_get_vdd failed, rc=%d; falling back on SPMI read\n",
 +                              vreg->rdesc.name, vlevel);
 +
 +                      rc = qpnp_smps_read_voltage(vreg);
 +                      if (rc) {
 +                              pr_err("%s: voltage read failed, rc=%d\n",
 +                                     vreg->rdesc.name, rc);
 +                              return rc;
 +                      }
 +
 +                      return vreg->last_set_uV;
 +              }
 +
 +              vreg->last_set_vlevel = vlevel;
 +              vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
 +
 +              return vreg->last_set_uV;
 +      } else {
 +              return vreg->uV;
 +      }
 +}
 +
 +static int spm_regulator_write_voltage(struct spm_vreg *vreg, int uV)
 +{
 +      unsigned vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
 +      bool spm_failed = false;
 +      int rc = 0;
 +      u32 slew_delay;
 +
 +      if (likely(!vreg->bypass_spm)) {
 +              /* Set voltage control register via SPM. */
 +              rc = msm_spm_set_vdd(vreg->cpu_num, vlevel);
 +              if (rc) {
 +                      pr_debug("%s: msm_spm_set_vdd failed, rc=%d; falling back on SPMI write\n",
 +                              vreg->rdesc.name, rc);
 +                      spm_failed = true;
 +              }
 +      }
 +
 +      if (unlikely(vreg->bypass_spm || spm_failed)) {
 +              rc = qpnp_smps_write_voltage(vreg, vlevel);
 +              if (rc) {
 +                      pr_err("%s: voltage write failed, rc=%d\n",
 +                              vreg->rdesc.name, rc);
 +                      return rc;
 +              }
 +      }
 +
 +      if (uV > vreg->last_set_uV) {
 +              /* Wait for voltage stepping to complete. */
 +              slew_delay = DIV_ROUND_UP(uV - vreg->last_set_uV,
 +                                      vreg->step_rate);
 +              if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
 +                      slew_delay += FTS2P5_SETTLING_DELAY_US;
 +              udelay(slew_delay);
 +      } else if (vreg->regulator_type == QPNP_TYPE_FTS2p5) {
 +              /* add the ramp-down delay */
 +              slew_delay = DIV_ROUND_UP(vreg->last_set_uV - uV,
 +                              vreg->step_rate) + FTS2P5_SETTLING_DELAY_US;
 +              udelay(slew_delay);
 +      }
 +
 +      vreg->last_set_uV = uV;
 +      vreg->last_set_vlevel = vlevel;
 +
 +      return rc;
 +}
 +
 +static int spm_regulator_recalibrate(struct spm_vreg *vreg)
 +{
 +      int rc;
 +
 +      if (!vreg->recal_cluster_mask)
 +              return 0;
 +
 +      rc = __invoke_psci_fn_smc(0xC4000020, vreg->recal_cluster_mask,
 +                                2, 0);
 +      if (rc)
 +              pr_err("%s: recalibration failed, rc=%d\n", vreg->rdesc.name,
 +                      rc);
 +
 +      return rc;
 +}
 +
 +static int _spm_regulator_set_voltage(struct regulator_dev *rdev)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +      bool pwm_required;
 +      int rc = 0;
 +      int uV;
 +
 +      rc = spm_regulator_get_voltage(rdev);
 +      if (IS_ERR_VALUE(rc))
 +              return rc;
 +
 +      if (vreg->vlevel == vreg->last_set_vlevel)
 +              return 0;
 +
 +      pwm_required = (vreg->regulator_type == QPNP_TYPE_FTS2)
 +                      && (vreg->init_mode != QPNP_LOGICAL_MODE_PWM)
 +                      && vreg->uV > vreg->last_set_uV;
 +
 +      if (pwm_required) {
 +              /* Switch to PWM mode so that voltage ramping is fast. */
 +              rc = qpnp_smps_set_mode(vreg, QPNP_LOGICAL_MODE_PWM);
 +              if (rc)
 +                      return rc;
 +      }
 +
 +      do {
 +              uV = vreg->uV > vreg->last_set_uV
 +                  ? min(vreg->uV, vreg->last_set_uV + (int)vreg->max_step_uV)
 +                  : max(vreg->uV, vreg->last_set_uV - (int)vreg->max_step_uV);
 +
 +              rc = spm_regulator_write_voltage(vreg, uV);
 +              if (rc)
 +                      return rc;
 +      } while (vreg->last_set_uV != vreg->uV);
 +
 +      if (pwm_required) {
 +              /* Wait for mode transition to complete. */
 +              udelay(QPNP_FTS2_MODE_CHANGE_DELAY - QPNP_SPMI_WRITE_MIN_DELAY);
 +              /* Switch to AUTO mode so that power consumption is lowered. */
 +              rc = qpnp_smps_set_mode(vreg, QPNP_LOGICAL_MODE_AUTO);
 +              if (rc)
 +                      return rc;
 +      }
 +
 +      rc = spm_regulator_recalibrate(vreg);
 +
 +      return rc;
 +}
 +
 +static int spm_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
 +                                      int max_uV, unsigned *selector)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +      const struct voltage_range *range = vreg->range;
 +      int uV = min_uV;
 +      unsigned vlevel;
 +
 +      if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
 +              uV = range->set_point_min_uV;
 +
 +      if (uV < range->set_point_min_uV || uV > range->max_uV) {
 +              pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
 +                      vreg->rdesc.name, min_uV, max_uV,
 +                      range->set_point_min_uV, range->max_uV);
 +              return -EINVAL;
 +      }
 +
 +      vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
 +      uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
 +
 +      if (uV > max_uV) {
 +              pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
 +                      vreg->rdesc.name, min_uV, max_uV);
 +              return -EINVAL;
 +      }
 +
 +      *selector = spm_regulator_vlevel_to_selector(vreg, vlevel);
 +      vreg->vlevel = vlevel;
 +      vreg->uV = uV;
 +
 +      if (!vreg->online)
 +              return 0;
 +
 +      return _spm_regulator_set_voltage(rdev);
 +}
 +
 +static int spm_regulator_list_voltage(struct regulator_dev *rdev,
 +                                      unsigned selector)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +
 +      if (selector >= vreg->rdesc.n_voltages)
 +              return 0;
 +
 +      return selector * vreg->range->step_uV + vreg->range->set_point_min_uV;
 +}
 +
 +static int spm_regulator_enable(struct regulator_dev *rdev)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +      int rc;
 +
 +      rc = _spm_regulator_set_voltage(rdev);
 +
 +      if (!rc)
 +              vreg->online = true;
 +
 +      return rc;
 +}
 +
 +static int spm_regulator_disable(struct regulator_dev *rdev)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +
 +      vreg->online = false;
 +
 +      return 0;
 +}
 +
 +static int spm_regulator_is_enabled(struct regulator_dev *rdev)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +
 +      return vreg->online;
 +}
 +
 +static unsigned int spm_regulator_get_mode(struct regulator_dev *rdev)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +
 +      return vreg->mode == QPNP_LOGICAL_MODE_PWM
 +                      ? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
 +}
 +
 +static int spm_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +
 +      /*
 +       * Map REGULATOR_MODE_NORMAL to PWM mode and REGULATOR_MODE_IDLE to
 +       * init_mode.  This ensures that the regulator always stays in PWM mode
 +       * in the case that qcom,mode has been specified as "pwm" in device
 +       * tree.
 +       */
 +      vreg->mode = (mode == REGULATOR_MODE_NORMAL) ? QPNP_LOGICAL_MODE_PWM
 +                                                   : vreg->init_mode;
 +
 +      return qpnp_smps_set_mode(vreg, vreg->mode);
 +}
 +
 +static struct regulator_ops spm_regulator_ops = {
 +      .get_voltage    = spm_regulator_get_voltage,
 +      .set_voltage    = spm_regulator_set_voltage,
 +      .list_voltage   = spm_regulator_list_voltage,
 +      .get_mode       = spm_regulator_get_mode,
 +      .set_mode       = spm_regulator_set_mode,
 +      .enable         = spm_regulator_enable,
 +      .disable        = spm_regulator_disable,
 +      .is_enabled     = spm_regulator_is_enabled,
 +};
 +
 +static int spm_regulator_avs_set_voltage(struct regulator_dev *rdev, int min_uV,
 +                                      int max_uV, unsigned *selector)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +      const struct voltage_range *range = vreg->range;
 +      unsigned vlevel_min, vlevel_max;
 +      int uV, avs_min_uV, avs_max_uV, rc;
 +
 +      uV = min_uV;
 +
 +      if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
 +              uV = range->set_point_min_uV;
 +
 +      if (uV < range->set_point_min_uV || uV > range->max_uV) {
 +              pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
 +                      vreg->avs_rdesc.name, min_uV, max_uV,
 +                      range->set_point_min_uV, range->max_uV);
 +              return -EINVAL;
 +      }
 +
 +      vlevel_min = spm_regulator_uv_to_vlevel(vreg, uV);
 +      avs_min_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_min);
 +
 +      if (avs_min_uV > max_uV) {
 +              pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
 +                      vreg->avs_rdesc.name, min_uV, max_uV);
 +              return -EINVAL;
 +      }
 +
 +      uV = max_uV;
 +
 +      if (uV > range->max_uV && min_uV <= range->max_uV)
 +              uV = range->max_uV;
 +
 +      if (uV < range->set_point_min_uV || uV > range->max_uV) {
 +              pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
 +                      vreg->avs_rdesc.name, min_uV, max_uV,
 +                      range->set_point_min_uV, range->max_uV);
 +              return -EINVAL;
 +      }
 +
 +      vlevel_max = spm_regulator_uv_to_vlevel(vreg, uV);
 +      avs_max_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_max);
 +
 +      if (avs_max_uV < min_uV) {
 +              pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
 +                      vreg->avs_rdesc.name, min_uV, max_uV);
 +              return -EINVAL;
 +      }
 +
 +      if (likely(!vreg->bypass_spm)) {
 +              rc = msm_spm_avs_set_limit(vreg->cpu_num, vlevel_min,
 +                                              vlevel_max);
 +              if (rc) {
 +                      pr_err("%s: AVS limit setting failed, rc=%d\n",
 +                              vreg->avs_rdesc.name, rc);
 +                      return rc;
 +              }
 +      }
 +
 +      *selector = spm_regulator_vlevel_to_selector(vreg, vlevel_min);
 +      vreg->avs_min_uV = avs_min_uV;
 +      vreg->avs_max_uV = avs_max_uV;
 +
 +      return 0;
 +}
 +
 +static int spm_regulator_avs_get_voltage(struct regulator_dev *rdev)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +
 +      return vreg->avs_min_uV;
 +}
 +
 +static int spm_regulator_avs_enable(struct regulator_dev *rdev)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +      int rc;
 +
 +      if (likely(!vreg->bypass_spm)) {
 +              rc = msm_spm_avs_enable(vreg->cpu_num);
 +              if (rc) {
 +                      pr_err("%s: AVS enable failed, rc=%d\n",
 +                              vreg->avs_rdesc.name, rc);
 +                      return rc;
 +              }
 +      }
 +
 +      vreg->avs_enabled = true;
 +
 +      return 0;
 +}
 +
 +static int spm_regulator_avs_disable(struct regulator_dev *rdev)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +      int rc;
 +
 +      if (likely(!vreg->bypass_spm)) {
 +              rc = msm_spm_avs_disable(vreg->cpu_num);
 +              if (rc) {
 +                      pr_err("%s: AVS disable failed, rc=%d\n",
 +                              vreg->avs_rdesc.name, rc);
 +                      return rc;
 +              }
 +      }
 +
 +      vreg->avs_enabled = false;
 +
 +      return 0;
 +}
 +
 +static int spm_regulator_avs_is_enabled(struct regulator_dev *rdev)
 +{
 +      struct spm_vreg *vreg = rdev_get_drvdata(rdev);
 +
 +      return vreg->avs_enabled;
 +}
 +
 +static struct regulator_ops spm_regulator_avs_ops = {
 +      .get_voltage    = spm_regulator_avs_get_voltage,
 +      .set_voltage    = spm_regulator_avs_set_voltage,
 +      .list_voltage   = spm_regulator_list_voltage,
 +      .enable         = spm_regulator_avs_enable,
 +      .disable        = spm_regulator_avs_disable,
 +      .is_enabled     = spm_regulator_avs_is_enabled,
 +};
 +
 +static int qpnp_smps_check_type(struct spm_vreg *vreg)
 +{
 +      int rc;
 +      u8 type[2];
 +
 +      rc = regmap_bulk_read(vreg->regmap,
 +                            vreg->spmi_base_addr + QPNP_SMPS_REG_TYPE,
 +                            type,
 +                            2);
 +      if (rc) {
 +              dev_err(&vreg->pdev->dev,
 +                      "%s: could not read type register, rc=%d\n",
 +                      __func__, rc);
 +              return rc;
 +      }
 +
 +      if (type[0] == QPNP_FTS2_TYPE && type[1] == QPNP_FTS2_SUBTYPE) {
 +              vreg->regulator_type = QPNP_TYPE_FTS2;
 +      } else if (type[0] == QPNP_FTS2p5_TYPE
 +                                      && type[1] == QPNP_FTS2p5_SUBTYPE) {
 +              vreg->regulator_type = QPNP_TYPE_FTS2p5;
 +      } else if (type[0] == QPNP_FTS426_TYPE
 +                                      && type[1] == QPNP_FTS426_SUBTYPE) {
 +              vreg->regulator_type = QPNP_TYPE_FTS426;
 +      } else if (type[0] == QPNP_ULT_HF_TYPE
 +                                      && type[1] == QPNP_ULT_HF_SUBTYPE) {
 +              vreg->regulator_type = QPNP_TYPE_ULT_HF;
 +      } else if (type[0] == QPNP_HF_TYPE
 +                                      && type[1] == QPNP_HF_SUBTYPE) {
 +              vreg->regulator_type = QPNP_TYPE_HF;
 +      } else {
 +              dev_err(&vreg->pdev->dev,
 +                      "%s: invalid type=0x%02X, subtype=0x%02X register pair\n",
 +                       __func__, type[0], type[1]);
 +              return -ENODEV;
 +      }
 +
 +      return rc;
 +}
 +
 +static int qpnp_smps_init_range(struct spm_vreg *vreg,
 +      const struct voltage_range *range0, const struct voltage_range *range1)
 +{
 +      int rc;
 +      u8 reg = 0;
 +      uint val;
 +
 +      rc = regmap_read(vreg->regmap,
 +                       vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_RANGE,
 +                       &val);
 +      if (rc) {
 +              dev_err(&vreg->pdev->dev,
 +                      "%s: could not read voltage range register, rc=%d\n",
 +                      __func__, rc);
 +              return rc;
 +      }
 +      reg = (u8)val;
 +
 +      if (reg == 0x00) {
 +              vreg->range = range0;
 +      } else if (reg == 0x01) {
 +              vreg->range = range1;
 +      } else {
 +              dev_err(&vreg->pdev->dev, "%s: voltage range=%d is invalid\n",
 +                      __func__, reg);
 +              rc = -EINVAL;
 +      }
 +
 +      return rc;
 +}
 +
 +static int qpnp_ult_hf_init_range(struct spm_vreg *vreg)
 +{
 +      int rc;
 +      u8 reg = 0;
 +      uint val;
 +
 +      rc = regmap_read(vreg->regmap,
 +                       vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
 +                       &val);
 +      if (rc) {
 +              dev_err(&vreg->pdev->dev,
 +                      "%s: could not read voltage range register, rc=%d\n",
 +                      __func__, rc);
 +              return rc;
 +      }
 +      reg = (u8)val;
 +
 +      vreg->range = (reg < ULT_SMPS_RANGE_SPLIT) ? &ult_hf_range0 :
 +                                                      &ult_hf_range1;
 +      return rc;
 +}
 +
 +static int qpnp_smps_init_voltage(struct spm_vreg *vreg)
 +{
 +      int rc;
 +
 +      rc = qpnp_smps_read_voltage(vreg);
 +      if (rc) {
 +              pr_err("%s: voltage read failed, rc=%d\n", vreg->rdesc.name,
 +                      rc);
 +              return rc;
 +      }
 +
 +      vreg->vlevel = vreg->last_set_vlevel;
 +      vreg->uV = vreg->last_set_uV;
 +
 +      /* Initialize SAW voltage control register */
 +      if (!vreg->bypass_spm) {
 +              rc = msm_spm_set_vdd(vreg->cpu_num, vreg->vlevel);
 +              if (rc)
 +                      pr_err("%s: msm_spm_set_vdd failed, rc=%d\n",
 +                             vreg->rdesc.name, rc);
 +      }
 +
 +      return 0;
 +}
 +
 +static int qpnp_smps_init_mode(struct spm_vreg *vreg)
 +{
 +      const char *mode_name;
 +      int rc;
 +      uint val;
 +
 +      rc = of_property_read_string(vreg->pdev->dev.of_node, "qcom,mode",
 +                                      &mode_name);
 +      if (!rc) {
 +              if (strcmp("pwm", mode_name) == 0) {
 +                      vreg->init_mode = QPNP_LOGICAL_MODE_PWM;
 +              } else if ((strcmp("auto", mode_name) == 0) &&
 +                              (vreg->regulator_type != QPNP_TYPE_ULT_HF)) {
 +                      vreg->init_mode = QPNP_LOGICAL_MODE_AUTO;
 +              } else {
 +                      dev_err(&vreg->pdev->dev,
 +                              "%s: unknown regulator mode: %s\n",
 +                              __func__, mode_name);
 +                      return -EINVAL;
 +              }
 +
 +              rc = qpnp_smps_set_mode(vreg, vreg->init_mode);
 +              if (rc)
 +                      return rc;
 +      } else {
 +              rc = regmap_read(vreg->regmap,
 +                               vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
 +                               &val);
 +              if (rc) {
 +                      dev_err(&vreg->pdev->dev,
 +                              "%s: could not read mode register, rc=%d\n",
 +                              __func__, rc);
 +                      return rc;
 +              }
 +              vreg->init_mode = qpnp_regval_to_mode(vreg, val);
 +      }
 +
 +      vreg->mode = vreg->init_mode;
 +
 +      return rc;
 +}
 +
 +static int qpnp_smps_init_step_rate(struct spm_vreg *vreg)
 +{
 +      int rc;
 +      u8 reg = 0;
 +      int step = 0, delay;
 +      uint val;
 +
 +      rc = regmap_read(vreg->regmap,
 +                       vreg->spmi_base_addr + QPNP_SMPS_REG_STEP_CTRL, &val);
 +      if (rc) {
 +              dev_err(&vreg->pdev->dev,
 +                      "%s: could not read stepping control register, rc=%d\n",
 +                      __func__, rc);
 +              return rc;
 +      }
 +      reg = (u8)val;
 +
 +      /* ULT and FTS426 bucks do not support steps */
 +      if (vreg->regulator_type != QPNP_TYPE_ULT_HF && vreg->regulator_type !=
 +                      QPNP_TYPE_FTS426)
 +              step = (reg & QPNP_SMPS_STEP_CTRL_STEP_MASK)
 +                      >> QPNP_SMPS_STEP_CTRL_STEP_SHIFT;
 +
 +      if (vreg->regulator_type == QPNP_TYPE_FTS426) {
 +              delay = (reg & QPNP_FTS426_STEP_CTRL_DELAY_MASK)
 +                      >> QPNP_FTS426_STEP_CTRL_DELAY_SHIFT;
 +
 +              /* step_rate has units of uV/us. */
 +              vreg->step_rate = QPNP_FTS426_CLOCK_RATE * vreg->range->step_uV;
 +      } else {
 +              delay = (reg & QPNP_SMPS_STEP_CTRL_DELAY_MASK)
 +                      >> QPNP_SMPS_STEP_CTRL_DELAY_SHIFT;
 +
 +              /* step_rate has units of uV/us. */
 +              vreg->step_rate = QPNP_SMPS_CLOCK_RATE * vreg->range->step_uV
 +                                      * (1 << step);
 +      }
 +
 +      if ((vreg->regulator_type == QPNP_TYPE_ULT_HF)
 +                      || (vreg->regulator_type == QPNP_TYPE_HF))
 +              vreg->step_rate /= 1000 * (QPNP_HF_STEP_DELAY << delay);
 +      else if (vreg->regulator_type == QPNP_TYPE_FTS426)
 +              vreg->step_rate /= 1000 * (QPNP_FTS426_STEP_DELAY << delay);
 +      else
 +              vreg->step_rate /= 1000 * (QPNP_FTS2_STEP_DELAY << delay);
 +
 +      if (vreg->regulator_type == QPNP_TYPE_FTS426)
 +              vreg->step_rate = vreg->step_rate * QPNP_FTS426_STEP_MARGIN_NUM
 +                                      / QPNP_FTS426_STEP_MARGIN_DEN;
 +      else
 +              vreg->step_rate = vreg->step_rate * QPNP_FTS2_STEP_MARGIN_NUM
 +                                      / QPNP_FTS2_STEP_MARGIN_DEN;
 +
 +      /* Ensure that the stepping rate is greater than 0. */
 +      vreg->step_rate = max(vreg->step_rate, 1);
 +
 +      return rc;
 +}
 +
 +static int qpnp_smps_check_constraints(struct spm_vreg *vreg,
 +                                      struct regulator_init_data *init_data)
 +{
 +      int rc = 0, limit_min_uV, limit_max_uV;
 +      u16 ul_reg, ll_reg;
 +      u8 reg[2];
 +
 +      limit_min_uV = 0;
 +      limit_max_uV = INT_MAX;
 +
 +      ul_reg = QPNP_FTS_REG_VOLTAGE_ULS_VALID;
 +      ll_reg = QPNP_FTS_REG_VOLTAGE_LLS_VALID;
 +
 +      switch (vreg->regulator_type) {
 +      case QPNP_TYPE_HF:
 +              ul_reg = QPNP_HF_REG_VOLTAGE_ULS;
 +              ll_reg = QPNP_HF_REG_VOLTAGE_LLS;
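 +              /* fall through - HF shares the UL/LL limit handling below */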
 +      case QPNP_TYPE_FTS2:
 +      case QPNP_TYPE_FTS2p5:
 +              rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
 +                                      + QPNP_SMPS_REG_UL_LL_CTRL, reg, 1);
 +              if (rc) {
 +                      dev_err(&vreg->pdev->dev, "%s: UL_LL register read failed, rc=%d\n",
 +                              __func__, rc);
 +                      return rc;
 +              }
 +
 +              if (reg[0] & QPNP_COMMON_UL_EN_MASK) {
 +                      rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
 +                                              + ul_reg, &reg[1], 1);
 +                      if (rc) {
 +                              dev_err(&vreg->pdev->dev, "%s: ULS register read failed, rc=%d\n",
 +                                      __func__, rc);
 +                              return rc;
 +                      }
 +
 +                      limit_max_uV = spm_regulator_vlevel_to_uv(vreg, reg[1]);
 +              }
 +
 +              if (reg[0] & QPNP_COMMON_LL_EN_MASK) {
 +                      rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
 +                                              + ll_reg, &reg[1], 1);
 +                      if (rc) {
 +                              dev_err(&vreg->pdev->dev, "%s: LLS register read failed, rc=%d\n",
 +                                      __func__, rc);
 +                              return rc;
 +                      }
 +
 +                      limit_min_uV = spm_regulator_vlevel_to_uv(vreg, reg[1]);
 +              }
 +
 +              break;
 +      case QPNP_TYPE_FTS426:
 +              rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
 +                                      + QPNP_FTS426_REG_VOLTAGE_ULS_LB,
 +                                      reg, 2);
 +              if (rc) {
 +                      dev_err(&vreg->pdev->dev, "%s: could not read voltage limit registers, rc=%d\n",
 +                              __func__, rc);
 +                      return rc;
 +              }
 +
 +              limit_max_uV = spm_regulator_vlevel_to_uv(vreg,
 +                                      ((unsigned)reg[1] << 8) | reg[0]);
 +              break;
 +      case QPNP_TYPE_ULT_HF:
 +              /* no HW voltage limit configuration */
 +              break;
 +      }
 +
 +      if (init_data->constraints.min_uV < limit_min_uV
 +          || init_data->constraints.max_uV >  limit_max_uV) {
 +              dev_err(&vreg->pdev->dev, "regulator min/max(%d/%d) constraints do not fit within HW configured min/max(%d/%d) constraints\n",
 +                      init_data->constraints.min_uV,
 +                      init_data->constraints.max_uV, limit_min_uV,
 +                      limit_max_uV);
 +              return -EINVAL;
 +      }
 +
 +      return rc;
 +}
 +
 +static bool spm_regulator_using_range0(struct spm_vreg *vreg)
 +{
 +      return vreg->range == &fts2_range0 || vreg->range == &fts2p5_range0
 +              || vreg->range == &ult_hf_range0 || vreg->range == &hf_range0
 +              || vreg->range == &fts426_range;
 +}
 +
 +/* Register a regulator to enable/disable AVS and set AVS min/max limits. */
 +static int spm_regulator_avs_register(struct spm_vreg *vreg,
 +                              struct device *dev, struct device_node *node)
 +{
 +      struct regulator_config reg_config = {};
 +      struct device_node *avs_node = NULL;
 +      struct device_node *child_node;
 +      struct regulator_init_data *init_data;
 +      int rc;
 +
 +      /*
 +       * Find the first available child node (if any).  It corresponds to an
 +       * AVS limits regulator.
 +       */
 +      for_each_available_child_of_node(node, child_node) {
 +              avs_node = child_node;
 +              break;
 +      }
 +
 +      if (!avs_node)
 +              return 0;
 +
 +      init_data = of_get_regulator_init_data(dev, avs_node, &vreg->avs_rdesc);
 +      if (!init_data) {
 +              dev_err(dev, "%s: unable to allocate memory\n", __func__);
 +              return -ENOMEM;
 +      }
 +      init_data->constraints.input_uV = init_data->constraints.max_uV;
 +      init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
 +                                              | REGULATOR_CHANGE_VOLTAGE;
 +
 +      if (!init_data->constraints.name) {
 +              dev_err(dev, "%s: AVS node is missing regulator name\n",
 +                      __func__);
 +              return -EINVAL;
 +      }
 +
 +      vreg->avs_rdesc.name    = init_data->constraints.name;
 +      vreg->avs_rdesc.type    = REGULATOR_VOLTAGE;
 +      vreg->avs_rdesc.owner   = THIS_MODULE;
 +      vreg->avs_rdesc.ops     = &spm_regulator_avs_ops;
 +      vreg->avs_rdesc.n_voltages
 +              = (vreg->range->max_uV - vreg->range->set_point_min_uV)
 +                      / vreg->range->step_uV + 1;
 +
 +      reg_config.dev = dev;
 +      reg_config.init_data = init_data;
 +      reg_config.driver_data = vreg;
 +      reg_config.of_node = avs_node;
 +
 +      vreg->avs_rdev = regulator_register(&vreg->avs_rdesc, &reg_config);
 +      if (IS_ERR(vreg->avs_rdev)) {
 +              rc = PTR_ERR(vreg->avs_rdev);
 +              dev_err(dev, "%s: AVS regulator_register failed, rc=%d\n",
 +                      __func__, rc);
 +              return rc;
 +      }
 +
 +      if (vreg->bypass_spm)
 +              pr_debug("%s: SPM bypassed so AVS regulator calls are no-ops\n",
 +                      vreg->avs_rdesc.name);
 +
 +      return 0;
 +}
 +
 +static int spm_regulator_probe(struct platform_device *pdev)
 +{
 +      struct regulator_config reg_config = {};
 +      struct device_node *node = pdev->dev.of_node;
 +      struct regulator_init_data *init_data;
 +      struct spm_vreg *vreg;
 +      unsigned int base;
 +      bool bypass_spm;
 +      int rc;
 +
 +      if (!node) {
 +              dev_err(&pdev->dev, "%s: device node missing\n", __func__);
 +              return -ENODEV;
 +      }
 +
 +      bypass_spm = of_property_read_bool(node, "qcom,bypass-spm");
 +      if (!bypass_spm) {
 +              rc = msm_spm_probe_done();
 +              if (rc) {
 +                      if (rc != -EPROBE_DEFER)
 +                              dev_err(&pdev->dev,
 +                                      "%s: spm unavailable, rc=%d\n",
 +                                      __func__, rc);
 +                      return rc;
 +              }
 +      }
 +
 +      vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
 +      if (!vreg) {
 +              pr_err("allocation failed.\n");
 +              return -ENOMEM;
 +      }
 +      vreg->regmap = dev_get_regmap(pdev->dev.parent, NULL);
 +      if (!vreg->regmap) {
 +              dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
 +              return -EINVAL;
 +      }
 +      vreg->pdev = pdev;
 +      vreg->bypass_spm = bypass_spm;
 +
 +      rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
 +      if (rc < 0) {
 +              dev_err(&pdev->dev,
 +                      "Couldn't find reg in node = %s rc = %d\n",
 +                      pdev->dev.of_node->full_name, rc);
 +              return rc;
 +      }
 +      vreg->spmi_base_addr = base;
 +
 +      rc = qpnp_smps_check_type(vreg);
 +      if (rc)
 +              return rc;
 +
 +      /* Specify CPU 0 as default in order to handle shared regulator case. */
 +      vreg->cpu_num = 0;
 +      of_property_read_u32(vreg->pdev->dev.of_node, "qcom,cpu-num",
 +                                              &vreg->cpu_num);
 +
 +      of_property_read_u32(vreg->pdev->dev.of_node, "qcom,recal-mask",
 +                                              &vreg->recal_cluster_mask);
 +
 +      /*
 +       * The regulator must be initialized to range 0 or range 1 during
 +       * PMIC power on sequence.  Once it is set, it cannot be changed
 +       * dynamically.
 +       */
 +      if (vreg->regulator_type == QPNP_TYPE_FTS2)
 +              rc = qpnp_smps_init_range(vreg, &fts2_range0, &fts2_range1);
 +      else if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
 +              rc = qpnp_smps_init_range(vreg, &fts2p5_range0, &fts2p5_range1);
 +      else if (vreg->regulator_type == QPNP_TYPE_FTS426)
 +              vreg->range = &fts426_range;
 +      else if (vreg->regulator_type == QPNP_TYPE_HF)
 +              rc = qpnp_smps_init_range(vreg, &hf_range0, &hf_range1);
 +      else if (vreg->regulator_type == QPNP_TYPE_ULT_HF)
 +              rc = qpnp_ult_hf_init_range(vreg);
 +      if (rc)
 +              return rc;
 +
 +      rc = qpnp_smps_init_voltage(vreg);
 +      if (rc)
 +              return rc;
 +
 +      rc = qpnp_smps_init_mode(vreg);
 +      if (rc)
 +              return rc;
 +
 +      rc = qpnp_smps_init_step_rate(vreg);
 +      if (rc)
 +              return rc;
 +
 +      init_data = of_get_regulator_init_data(&pdev->dev, node, &vreg->rdesc);
 +      if (!init_data) {
 +              dev_err(&pdev->dev, "%s: unable to allocate memory\n",
 +                              __func__);
 +              return -ENOMEM;
 +      }
 +      init_data->constraints.input_uV = init_data->constraints.max_uV;
 +      init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
 +                      | REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE;
 +      init_data->constraints.valid_modes_mask
 +                              = REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
 +
 +      if (!init_data->constraints.name) {
 +              dev_err(&pdev->dev, "%s: node is missing regulator name\n",
 +                      __func__);
 +              return -EINVAL;
 +      }
 +
 +      rc = qpnp_smps_check_constraints(vreg, init_data);
 +      if (rc) {
 +              dev_err(&pdev->dev, "%s: regulator constraints check failed, rc=%d\n",
 +                      __func__, rc);
 +              return rc;
 +      }
 +
 +      vreg->rdesc.name        = init_data->constraints.name;
 +      vreg->rdesc.type        = REGULATOR_VOLTAGE;
 +      vreg->rdesc.owner       = THIS_MODULE;
 +      vreg->rdesc.ops         = &spm_regulator_ops;
 +      vreg->rdesc.n_voltages
 +              = (vreg->range->max_uV - vreg->range->set_point_min_uV)
 +                      / vreg->range->step_uV + 1;
 +
 +      vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
 +      of_property_read_u32(vreg->pdev->dev.of_node,
 +                              "qcom,max-voltage-step", &vreg->max_step_uV);
 +
 +      if (vreg->max_step_uV > SPM_REGULATOR_MAX_STEP_UV)
 +              vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
 +
 +      vreg->max_step_uV = rounddown(vreg->max_step_uV, vreg->range->step_uV);
 +      pr_debug("%s: max single voltage step size=%u uV\n",
 +              vreg->rdesc.name, vreg->max_step_uV);
 +
 +      reg_config.dev = &pdev->dev;
 +      reg_config.init_data = init_data;
 +      reg_config.driver_data = vreg;
 +      reg_config.of_node = node;
 +      vreg->rdev = regulator_register(&vreg->rdesc, &reg_config);
 +
 +      if (IS_ERR(vreg->rdev)) {
 +              rc = PTR_ERR(vreg->rdev);
 +              dev_err(&pdev->dev, "%s: regulator_register failed, rc=%d\n",
 +                      __func__, rc);
 +              return rc;
 +      }
 +
 +      rc = spm_regulator_avs_register(vreg, &pdev->dev, node);
 +      if (rc) {
 +              regulator_unregister(vreg->rdev);
 +              return rc;
 +      }
 +
 +      dev_set_drvdata(&pdev->dev, vreg);
 +
 +      pr_info("name=%s, range=%s, voltage=%d uV, mode=%s, step rate=%d uV/us\n",
 +              vreg->rdesc.name,
 +              spm_regulator_using_range0(vreg) ? "LV" : "MV",
 +              vreg->uV,
 +              vreg->init_mode == QPNP_LOGICAL_MODE_PWM ? "PWM" :
 +                 (vreg->init_mode == QPNP_LOGICAL_MODE_AUTO ? "AUTO" : "PFM"),
 +              vreg->step_rate);
 +
 +      return rc;
 +}
 +
 +static int spm_regulator_remove(struct platform_device *pdev)
 +{
 +      struct spm_vreg *vreg = dev_get_drvdata(&pdev->dev);
 +
 +      if (vreg->avs_rdev)
 +              regulator_unregister(vreg->avs_rdev);
 +      regulator_unregister(vreg->rdev);
 +
 +      return 0;
 +}
 +
 +static struct of_device_id spm_regulator_match_table[] = {
 +      { .compatible = SPM_REGULATOR_DRIVER_NAME, },
 +      {}
 +};
 +
 +static const struct platform_device_id spm_regulator_id[] = {
 +      { SPM_REGULATOR_DRIVER_NAME, 0 },
 +      {}
 +};
 +MODULE_DEVICE_TABLE(spmi, spm_regulator_id);
 +
 +static struct platform_driver spm_regulator_driver = {
 +      .driver = {
 +              .name           = SPM_REGULATOR_DRIVER_NAME,
 +              .of_match_table = spm_regulator_match_table,
 +              .owner          = THIS_MODULE,
 +      },
 +      .probe          = spm_regulator_probe,
 +      .remove         = spm_regulator_remove,
 +      .id_table       = spm_regulator_id,
 +};
 +
 +/**
 + * spm_regulator_init() - register spmi driver for spm-regulator
 + *
 + * This initialization function should be called in systems in which driver
 + * registration ordering must be controlled precisely.
 + *
 + * Returns 0 on success or errno on failure.
 + */
 +int __init spm_regulator_init(void)
 +{
 +      static bool has_registered;
 +
 +      if (has_registered)
 +              return 0;
 +      else
 +              has_registered = true;
 +
 +      return platform_driver_register(&spm_regulator_driver);
 +}
 +EXPORT_SYMBOL(spm_regulator_init);
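 +
 +/*
 + * Hypothetical call site (illustration only, not taken from this tree): a
 + * board or SoC setup file that must guarantee this driver is registered
 + * before its regulator consumers probe could call spm_regulator_init()
 + * from its own initcall, e.g.:
 + *
 + *     static int __init example_soc_init(void)
 + *     {
 + *             return spm_regulator_init();
 + *     }
 + *     arch_initcall(example_soc_init);
 + *
 + * Repeated calls are safe because of the has_registered guard above; on
 + * builds that do not need explicit ordering, the arch_initcall() below
 + * already registers the driver.
 + */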
 +
 +static void __exit spm_regulator_exit(void)
 +{
 +      platform_driver_unregister(&spm_regulator_driver);
 +}
 +
 +arch_initcall(spm_regulator_init);
 +module_exit(spm_regulator_exit);
 +
 +MODULE_LICENSE("GPL v2");
 +MODULE_DESCRIPTION("SPM regulator driver");
 +MODULE_ALIAS("platform:spm-regulator");
Simple merge
diff --cc fs/cifs/file.c
Simple merge
diff --cc fs/dcache.c
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -24,11 -24,18 +24,26 @@@ bool psci_tos_resident_on(int cpu)
  bool psci_power_state_loses_context(u32 state);
  bool psci_power_state_is_valid(u32 state);
  
 +int psci_cpu_init_idle(unsigned int cpu);
 +int psci_cpu_suspend_enter(unsigned long index);
 +
++unsigned long __invoke_psci_fn_smc(unsigned long function_id,
++                      unsigned long arg0, unsigned long arg1,
++                      unsigned long arg2);
++
+ enum psci_conduit {
+       PSCI_CONDUIT_NONE,
+       PSCI_CONDUIT_SMC,
+       PSCI_CONDUIT_HVC,
+ };
+ enum smccc_version {
+       SMCCC_VERSION_1_0,
+       SMCCC_VERSION_1_1,
+ };
  struct psci_operations {
 +      u32 (*get_version)(void);
        int (*cpu_suspend)(u32 state, unsigned long entry_point);
        int (*cpu_off)(u32 state);
        int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
Simple merge
Simple merge
diff --cc mm/filemap.c
Simple merge
diff --cc mm/vmstat.c
Simple merge
Simple merge
Simple merge
Simple merge