Merge android-4.4.185 (14e1196) into msm-4.4
author    Srinivasarao P <spathi@codeaurora.org>
          Thu, 11 Jul 2019 06:26:30 +0000 (11:56 +0530)
committer Srinivasarao P <spathi@codeaurora.org>
          Thu, 11 Jul 2019 06:40:40 +0000 (12:10 +0530)
* refs/heads/tmp-14e1196
  ANDROID: Communicates LMK events to userland where they can be logged
  Linux 4.4.185
  dmaengine: imx-sdma: remove BD_INTR for channel0
  KVM: x86: degrade WARN to pr_warn_ratelimited
  arm64, vdso: Define vdso_{start,end} as array
  ARC: handle gcc generated __builtin_trap for older compiler
  tty: rocket: fix incorrect forward declaration of 'rp_init()'
  btrfs: Ensure replaced device doesn't have pending chunk allocation
  lib/mpi: Fix karactx leak in mpi_powm
  ALSA: usb-audio: fix sign unintended sign extension on left shifts
  ALSA: firewire-lib/fireworks: fix miss detection of received MIDI messages
  ALSA: seq: fix incorrect order of dest_client/dest_ports arguments
  crypto: user - prevent operating on larval algorithms
  ptrace: Fix ->ptracer_cred handling for PTRACE_TRACEME
  MIPS: Workaround GCC __builtin_unreachable reordering bug
  bug.h: work around GCC PR82365 in BUG()
  swiotlb: Make linux/swiotlb.h standalone includible
  mfd: omap-usb-tll: Fix register offsets
  MIPS: math-emu: do not use bools for arithmetic
  ARC: fix build warning in elf.h
  ARC: Assume multiplier is always present
  scsi: hpsa: correct ioaccel2 chaining
  usb: gadget: udc: lpc32xx: allocate descriptor with GFP_ATOMIC
  usb: gadget: fusb300_udc: Fix memory leak of fusb300->ep[i]
  ASoC: max98090: remove 24-bit format support if RJ is 0
  spi: bitbang: Fix NULL pointer dereference in spi_unregister_master
  ASoC : cs4265 : readable register too low
  um: Compile with modern headers
  Bluetooth: Fix faulty expression for minimum encryption key size check
  net: check before dereferencing netdev_ops during busy poll
  bonding: Always enable vlan tx offload
  ipv4: Use return value of inet_iif() for __raw_v4_lookup in the while loop
  team: Always enable vlan tx offload
  tipc: check msg->req data len in tipc_nl_compat_bearer_disable
  tipc: change to use register_pernet_device
  sctp: change to hold sk after auth shkey is created successfully
  cpu/speculation: Warn on unsupported mitigations= parameter
  x86/speculation: Allow guests to use SSBD even if host does not
  ovl: modify ovl_permission() to do checks on two inodes
  KVM: X86: Fix scan ioapic use-before-initialization
  net/9p: include trans_common.h to fix missing prototype warning.
  9p: p9dirent_read: check network-provided name length
  9p/rdma: remove useless check in cm_event_handler
  9p: acl: fix uninitialized iattr access
  9p/rdma: do not disconnect on down_interruptible EAGAIN
  perf help: Remove needless use of strncpy()
  perf ui helpline: Use strlcpy() as a shorter form of strncpy() + explicit set nul
  mac80211: drop robust management frames from unknown TA
  cfg80211: fix memory leak of wiphy device name
  SMB3: retry on STATUS_INSUFFICIENT_RESOURCES instead of failing write
  Bluetooth: Fix regression with minimum encryption key size alignment
  Bluetooth: Align minimum encryption key size for LE and BR/EDR connections
  ARM: imx: cpuidle-imx6sx: Restrict the SW2ISO increase to i.MX6SX
  can: purge socket error queue on sock destruct
  can: flexcan: fix timeout when set small bitrate
  btrfs: start readahead also in seed devices
  Btrfs: fix race between readahead and device replace/removal
  hwmon: (pmbus/core) Treat parameters as paged if on multiple pages
  s390/qeth: fix VLAN attribute in bridge_hostnotify udev event
  scsi: ufs: Check that space was properly alloced in copy_query_response
  scripts/checkstack.pl: Fix arm64 wrong or unknown architecture
  sparc: perf: fix updated event period in response to PERF_EVENT_IOC_PERIOD
  net: hns: Fix loopback test failed at copper ports
  MIPS: uprobes: remove set but not used variable 'epc'
  IB/hfi1: Insure freeze_work work_struct is canceled on shutdown
  parisc: Fix compiler warnings in float emulation code
  parport: Fix mem leak in parport_register_dev_model
  apparmor: enforce nullbyte at end of tag string
  Input: uinput - add compat ioctl number translation for UI_*_FF_UPLOAD
  usb: chipidea: udc: workaround for endpoint conflict issue
  gcc-9: silence 'address-of-packed-member' warning
  tracing: Silence GCC 9 array bounds warning
  scsi: vmw_pscsi: Fix use-after-free in pvscsi_queue_lck()
  mm/page_idle.c: fix oops because end_pfn is larger than max_pfn
  fs/binfmt_flat.c: make load_flat_shared_library() work
  ANDROID: Fixes to locking around handle_lmk_event
  ANDROID: Avoid taking multiple locks in handle_lmk_event

Side effects from the commit "ANDROID: Communicates LMK events to userland
where they can be logged" are addressed here, so picking this commit, which
was ignored in the 4.4.180 merge.

Conflicts:
drivers/staging/android/lowmemorykiller.c

Change-Id: I1156dc21d0f35e74e86d2ad202f99b7bc173b874
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Makefile
drivers/scsi/ufs/ufshcd.c
drivers/staging/android/lowmemorykiller.c
kernel/cpu.c
kernel/trace/trace.c
net/bluetooth/hci_conn.c
net/bluetooth/l2cap_core.c
net/mac80211/rx.c
net/wireless/core.c

diff --combined Makefile
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 4
  PATCHLEVEL = 4
- SUBLEVEL = 184
+ SUBLEVEL = 185
  EXTRAVERSION =
  NAME = Blurry Fish Butt
  
@@@ -343,7 -343,7 +343,7 @@@ include scripts/Kbuild.includ
  # Make variables (CC, etc...)
  AS            = $(CROSS_COMPILE)as
  LD            = $(CROSS_COMPILE)ld
 -CC            = $(CROSS_COMPILE)gcc
 +REAL_CC               = $(CROSS_COMPILE)gcc
  CPP           = $(CC) -E
  AR            = $(CROSS_COMPILE)ar
  NM            = $(CROSS_COMPILE)nm
@@@ -358,10 -358,6 +358,10 @@@ PERL             = per
  PYTHON                = python
  CHECK         = sparse
  
 +# Use the wrapper for the compiler.  This wrapper scans for new
 +# warnings and causes the build to stop upon encountering them.
 +CC            = $(PYTHON) $(srctree)/scripts/gcc-wrapper.py $(REAL_CC)
 +
  CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
                  -Wbitwise -Wno-return-void $(CF)
  CFLAGS_MODULE   =
@@@ -399,9 -395,7 +399,9 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
                   -Wno-format-security \
                   -std=gnu89 $(call cc-option,-fno-PIE)
  
 -
 +ifeq ($(TARGET_BOARD_TYPE),auto)
 +KBUILD_CFLAGS    += -DCONFIG_PLATFORM_AUTO
 +endif
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
  KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
@@@ -421,7 -415,7 +421,7 @@@ export HOSTCXX HOSTCXXFLAGS LDFLAGS_MOD
  
  export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
  export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV
 -export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE
 +export CFLAGS_KASAN CFLAGS_UBSAN CFLAGS_KASAN_NOSANITIZE
  export CFLAGS_KCOV
  export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
  export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
@@@ -648,6 -642,7 +648,7 @@@ KBUILD_CFLAGS      += $(call cc-disable-warn
  KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
  KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
  KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+ KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
  KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
  
  ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
@@@ -732,7 -727,6 +733,6 @@@ ifeq ($(cc-name),clang
  KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
  KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
  KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
- KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
  KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier)
  # Quiet clang warning: comparison of unsigned expression < 0 is always false
  KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
@@@ -849,7 -843,6 +849,7 @@@ KBUILD_ARFLAGS := $(call ar-option,D
  
  include scripts/Makefile.kasan
  include scripts/Makefile.extrawarn
 +include scripts/Makefile.ubsan
  
  # Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
  # last assignments
diff --combined drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@@ -3,7 -3,7 +3,7 @@@
   *
   * This code is based on drivers/scsi/ufs/ufshcd.c
   * Copyright (C) 2011-2013 Samsung India Software Operations
 - * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 + * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
   *
   * Authors:
   *    Santosh Yaraganavi <santosh.sy@samsung.com>
   */
  
  #include <linux/async.h>
 +#include <scsi/ufs/ioctl.h>
  #include <linux/devfreq.h>
 +#include <linux/nls.h>
 +#include <linux/of.h>
  #include <linux/blkdev.h>
  
  #include "ufshcd.h"
 -#include "unipro.h"
 +#include "ufshci.h"
 +#include "ufs_quirks.h"
 +#include "ufs-debugfs.h"
 +#include "ufs-qcom.h"
 +
 +#define CREATE_TRACE_POINTS
 +#include <trace/events/ufs.h>
 +
 +#ifdef CONFIG_DEBUG_FS
 +
 +static int ufshcd_tag_req_type(struct request *rq)
 +{
 +      int rq_type = TS_WRITE;
 +
 +      if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
 +              rq_type = TS_NOT_SUPPORTED;
 +      else if (rq->cmd_flags & REQ_FLUSH)
 +              rq_type = TS_FLUSH;
 +      else if (rq_data_dir(rq) == READ)
 +              rq_type = (rq->cmd_flags & REQ_URGENT) ?
 +                      TS_URGENT_READ : TS_READ;
 +      else if (rq->cmd_flags & REQ_URGENT)
 +              rq_type = TS_URGENT_WRITE;
 +
 +      return rq_type;
 +}
 +
 +static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
 +{
 +      ufsdbg_set_err_state(hba);
 +      if (type < UFS_ERR_MAX)
 +              hba->ufs_stats.err_stats[type]++;
 +}
 +
 +static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
 +{
 +      struct request *rq =
 +              hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
 +      u64 **tag_stats = hba->ufs_stats.tag_stats;
 +      int rq_type;
 +
 +      if (!hba->ufs_stats.enabled)
 +              return;
 +
 +      tag_stats[tag][TS_TAG]++;
 +      if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
 +              return;
 +
 +      WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
 +      rq_type = ufshcd_tag_req_type(rq);
 +      if (!(rq_type < 0 || rq_type > TS_NUM_STATS))
 +              tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
 +}
 +
 +static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
 +              struct scsi_cmnd *cmd)
 +{
 +      struct request *rq = cmd ? cmd->request : NULL;
 +
 +      if (rq && rq->cmd_type & REQ_TYPE_FS)
 +              hba->ufs_stats.q_depth--;
 +}
 +
 +static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 +{
 +      int rq_type;
 +      struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
 +      s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
 +              lrbp->issue_time_stamp);
 +
 +      /* update general request statistics */
 +      if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
 +              hba->ufs_stats.req_stats[TS_TAG].min = delta;
 +      hba->ufs_stats.req_stats[TS_TAG].count++;
 +      hba->ufs_stats.req_stats[TS_TAG].sum += delta;
 +      if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
 +              hba->ufs_stats.req_stats[TS_TAG].max = delta;
 +      if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
 +                      hba->ufs_stats.req_stats[TS_TAG].min = delta;
 +
 +      rq_type = ufshcd_tag_req_type(rq);
 +      if (rq_type == TS_NOT_SUPPORTED)
 +              return;
 +
 +      /* update request type specific statistics */
 +      if (hba->ufs_stats.req_stats[rq_type].count == 0)
 +              hba->ufs_stats.req_stats[rq_type].min = delta;
 +      hba->ufs_stats.req_stats[rq_type].count++;
 +      hba->ufs_stats.req_stats[rq_type].sum += delta;
 +      if (delta > hba->ufs_stats.req_stats[rq_type].max)
 +              hba->ufs_stats.req_stats[rq_type].max = delta;
 +      if (delta < hba->ufs_stats.req_stats[rq_type].min)
 +                      hba->ufs_stats.req_stats[rq_type].min = delta;
 +}
 +
 +static void
 +ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
 +{
 +      if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
 +              hba->ufs_stats.query_stats_arr[opcode][idn]++;
 +}
 +
 +#else
 +static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
 +{
 +}
 +
 +static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
 +              struct scsi_cmnd *cmd)
 +{
 +}
 +
 +static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
 +{
 +}
 +
 +static inline
 +void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 +{
 +}
 +
 +static inline
 +void ufshcd_update_query_stats(struct ufs_hba *hba,
 +                             enum query_opcode opcode, u8 idn)
 +{
 +}
 +#endif
 +
 +#define PWR_INFO_MASK 0xF
 +#define PWR_RX_OFFSET 4
 +
 +#define UFSHCD_REQ_SENSE_SIZE 18
  
  #define UFSHCD_ENABLE_INTRS   (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
  #define NOP_OUT_TIMEOUT    30 /* msecs */
  
  /* Query request retries */
 -#define QUERY_REQ_RETRIES 10
 +#define QUERY_REQ_RETRIES 3
  /* Query request timeout */
 -#define QUERY_REQ_TIMEOUT 30 /* msec */
 +#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
  
  /* Task management command timeout */
  #define TM_CMD_TIMEOUT        100 /* msecs */
  
 +/* maximum number of retries for a general UIC command  */
 +#define UFS_UIC_COMMAND_RETRIES 3
 +
  /* maximum number of link-startup retries */
  #define DME_LINKSTARTUP_RETRIES 3
  
 +/* Maximum retries for Hibern8 enter */
 +#define UIC_HIBERN8_ENTER_RETRIES 3
 +
  /* maximum number of reset retries before giving up */
  #define MAX_HOST_RESET_RETRIES 5
  
  /* Interrupt aggregation default timeout, unit: 40us */
  #define INT_AGGR_DEF_TO       0x02
  
 +/* default value of auto suspend is 3 seconds */
 +#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */
 +
 +#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE   10
 +#define UFSHCD_CLK_GATING_DELAY_MS_PERF               50
 +
 +/* IOCTL opcode for command - ufs set device read only */
 +#define UFS_IOCTL_BLKROSET      BLKROSET
 +
 +#define UFSHCD_DEFAULT_LANES_PER_DIRECTION            2
 +
  #define ufshcd_toggle_vreg(_dev, _vreg, _on)                          \
        ({                                                              \
                int _ret;                                               \
                _ret;                                                   \
        })
  
 +#define ufshcd_hex_dump(prefix_str, buf, len) \
 +print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
 +
  static u32 ufs_query_desc_max_size[] = {
        QUERY_DESC_DEVICE_MAX_SIZE,
        QUERY_DESC_CONFIGURAION_MAX_SIZE,
@@@ -274,11 -120,9 +274,11 @@@ enum 
  /* UFSHCD UIC layer error flags */
  enum {
        UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
 -      UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
 -      UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
 -      UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
 +      UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
 +      UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
 +      UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
 +      UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
 +      UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
  };
  
  /* Interrupt configuration options */
@@@ -288,8 -132,6 +288,8 @@@ enum 
        UFSHCD_INT_CLEAR,
  };
  
 +#define DEFAULT_UFSHCD_DBG_PRINT_EN   UFSHCD_DBG_PRINT_ALL
 +
  #define ufshcd_set_eh_in_progress(h) \
        (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
  #define ufshcd_eh_in_progress(h) \
@@@ -331,1702 -173,489 +331,1702 @@@ ufs_get_pm_lvl_to_link_pwr_state(enum u
        return ufs_pm_lvl_states[lvl].link_state;
  }
  
 -static void ufshcd_tmc_handler(struct ufs_hba *hba);
 +static inline enum ufs_pm_level
 +ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
 +                                      enum uic_link_state link_state)
 +{
 +      enum ufs_pm_level lvl;
 +
 +      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
 +              if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
 +                      (ufs_pm_lvl_states[lvl].link_state == link_state))
 +                      return lvl;
 +      }
 +
 +      /* if no match found, return the level 0 */
 +      return UFS_PM_LVL_0;
 +}
 +
 +static inline bool ufshcd_is_valid_pm_lvl(int lvl)
 +{
 +      if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
 +              return true;
 +      else
 +              return false;
 +}
 +
 +static irqreturn_t ufshcd_intr(int irq, void *__hba);
 +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
  static void ufshcd_async_scan(void *data, async_cookie_t cookie);
  static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
  static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
  static void ufshcd_hba_exit(struct ufs_hba *hba);
  static int ufshcd_probe_hba(struct ufs_hba *hba);
 -static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 -                               bool skip_ref_clk);
 -static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
 -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 -static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 +static int ufshcd_enable_clocks(struct ufs_hba *hba);
 +static int ufshcd_disable_clocks(struct ufs_hba *hba,
 +                               bool is_gating_context);
 +static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
 +                                            bool is_gating_context);
 +static void ufshcd_hold_all(struct ufs_hba *hba);
 +static void ufshcd_release_all(struct ufs_hba *hba);
 +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
  static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 +static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
  static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 -static irqreturn_t ufshcd_intr(int irq, void *__hba);
 -static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
 -              struct ufs_pa_layer_attr *desired_pwr_mode);
 -static int ufshcd_change_power_mode(struct ufs_hba *hba,
 -                           struct ufs_pa_layer_attr *pwr_mode);
 +static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
 +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 +static void ufshcd_release_all(struct ufs_hba *hba);
 +static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
 +static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
 +static int ufshcd_devfreq_target(struct device *dev,
 +                              unsigned long *freq, u32 flags);
 +static int ufshcd_devfreq_get_dev_status(struct device *dev,
 +              struct devfreq_dev_status *stat);
 +
 +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 +static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
 +      .upthreshold = 35,
 +      .downdifferential = 30,
 +      .simple_scaling = 1,
 +};
 +
 +static void *gov_data = &ufshcd_ondemand_data;
 +#else
 +static void *gov_data;
 +#endif
  
 -static inline int ufshcd_enable_irq(struct ufs_hba *hba)
 +static struct devfreq_dev_profile ufs_devfreq_profile = {
 +      .polling_ms     = 40,
 +      .target         = ufshcd_devfreq_target,
 +      .get_dev_status = ufshcd_devfreq_get_dev_status,
 +};
 +
 +static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
  {
 -      int ret = 0;
 +      return tag >= 0 && tag < hba->nutrs;
 +}
  
 +static inline void ufshcd_enable_irq(struct ufs_hba *hba)
 +{
        if (!hba->is_irq_enabled) {
 -              ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
 -                              hba);
 -              if (ret)
 -                      dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
 -                              __func__, ret);
 +              enable_irq(hba->irq);
                hba->is_irq_enabled = true;
        }
 -
 -      return ret;
  }
  
  static inline void ufshcd_disable_irq(struct ufs_hba *hba)
  {
        if (hba->is_irq_enabled) {
 -              free_irq(hba->irq, hba);
 +              disable_irq(hba->irq);
                hba->is_irq_enabled = false;
        }
  }
  
 -/*
 - * ufshcd_wait_for_register - wait for register value to change
 - * @hba - per-adapter interface
 - * @reg - mmio register offset
 - * @mask - mask to apply to read register value
 - * @val - wait condition
 - * @interval_us - polling interval in microsecs
 - * @timeout_ms - timeout in millisecs
 - *
 - * Returns -ETIMEDOUT on error, zero on success
 - */
 -static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 -              u32 val, unsigned long interval_us, unsigned long timeout_ms)
 +void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
  {
 -      int err = 0;
 -      unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 -
 -      /* ignore bits that we don't intend to wait on */
 -      val = val & mask;
 -
 -      while ((ufshcd_readl(hba, reg) & mask) != val) {
 -              /* wakeup within 50us of expiry */
 -              usleep_range(interval_us, interval_us + 50);
 -
 -              if (time_after(jiffies, timeout)) {
 -                      if ((ufshcd_readl(hba, reg) & mask) != val)
 -                              err = -ETIMEDOUT;
 -                      break;
 -              }
 -      }
 +      unsigned long flags;
 +      bool unblock = false;
  
 -      return err;
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->scsi_block_reqs_cnt--;
 +      unblock = !hba->scsi_block_reqs_cnt;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      if (unblock)
 +              scsi_unblock_requests(hba->host);
  }
 +EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
  
 -/**
 - * ufshcd_get_intr_mask - Get the interrupt bit mask
 - * @hba - Pointer to adapter instance
 - *
 - * Returns interrupt bit mask per version
 - */
 -static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 +static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
  {
 -      if (hba->ufs_version == UFSHCI_VERSION_10)
 -              return INTERRUPT_MASK_ALL_VER_10;
 -      else
 -              return INTERRUPT_MASK_ALL_VER_11;
 +      if (!hba->scsi_block_reqs_cnt++)
 +              scsi_block_requests(hba->host);
  }
  
 -/**
 - * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 - * @hba - Pointer to adapter instance
 - *
 - * Returns UFSHCI version supported by the controller
 - */
 -static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 +void ufshcd_scsi_block_requests(struct ufs_hba *hba)
  {
 -      if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
 -              return ufshcd_vops_get_ufs_hci_version(hba);
 +      unsigned long flags;
  
 -      return ufshcd_readl(hba, REG_UFS_VERSION);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      __ufshcd_scsi_block_requests(hba);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
  }
 +EXPORT_SYMBOL(ufshcd_scsi_block_requests);
  
 -/**
 - * ufshcd_is_device_present - Check if any device connected to
 - *                          the host controller
 - * @hba: pointer to adapter instance
 - *
 - * Returns 1 if device present, 0 if no device detected
 - */
 -static inline int ufshcd_is_device_present(struct ufs_hba *hba)
 +static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
  {
 -      return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
 -                                              DEVICE_PRESENT) ? 1 : 0;
 +      int ret = 0;
 +
 +      if (!hba->pctrl)
 +              return 0;
 +
 +      /* Assert reset if ctrl == true */
 +      if (ctrl)
 +              ret = pinctrl_select_state(hba->pctrl,
 +                      pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
 +      else
 +              ret = pinctrl_select_state(hba->pctrl,
 +                      pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));
 +
 +      if (ret < 0)
 +              dev_err(hba->dev, "%s: %s failed with err %d\n",
 +                      __func__, ctrl ? "Assert" : "Deassert", ret);
 +
 +      return ret;
  }
  
 -/**
 - * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 - * @lrb: pointer to local command reference block
 - *
 - * This function is used to get the OCS field from UTRD
 - * Returns the OCS field in the UTRD
 - */
 -static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 +static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
  {
 -      return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 +      return ufshcd_device_reset_ctrl(hba, true);
  }
  
 -/**
 - * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 - * @task_req_descp: pointer to utp_task_req_desc structure
 - *
 - * This function is used to get the OCS field from UTMRD
 - * Returns the OCS field in the UTMRD
 - */
 -static inline int
 -ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
 +static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
  {
 -      return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
 +      return ufshcd_device_reset_ctrl(hba, false);
  }
  
 -/**
 - * ufshcd_get_tm_free_slot - get a free slot for task management request
 - * @hba: per adapter instance
 - * @free_slot: pointer to variable with available slot value
 - *
 - * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 - * Returns 0 if free slot is not available, else return 1 with tag value
 - * in @free_slot.
 - */
 -static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
 +static int ufshcd_reset_device(struct ufs_hba *hba)
  {
 -      int tag;
 -      bool ret = false;
 +      int ret;
  
 -      if (!free_slot)
 +      /* reset the connected UFS device */
 +      ret = ufshcd_assert_device_reset(hba);
 +      if (ret)
                goto out;
 +      /*
 +       * The reset signal is active low.
 +       * The UFS device shall detect more than or equal to 1us of positive
 +       * or negative RST_n pulse width.
 +       * To be on safe side, keep the reset low for atleast 10us.
 +       */
 +      usleep_range(10, 15);
  
 -      do {
 -              tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
 -              if (tag >= hba->nutmrs)
 -                      goto out;
 -      } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
 -
 -      *free_slot = tag;
 -      ret = true;
 +      ret = ufshcd_deassert_device_reset(hba);
 +      if (ret)
 +              goto out;
 +      /* same as assert, wait for atleast 10us after deassert */
 +      usleep_range(10, 15);
  out:
        return ret;
  }
  
 -static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
 +/* replace non-printable or non-ASCII characters with spaces */
 +static inline void ufshcd_remove_non_printable(char *val)
  {
 -      clear_bit_unlock(slot, &hba->tm_slots_in_use);
 +      if (!val || !*val)
 +              return;
 +
 +      if (*val < 0x20 || *val > 0x7e)
 +              *val = ' ';
  }
  
 -/**
 - * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 - * @hba: per adapter instance
 - * @pos: position of the bit to be cleared
 - */
 -static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
 +#define UFSHCD_MAX_CMD_LOGGING        200
 +
 +#ifdef CONFIG_TRACEPOINTS
 +static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
 +                      struct ufshcd_cmd_log_entry *entry, u8 opcode)
 +{
 +      if (trace_ufshcd_command_enabled()) {
 +              u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 +
 +              trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
 +                                   entry->doorbell, entry->transfer_len, intr,
 +                                   entry->lba, opcode);
 +      }
 +}
 +#else
 +static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
 +                      struct ufshcd_cmd_log_entry *entry, u8 opcode)
  {
 -      ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
  }
 +#endif
  
 -/**
 - * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 - * @reg: Register value of host controller status
 - *
 - * Returns integer, 0 on Success and positive value if failed
 - */
 -static inline int ufshcd_get_lists_status(u32 reg)
 +#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
 +static void ufshcd_cmd_log_init(struct ufs_hba *hba)
  {
 -      /*
 -       * The mask 0xFF is for the following HCS register bits
 -       * Bit          Description
 -       *  0           Device Present
 -       *  1           UTRLRDY
 -       *  2           UTMRLRDY
 -       *  3           UCRDY
 -       *  4           HEI
 -       *  5           DEI
 -       * 6-7          reserved
 -       */
 -      return (((reg) & (0xFF)) >> 1) ^ (0x07);
 +      /* Allocate log entries */
 +      if (!hba->cmd_log.entries) {
 +              hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
 +                      sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
 +              if (!hba->cmd_log.entries)
 +                      return;
 +              dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
 +                              __func__);
 +      }
  }
  
 -/**
 - * ufshcd_get_uic_cmd_result - Get the UIC command result
 - * @hba: Pointer to adapter instance
 - *
 - * This function gets the result of UIC command completion
 - * Returns 0 on success, non zero value on error
 - */
 -static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 +static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 +                           unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
 +                           sector_t lba, int transfer_len, u8 opcode)
  {
 -      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
 -             MASK_UIC_COMMAND_RESULT;
 +      struct ufshcd_cmd_log_entry *entry;
 +
 +      if (!hba->cmd_log.entries)
 +              return;
 +
 +      entry = &hba->cmd_log.entries[hba->cmd_log.pos];
 +      entry->lun = lun;
 +      entry->str = str;
 +      entry->cmd_type = cmd_type;
 +      entry->cmd_id = cmd_id;
 +      entry->lba = lba;
 +      entry->transfer_len = transfer_len;
 +      entry->idn = idn;
 +      entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      entry->tag = tag;
 +      entry->tstamp = ktime_get();
 +      entry->outstanding_reqs = hba->outstanding_reqs;
 +      entry->seq_num = hba->cmd_log.seq_num;
 +      hba->cmd_log.seq_num++;
 +      hba->cmd_log.pos =
 +                      (hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
 +
 +      ufshcd_add_command_trace(hba, entry, opcode);
  }
  
 -/**
 - * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 - * @hba: Pointer to adapter instance
 - *
 - * This function gets UIC command argument3
 - * Returns 0 on success, non zero value on error
 - */
 -static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
 +static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 +      unsigned int tag, u8 cmd_id, u8 idn)
  {
 -      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
 +      __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
 +                       0xff, (sector_t)-1, -1, -1);
  }
  
 -/**
 - * ufshcd_get_req_rsp - returns the TR response transaction type
 - * @ucd_rsp_ptr: pointer to response UPIU
 - */
 -static inline int
 -ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
 +static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
  {
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
 +      ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
 +}
 +
 +static void ufshcd_print_cmd_log(struct ufs_hba *hba)
 +{
 +      int i;
 +      int pos;
 +      struct ufshcd_cmd_log_entry *p;
 +
 +      if (!hba->cmd_log.entries)
 +              return;
 +
 +      pos = hba->cmd_log.pos;
 +      for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
 +              p = &hba->cmd_log.entries[pos];
 +              pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
 +
 +              if (ktime_to_us(p->tstamp)) {
 +                      pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
 +                              p->cmd_type, p->str, p->seq_num,
 +                              p->lun, p->cmd_id, (unsigned long long)p->lba,
 +                              p->transfer_len, p->tag, p->doorbell,
 +                              p->outstanding_reqs, p->idn,
 +                              ktime_to_us(p->tstamp));
 +                              usleep_range(1000, 1100);
 +              }
 +      }
 +}
 +#else
 +static void ufshcd_cmd_log_init(struct ufs_hba *hba)
 +{
 +}
 +
 +static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 +                           unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
 +                           sector_t lba, int transfer_len, u8 opcode)
 +{
 +      struct ufshcd_cmd_log_entry entry;
 +
 +      entry.str = str;
 +      entry.lba = lba;
 +      entry.cmd_id = cmd_id;
 +      entry.transfer_len = transfer_len;
 +      entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      entry.tag = tag;
 +
 +      ufshcd_add_command_trace(hba, &entry, opcode);
 +}
 +
 +static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
 +{
 +}
 +
 +static void ufshcd_print_cmd_log(struct ufs_hba *hba)
 +{
 +}
 +#endif
 +
 +#ifdef CONFIG_TRACEPOINTS
 +static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
 +                                      unsigned int tag, const char *str)
 +{
 +      struct ufshcd_lrb *lrbp;
 +      char *cmd_type = NULL;
 +      u8 opcode = 0;
 +      u8 cmd_id = 0, idn = 0;
 +      sector_t lba = -1;
 +      int transfer_len = -1;
 +
 +      lrbp = &hba->lrb[tag];
 +
 +      if (lrbp->cmd) { /* data phase exists */
 +              opcode = (u8)(*lrbp->cmd->cmnd);
 +              if ((opcode == READ_10) || (opcode == WRITE_10)) {
 +                      /*
 +                       * Currently we only fully trace read(10) and write(10)
 +                       * commands
 +                       */
 +                      if (lrbp->cmd->request && lrbp->cmd->request->bio)
 +                              lba =
 +                              lrbp->cmd->request->bio->bi_iter.bi_sector;
 +                      transfer_len = be32_to_cpu(
 +                              lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
 +              }
 +      }
 +
 +      if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
 +              cmd_type = "scsi";
 +              cmd_id = (u8)(*lrbp->cmd->cmnd);
 +      } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 +              if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
 +                      cmd_type = "nop";
 +                      cmd_id = 0;
 +              } else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
 +                      cmd_type = "query";
 +                      cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
 +                      idn = hba->dev_cmd.query.request.upiu_req.idn;
 +              }
 +      }
 +
 +      __ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
 +                       lrbp->lun, lba, transfer_len, opcode);
 +}
 +#else
 +static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
 +                                      unsigned int tag, const char *str)
 +{
 +}
 +#endif
 +
 +static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
 +{
 +      struct ufs_clk_info *clki;
 +      struct list_head *head = &hba->clk_list_head;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
 +              return;
 +
 +      if (!head || list_empty(head))
 +              return;
 +
 +      list_for_each_entry(clki, head, list) {
 +              if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
 +                              clki->max_freq)
 +                      dev_err(hba->dev, "clk: %s, rate: %u\n",
 +                                      clki->name, clki->curr_freq);
 +      }
 +}
 +
 +static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
 +              struct ufs_uic_err_reg_hist *err_hist, char *err_name)
 +{
 +      int i;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
 +              return;
 +
 +      for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
 +              int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
 +
 +              if (err_hist->reg[p] == 0)
 +                      continue;
 +              dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
 +                      err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
 +      }
 +}
 +
 +static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
 +{
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
 +              return;
 +
 +      /*
 +       * hex_dump reads its data without the readl macro. This might
 +       * cause inconsistency issues on some platform, as the printed
 +       * values may be from cache and not the most recent value.
 +       * To know whether you are looking at an un-cached version verify
 +       * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
 +       * during platform/pci probe function.
 +       */
 +      ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
 +      dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
 +              hba->ufs_version, hba->capabilities);
 +      dev_err(hba->dev,
 +              "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
 +              (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
 +      dev_err(hba->dev,
 +              "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
 +              ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
 +              hba->ufs_stats.hibern8_exit_cnt);
 +
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
 +
 +      ufshcd_print_clk_freqs(hba);
 +
 +      ufshcd_vops_dbg_register_dump(hba, no_sleep);
 +}
 +
 +static void ufshcd_print_host_regs(struct ufs_hba *hba)
 +{
 +      __ufshcd_print_host_regs(hba, false);
 +}
 +
 +static
 +void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
 +{
 +      struct ufshcd_lrb *lrbp;
 +      int prdt_length;
 +      int tag;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
 +              return;
 +
 +      for_each_set_bit(tag, &bitmap, hba->nutrs) {
 +              lrbp = &hba->lrb[tag];
 +
 +              dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
 +                              tag, ktime_to_us(lrbp->issue_time_stamp));
 +              dev_err(hba->dev,
 +                      "UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
 +                      tag, (u64)lrbp->utrd_dma_addr);
 +              ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
 +                              sizeof(struct utp_transfer_req_desc));
 +              dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
 +                      (u64)lrbp->ucd_req_dma_addr);
 +              ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
 +                              sizeof(struct utp_upiu_req));
 +              dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
 +                      (u64)lrbp->ucd_rsp_dma_addr);
 +              ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
 +                              sizeof(struct utp_upiu_rsp));
 +              prdt_length =
 +                      le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
 +              dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries  phys@0x%llx",
 +                      tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
 +              if (pr_prdt)
 +                      ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
 +                              sizeof(struct ufshcd_sg_entry) * prdt_length);
 +      }
 +}
 +
 +static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 +{
 +      struct utp_task_req_desc *tmrdp;
 +      int tag;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
 +              return;
 +
 +      for_each_set_bit(tag, &bitmap, hba->nutmrs) {
 +              tmrdp = &hba->utmrdl_base_addr[tag];
 +              dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
 +              ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
 +                              sizeof(struct request_desc_header));
 +              dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
 +                              tag);
 +              ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
 +                              sizeof(struct utp_upiu_req));
 +              dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
 +                              tag);
 +              ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
 +                              sizeof(struct utp_task_req_desc));
 +      }
 +}
 +
 +static void ufshcd_print_fsm_state(struct ufs_hba *hba)
 +{
 +      int err = 0, tx_fsm_val = 0, rx_fsm_val = 0;
 +
 +      err = ufshcd_dme_get(hba,
 +                      UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
 +                      UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
 +                      &tx_fsm_val);
 +      dev_err(hba->dev, "%s: TX_FSM_STATE = %u, err = %d\n", __func__,
 +                      tx_fsm_val, err);
 +      err = ufshcd_dme_get(hba,
 +                      UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE,
 +                      UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
 +                      &rx_fsm_val);
 +      dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__,
 +                      rx_fsm_val, err);
 +}
 +
 +static void ufshcd_print_host_state(struct ufs_hba *hba)
 +{
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
 +              return;
 +
 +      dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
 +      dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
 +              hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
 +      dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
 +              hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
 +      dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
 +      dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
 +              hba->pm_op_in_progress, hba->is_sys_suspended);
 +      dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
 +              hba->auto_bkops_enabled, hba->host->host_self_blocked);
 +      dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
 +              hba->clk_gating.state, hba->hibern8_on_idle.state);
 +      dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
 +              hba->eh_flags, hba->req_abort_count);
 +      dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
 +              hba->capabilities, hba->caps);
 +      dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
 +              hba->dev_quirks);
  }
  
  /**
 - * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 - * @ucd_rsp_ptr: pointer to response UPIU
 - *
 - * This function gets the response status and scsi_status from response UPIU
 - * Returns the response result code.
 + * ufshcd_print_pwr_info - print power params as saved in hba
 + * power info
 + * @hba: per-adapter instance
   */
 -static inline int
 -ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
 -{
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 +static void ufshcd_print_pwr_info(struct ufs_hba *hba)
 +{
 +      char *names[] = {
 +              "INVALID MODE",
 +              "FAST MODE",
 +              "SLOW_MODE",
 +              "INVALID MODE",
 +              "FASTAUTO_MODE",
 +              "SLOWAUTO_MODE",
 +              "INVALID MODE",
 +      };
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
 +              return;
 +
 +      dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
 +               __func__,
 +               hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
 +               hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
 +               names[hba->pwr_info.pwr_rx],
 +               names[hba->pwr_info.pwr_tx],
 +               hba->pwr_info.hs_rate);
  }
  
  /*
 - * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 - *                            from response UPIU
 - * @ucd_rsp_ptr: pointer to response UPIU
 - *
 - * Return the data segment length.
 + * ufshcd_wait_for_register - wait for register value to change
 + * @hba - per-adapter interface
 + * @reg - mmio register offset
 + * @mask - mask to apply to read register value
 + * @val - wait condition
 + * @interval_us - polling interval in microsecs
 + * @timeout_ms - timeout in millisecs
 + * @can_sleep - perform sleep or just spin
 + * Returns -ETIMEDOUT on error, zero on success
   */
 -static inline unsigned int
 -ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
 +int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 +                              u32 val, unsigned long interval_us,
 +                              unsigned long timeout_ms, bool can_sleep)
  {
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 -              MASK_RSP_UPIU_DATA_SEG_LEN;
 +      int err = 0;
 +      unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 +
 +      /* ignore bits that we don't intend to wait on */
 +      val = val & mask;
 +
 +      while ((ufshcd_readl(hba, reg) & mask) != val) {
 +              if (can_sleep)
 +                      usleep_range(interval_us, interval_us + 50);
 +              else
 +                      udelay(interval_us);
 +              if (time_after(jiffies, timeout)) {
 +                      if ((ufshcd_readl(hba, reg) & mask) != val)
 +                              err = -ETIMEDOUT;
 +                      break;
 +              }
 +      }
 +
 +      return err;
  }
  
  /**
 - * ufshcd_is_exception_event - Check if the device raised an exception event
 - * @ucd_rsp_ptr: pointer to response UPIU
 - *
 - * The function checks if the device raised an exception event indicated in
 - * the Device Information field of response UPIU.
 + * ufshcd_get_intr_mask - Get the interrupt bit mask
 + * @hba - Pointer to adapter instance
   *
 - * Returns true if exception is raised, false otherwise.
 + * Returns interrupt bit mask per version
   */
 -static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 +static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
  {
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 -                      MASK_RSP_EXCEPTION_EVENT ? true : false;
 +      u32 intr_mask = 0;
 +
 +      switch (hba->ufs_version) {
 +      case UFSHCI_VERSION_10:
 +              intr_mask = INTERRUPT_MASK_ALL_VER_10;
 +              break;
 +      /* allow fall through */
 +      case UFSHCI_VERSION_11:
 +      case UFSHCI_VERSION_20:
 +              intr_mask = INTERRUPT_MASK_ALL_VER_11;
 +              break;
 +      /* allow fall through */
 +      case UFSHCI_VERSION_21:
 +      default:
 +              intr_mask = INTERRUPT_MASK_ALL_VER_21;
 +      }
 +
 +      if (!ufshcd_is_crypto_supported(hba))
 +              intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;
 +
 +      return intr_mask;
  }
  
  /**
 - * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 - * @hba: per adapter instance
 + * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 + * @hba - Pointer to adapter instance
 + *
 + * Returns UFSHCI version supported by the controller
   */
 -static inline void
 -ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 +static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
  {
 -      ufshcd_writel(hba, INT_AGGR_ENABLE |
 -                    INT_AGGR_COUNTER_AND_TIMER_RESET,
 -                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +      if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
 +              return ufshcd_vops_get_ufs_hci_version(hba);
 +
 +      return ufshcd_readl(hba, REG_UFS_VERSION);
  }
  
  /**
 - * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 - * @hba: per adapter instance
 - * @cnt: Interrupt aggregation counter threshold
 - * @tmout: Interrupt aggregation timeout value
 + * ufshcd_is_device_present - Check if any device connected to
 + *                          the host controller
 + * @hba: pointer to adapter instance
 + *
 + * Returns 1 if device present, 0 if no device detected
   */
 -static inline void
 -ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
 +static inline int ufshcd_is_device_present(struct ufs_hba *hba)
  {
 -      ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
 -                    INT_AGGR_COUNTER_THLD_VAL(cnt) |
 -                    INT_AGGR_TIMEOUT_VAL(tmout),
 -                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +      return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
 +                                              DEVICE_PRESENT) ? 1 : 0;
  }
  
  /**
 - * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 - * @hba: per adapter instance
 + * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 + * @lrb: pointer to local command reference block
 + *
 + * This function is used to get the OCS field from UTRD
 + * Returns the OCS field in the UTRD
   */
 -static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
 +static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
  {
 -      ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +      return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
  }
  
  /**
 - * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 - *                    When run-stop registers are set to 1, it indicates the
 - *                    host controller that it can process the requests
 - * @hba: per adapter instance
 + * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 + * @task_req_descp: pointer to utp_task_req_desc structure
 + *
 + * This function is used to get the OCS field from UTMRD
 + * Returns the OCS field in the UTMRD
   */
 -static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
 +static inline int
 +ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
  {
 -      ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
 -                    REG_UTP_TASK_REQ_LIST_RUN_STOP);
 -      ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
 -                    REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
 +      return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
  }
  
  /**
 - * ufshcd_hba_start - Start controller initialization sequence
 + * ufshcd_get_tm_free_slot - get a free slot for task management request
   * @hba: per adapter instance
 + * @free_slot: pointer to variable with available slot value
 + *
 + * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 + * Returns 0 if free slot is not available, else return 1 with tag value
 + * in @free_slot.
   */
 -static inline void ufshcd_hba_start(struct ufs_hba *hba)
 +static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
  {
 -      ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
 -}
 +      int tag;
 +      bool ret = false;
 +
 +      if (!free_slot)
 +              goto out;
 +
 +      do {
 +              tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
 +              if (tag >= hba->nutmrs)
 +                      goto out;
 +      } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
 +
 +      *free_slot = tag;
 +      ret = true;
 +out:
 +      return ret;
 +}
 +
 +static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
 +{
 +      clear_bit_unlock(slot, &hba->tm_slots_in_use);
 +}
 +
 +/**
 + * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 + * @hba: per adapter instance
 + * @pos: position of the bit to be cleared
 + */
 +static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
 +{
 +      ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 +}
 +
 +/**
 + * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 + * @hba: per adapter instance
 + * @tag: position of the bit to be cleared
 + */
 +static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
 +{
 +      __clear_bit(tag, &hba->outstanding_reqs);
 +}
 +
 +/**
 + * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 + * @reg: Register value of host controller status
 + *
 + * Returns integer, 0 on Success and positive value if failed
 + */
 +static inline int ufshcd_get_lists_status(u32 reg)
 +{
 +      /*
 +       * The mask 0xFF is for the following HCS register bits
 +       * Bit          Description
 +       *  0           Device Present
 +       *  1           UTRLRDY
 +       *  2           UTMRLRDY
 +       *  3           UCRDY
 +       * 4-7          reserved
 +       */
 +      return ((reg & 0xFF) >> 1) ^ 0x07;
 +}
 +
 +/**
 + * ufshcd_get_uic_cmd_result - Get the UIC command result
 + * @hba: Pointer to adapter instance
 + *
 + * This function gets the result of UIC command completion
 + * Returns 0 on success, non zero value on error
 + */
 +static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 +{
 +      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
 +             MASK_UIC_COMMAND_RESULT;
 +}
 +
 +/**
 + * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 + * @hba: Pointer to adapter instance
 + *
 + * This function gets UIC command argument3
 + * Returns 0 on success, non zero value on error
 + */
 +static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
 +{
 +      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
 +}
 +
 +/**
 + * ufshcd_get_req_rsp - returns the TR response transaction type
 + * @ucd_rsp_ptr: pointer to response UPIU
 + */
 +static inline int
 +ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
 +}
 +
 +/**
 + * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 + * @ucd_rsp_ptr: pointer to response UPIU
 + *
 + * This function gets the response status and scsi_status from response UPIU
 + * Returns the response result code.
 + */
 +static inline int
 +ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 +}
 +
 +/*
 + * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 + *                            from response UPIU
 + * @ucd_rsp_ptr: pointer to response UPIU
 + *
 + * Return the data segment length.
 + */
 +static inline unsigned int
 +ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 +              MASK_RSP_UPIU_DATA_SEG_LEN;
 +}
 +
 +/**
 + * ufshcd_is_exception_event - Check if the device raised an exception event
 + * @ucd_rsp_ptr: pointer to response UPIU
 + *
 + * The function checks if the device raised an exception event indicated in
 + * the Device Information field of response UPIU.
 + *
 + * Returns true if exception is raised, false otherwise.
 + */
 +static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 +                      MASK_RSP_EXCEPTION_EVENT ? true : false;
 +}
 +
 +/**
 + * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 + * @hba: per adapter instance
 + */
 +static inline void
 +ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 +{
 +      ufshcd_writel(hba, INT_AGGR_ENABLE |
 +                    INT_AGGR_COUNTER_AND_TIMER_RESET,
 +                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +}
 +
 +/**
 + * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 + * @hba: per adapter instance
 + * @cnt: Interrupt aggregation counter threshold
 + * @tmout: Interrupt aggregation timeout value
 + */
 +static inline void
 +ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
 +{
 +      ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
 +                    INT_AGGR_COUNTER_THLD_VAL(cnt) |
 +                    INT_AGGR_TIMEOUT_VAL(tmout),
 +                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +}
 +
 +/**
 + * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 + * @hba: per adapter instance
 + */
 +static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
 +{
 +      ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +}
 +
 +/**
 + * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 + *                    When the run-stop registers are set to 1, they indicate
 + *                    to the host controller that it can process requests.
 + * @hba: per adapter instance
 + */
 +static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
 +{
 +      ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
 +                    REG_UTP_TASK_REQ_LIST_RUN_STOP);
 +      ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
 +                    REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
 +}
 +
 +/**
 + * ufshcd_hba_start - Start controller initialization sequence
 + * @hba: per adapter instance
 + */
 +static inline void ufshcd_hba_start(struct ufs_hba *hba)
 +{
 +      u32 val = CONTROLLER_ENABLE;
 +
 +      if (ufshcd_is_crypto_supported(hba))
 +              val |= CRYPTO_GENERAL_ENABLE;
 +      ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
 +}
 +
 +/**
 + * ufshcd_is_hba_active - Get controller state
 + * @hba: per adapter instance
 + *
 + * Returns zero if controller is active, 1 otherwise
 + */
 +static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 +{
 +      return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 +}
 +
 +static const char *ufschd_uic_link_state_to_string(
 +                      enum uic_link_state state)
 +{
 +      switch (state) {
 +      case UIC_LINK_OFF_STATE:        return "OFF";
 +      case UIC_LINK_ACTIVE_STATE:     return "ACTIVE";
 +      case UIC_LINK_HIBERN8_STATE:    return "HIBERN8";
 +      default:                        return "UNKNOWN";
 +      }
 +}
 +
 +static const char *ufschd_ufs_dev_pwr_mode_to_string(
 +                      enum ufs_dev_pwr_mode state)
 +{
 +      switch (state) {
 +      case UFS_ACTIVE_PWR_MODE:       return "ACTIVE";
 +      case UFS_SLEEP_PWR_MODE:        return "SLEEP";
 +      case UFS_POWERDOWN_PWR_MODE:    return "POWERDOWN";
 +      default:                        return "UNKNOWN";
 +      }
 +}
 +
 +u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
 +{
 +      /* HCI versions 1.0 and 1.1 support UniPro 1.41 */
 +      if ((hba->ufs_version == UFSHCI_VERSION_10) ||
 +          (hba->ufs_version == UFSHCI_VERSION_11))
 +              return UFS_UNIPRO_VER_1_41;
 +      else
 +              return UFS_UNIPRO_VER_1_6;
 +}
 +EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
 +
 +static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
 +{
 +      /*
 +       * If both host and device support UniPro ver1.6 or later, PA layer
 +       * parameters tuning happens during link startup itself.
 +       *
 +       * PA layer parameters can be tuned manually if either the host or
 +       * the device doesn't support UniPro ver 1.6 or later. But to keep
 +       * the manual tuning logic simple, we only do manual tuning if the
 +       * local UniPro version is below 1.6.
 +       */
 +      if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
 +              return true;
 +      else
 +              return false;
 +}
 +
 +/**
 + * ufshcd_set_clk_freq - set UFS controller clock frequencies
 + * @hba: per adapter instance
 + * @scale_up: If true, set the maximum possible frequency; otherwise set
 + *            the low frequency
 + *
 + * Returns 0 on success, < 0 on error.
 + */
 +static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +      struct ufs_clk_info *clki;
 +      struct list_head *head = &hba->clk_list_head;
 +
 +      if (!head || list_empty(head))
 +              goto out;
 +
 +      list_for_each_entry(clki, head, list) {
 +              if (!IS_ERR_OR_NULL(clki->clk)) {
 +                      if (scale_up && clki->max_freq) {
 +                              if (clki->curr_freq == clki->max_freq)
 +                                      continue;
 +
 +                              ret = clk_set_rate(clki->clk, clki->max_freq);
 +                              if (ret) {
 +                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 +                                              __func__, clki->name,
 +                                              clki->max_freq, ret);
 +                                      break;
 +                              }
 +                              trace_ufshcd_clk_scaling(dev_name(hba->dev),
 +                                              "scaled up", clki->name,
 +                                              clki->curr_freq,
 +                                              clki->max_freq);
 +                              clki->curr_freq = clki->max_freq;
 +
 +                      } else if (!scale_up && clki->min_freq) {
 +                              if (clki->curr_freq == clki->min_freq)
 +                                      continue;
 +
 +                              ret = clk_set_rate(clki->clk, clki->min_freq);
 +                              if (ret) {
 +                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 +                                              __func__, clki->name,
 +                                              clki->min_freq, ret);
 +                                      break;
 +                              }
 +                              trace_ufshcd_clk_scaling(dev_name(hba->dev),
 +                                              "scaled down", clki->name,
 +                                              clki->curr_freq,
 +                                              clki->min_freq);
 +                              clki->curr_freq = clki->min_freq;
 +                      }
 +              }
 +              dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
 +                              clki->name, clk_get_rate(clki->clk));
 +      }
 +
 +out:
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 + * @hba: per adapter instance
 + * @scale_up: True if scaling up and false if scaling down
 + *
 + * Returns 0 on success, < 0 on error.
 + */
 +static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +
 +      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
 +      if (ret)
 +              return ret;
 +
 +      ret = ufshcd_set_clk_freq(hba, scale_up);
 +      if (ret)
 +              return ret;
 +
 +      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
 +      if (ret) {
 +              ufshcd_set_clk_freq(hba, !scale_up);
 +              return ret;
 +      }
 +
 +      return ret;
 +}
 +
 +static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
 +{
 +      hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
 +      cancel_work_sync(&hba->clk_gating.gate_work);
 +}
 +
 +static void ufshcd_ungate_work(struct work_struct *work)
 +{
 +      int ret;
 +      unsigned long flags;
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                      clk_gating.ungate_work);
 +
 +      ufshcd_cancel_gate_work(hba);
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_gating.state == CLKS_ON) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              goto unblock_reqs;
 +      }
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufshcd_hba_vreg_set_hpm(hba);
 +      ufshcd_enable_clocks(hba);
 +
 +      /* Exit from hibern8 */
 +      if (ufshcd_can_hibern8_during_gating(hba)) {
 +              /* Prevent gating in this path */
 +              hba->clk_gating.is_suspended = true;
 +              if (ufshcd_is_link_hibern8(hba)) {
 +                      ret = ufshcd_uic_hibern8_exit(hba);
 +                      if (ret)
 +                              dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
 +                                      __func__, ret);
 +                      else
 +                              ufshcd_set_link_active(hba);
 +              }
 +              hba->clk_gating.is_suspended = false;
 +      }
 +unblock_reqs:
 +      ufshcd_scsi_unblock_requests(hba);
 +}
 +
 +/**
 + * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 + * Also, exit from hibern8 mode and set the link as active.
 + * @hba: per adapter instance
 + * @async: This indicates whether caller should ungate clocks asynchronously.
 + */
 +int ufshcd_hold(struct ufs_hba *hba, bool async)
 +{
 +      int rc = 0;
 +      unsigned long flags;
 +
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              goto out;
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->clk_gating.active_reqs++;
 +
 +      if (ufshcd_eh_in_progress(hba)) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              return 0;
 +      }
 +
 +start:
 +      switch (hba->clk_gating.state) {
 +      case CLKS_ON:
 +              /*
 +               * Wait for the ungate work to complete if in progress.
 +               * Though the clocks may be in the ON state, the link could
 +               * still be in hibern8 state if hibern8 is allowed
 +               * during clock gating.
 +               * Make sure we also exit hibern8, in addition to the
 +               * clocks being ON.
 +               */
 +              if (ufshcd_can_hibern8_during_gating(hba) &&
 +                  ufshcd_is_link_hibern8(hba)) {
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +                      flush_work(&hba->clk_gating.ungate_work);
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +                      goto start;
 +              }
 +              break;
 +      case REQ_CLKS_OFF:
 +              /*
 +               * If the timer was active but the callback was not running
 +               * we have nothing to do, just change state and return.
 +               */
 +              if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
 +                      hba->clk_gating.state = CLKS_ON;
 +                      trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                              hba->clk_gating.state);
 +                      break;
 +              }
 +              /*
 +               * If we are here, it means gating work is either done or
 +               * currently running. Hence, fall through to cancel gating
 +               * work and to enable clocks.
 +               */
 +      case CLKS_OFF:
 +              __ufshcd_scsi_block_requests(hba);
 +              hba->clk_gating.state = REQ_CLKS_ON;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
 +              queue_work(hba->clk_gating.clk_gating_workq,
 +                              &hba->clk_gating.ungate_work);
 +              /*
 +               * fall through to check if we should wait for this
 +               * work to be done or not.
 +               */
 +      case REQ_CLKS_ON:
 +              if (async) {
 +                      rc = -EAGAIN;
 +                      hba->clk_gating.active_reqs--;
 +                      break;
 +              }
 +
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              flush_work(&hba->clk_gating.ungate_work);
 +              /* Make sure state is CLKS_ON before returning */
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              goto start;
 +      default:
 +              dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 +                              __func__, hba->clk_gating.state);
 +              break;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +out:
 +      hba->ufs_stats.clk_hold.ts = ktime_get();
 +      return rc;
 +}
 +EXPORT_SYMBOL_GPL(ufshcd_hold);
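
As a reading aid, a tiny stand-alone sketch of the gating state machine that ufshcd_hold()/ufshcd_release() drive (editor's illustration only; the enum mirrors the driver's state names, while the helper and strings are hypothetical).

#include <stdio.h>

enum demo_gating_state { CLKS_OFF, CLKS_ON, REQ_CLKS_OFF, REQ_CLKS_ON };

/* What ufshcd_hold() does for each state it can find the machine in. */
static const char *demo_on_hold(enum demo_gating_state s)
{
	switch (s) {
	case CLKS_ON:
		return "stay in CLKS_ON (flush ungate work if link is still in hibern8)";
	case REQ_CLKS_OFF:
		return "cancel the gate hrtimer and go straight back to CLKS_ON";
	case CLKS_OFF:
		return "move to REQ_CLKS_ON and queue the ungate work";
	case REQ_CLKS_ON:
		return "wait for the ungate work (or return -EAGAIN if async)";
	}
	return "invalid state";
}

int main(void)
{
	printf("%s\n", demo_on_hold(CLKS_OFF));
	return 0;
}
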
 +
 +static void ufshcd_gate_work(struct work_struct *work)
 +{
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                              clk_gating.gate_work);
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      /*
 +       * If this work is being cancelled, the gating state would be
 +       * marked as REQ_CLKS_ON. In that case save time by skipping
 +       * the gating work and exiting after changing the clock state
 +       * to CLKS_ON.
 +       */
 +      if (hba->clk_gating.is_suspended ||
 +              (hba->clk_gating.state != REQ_CLKS_OFF)) {
 +              hba->clk_gating.state = CLKS_ON;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
 +              goto rel_lock;
 +      }
 +
 +      if (hba->clk_gating.active_reqs
 +              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 +              || hba->lrb_in_use || hba->outstanding_tasks
 +              || hba->active_uic_cmd || hba->uic_async_done)
 +              goto rel_lock;
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
 +          hba->hibern8_on_idle.is_enabled)
 +              /*
 +               * Hibern8 enter work (on idle) needs the clocks to be ON,
 +               * hence make sure it is flushed before turning off the clocks.
 +               */
 +              flush_delayed_work(&hba->hibern8_on_idle.enter_work);
 +
 +      /* put the link into hibern8 mode before turning off clocks */
 +      if (ufshcd_can_hibern8_during_gating(hba)) {
 +              if (ufshcd_uic_hibern8_enter(hba)) {
 +                      hba->clk_gating.state = CLKS_ON;
 +                      trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                              hba->clk_gating.state);
 +                      goto out;
 +              }
 +              ufshcd_set_link_hibern8(hba);
 +      }
 +
 +      /*
 +       * If auto hibern8 is supported then the link will already
 +       * be in hibern8 state and the ref clock can be gated.
 +       */
 +      if ((ufshcd_is_auto_hibern8_supported(hba) ||
 +           !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
 +              ufshcd_disable_clocks(hba, true);
 +      else
 +              /* If link is active, device ref_clk can't be switched off */
 +              ufshcd_disable_clocks_skip_ref_clk(hba, true);
 +
 +      /* Put the host controller in low power mode if possible */
 +      ufshcd_hba_vreg_set_lpm(hba);
 +
 +      /*
 +       * If this work is being cancelled, the gating state would be
 +       * marked as REQ_CLKS_ON. In that case keep the state as
 +       * REQ_CLKS_ON, which anyway implies that the clocks are off
 +       * and a request to turn them on is pending. This way we keep
 +       * the state machine intact and ultimately avoid running the
 +       * cancel work multiple times when new requests arrive before
 +       * the current cancel work is done.
 +       */
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_gating.state == REQ_CLKS_OFF) {
 +              hba->clk_gating.state = CLKS_OFF;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
 +      }
 +rel_lock:
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +out:
 +      return;
 +}
 +
 +/* host lock must be held before calling this variant */
 +static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
 +{
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              return;
 +
 +      hba->clk_gating.active_reqs--;
 +
 +      if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
 +              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 +              || hba->lrb_in_use || hba->outstanding_tasks
 +              || hba->active_uic_cmd || hba->uic_async_done
 +              || ufshcd_eh_in_progress(hba) || no_sched)
 +              return;
 +
 +      hba->clk_gating.state = REQ_CLKS_OFF;
 +      trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
 +      hba->ufs_stats.clk_rel.ts = ktime_get();
 +
 +      hrtimer_start(&hba->clk_gating.gate_hrtimer,
 +                      ms_to_ktime(hba->clk_gating.delay_ms),
 +                      HRTIMER_MODE_REL);
 +}
 +
 +void ufshcd_release(struct ufs_hba *hba, bool no_sched)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      __ufshcd_release(hba, no_sched);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +EXPORT_SYMBOL_GPL(ufshcd_release);
 +
 +static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
 +}
 +
 +static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->clk_gating.delay_ms = value;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
 +}
 +
 +static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%lu\n",
 +                      hba->clk_gating.delay_ms_pwr_save);
 +}
 +
 +static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +      hba->clk_gating.delay_ms_pwr_save = value;
 +      if (ufshcd_is_clkscaling_supported(hba) &&
 +          !hba->clk_scaling.is_scaled_up)
 +              hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
 +}
 +
 +static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
 +}
 +
 +static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +      hba->clk_gating.delay_ms_perf = value;
 +      if (ufshcd_is_clkscaling_supported(hba) &&
 +          hba->clk_scaling.is_scaled_up)
 +              hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
 +}
 +
 +static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
 +}
 +
 +static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags;
 +      u32 value;
 +
 +      if (kstrtou32(buf, 0, &value))
 +              return -EINVAL;
 +
 +      value = !!value;
 +      if (value == hba->clk_gating.is_enabled)
 +              goto out;
 +
 +      if (value) {
 +              ufshcd_release(hba, false);
 +      } else {
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              hba->clk_gating.active_reqs++;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      }
 +
 +      hba->clk_gating.is_enabled = value;
 +out:
 +      return count;
 +}
 +
 +static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
 +                                      struct hrtimer *timer)
 +{
 +      struct ufs_hba *hba = container_of(timer, struct ufs_hba,
 +                                         clk_gating.gate_hrtimer);
 +
 +      queue_work(hba->clk_gating.clk_gating_workq,
 +                              &hba->clk_gating.gate_work);
 +
 +      return HRTIMER_NORESTART;
 +}
 +
 +static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 +{
 +      struct ufs_clk_gating *gating = &hba->clk_gating;
 +      char wq_name[sizeof("ufs_clk_gating_00")];
 +
 +      hba->clk_gating.state = CLKS_ON;
 +
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              return;
 +
 +      /*
 +       * Disable hibern8 during clk gating if
 +       * auto hibern8 is supported
 +       */
 +      if (ufshcd_is_auto_hibern8_supported(hba))
 +              hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 +
 +      INIT_WORK(&gating->gate_work, ufshcd_gate_work);
 +      INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
 +      /*
 +       * Clock gating work must be executed only after the auto hibern8
 +       * timeout has expired in the hardware, or after the aggressive
 +       * hibern8-on-idle software timeout. Jiffy-based low-resolution
 +       * delayed work cannot reliably guarantee this, hence use a
 +       * high-resolution timer to make sure the gate work is scheduled
 +       * strictly after the hibern8 timeout.
 +       *
 +       * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
 +       */
 +      hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 +      gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
 +
 +      snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
 +                      hba->host->host_no);
 +      hba->clk_gating.clk_gating_workq =
 +              create_singlethread_workqueue(wq_name);
 +
 +      gating->is_enabled = true;
 +
 +      gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
 +      gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
 +
 +      /* start with performance mode */
 +      gating->delay_ms = gating->delay_ms_perf;
  
 -/**
 - * ufshcd_is_hba_active - Get controller state
 - * @hba: per adapter instance
 - *
 - * Returns zero if controller is active, 1 otherwise
 - */
 -static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 -{
 -      return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 -}
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              goto scaling_not_supported;
  
 -static void ufshcd_ungate_work(struct work_struct *work)
 -{
 -      int ret;
 -      unsigned long flags;
 -      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 -                      clk_gating.ungate_work);
 +      gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
 +      gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
 +      sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
 +      gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
 +      gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
  
 -      cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 +      gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
 +      gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
 +      sysfs_attr_init(&gating->delay_perf_attr.attr);
 +      gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
 +      gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &gating->delay_perf_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
  
 -      spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->clk_gating.state == CLKS_ON) {
 -              spin_unlock_irqrestore(hba->host->host_lock, flags);
 -              goto unblock_reqs;
 -      }
 +      goto add_clkgate_enable;
  
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 -      ufshcd_setup_clocks(hba, true);
 +scaling_not_supported:
 +      hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
 +      hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
 +      sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
 +      hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
 +      hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
  
 -      /* Exit from hibern8 */
 -      if (ufshcd_can_hibern8_during_gating(hba)) {
 -              /* Prevent gating in this path */
 -              hba->clk_gating.is_suspended = true;
 -              if (ufshcd_is_link_hibern8(hba)) {
 -                      ret = ufshcd_uic_hibern8_exit(hba);
 -                      if (ret)
 -                              dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
 -                                      __func__, ret);
 -                      else
 -                              ufshcd_set_link_active(hba);
 -              }
 -              hba->clk_gating.is_suspended = false;
 +add_clkgate_enable:
 +      gating->enable_attr.show = ufshcd_clkgate_enable_show;
 +      gating->enable_attr.store = ufshcd_clkgate_enable_store;
 +      sysfs_attr_init(&gating->enable_attr.attr);
 +      gating->enable_attr.attr.name = "clkgate_enable";
 +      gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &gating->enable_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
 +}
 +
 +static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 +{
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              return;
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              device_remove_file(hba->dev,
 +                                 &hba->clk_gating.delay_pwr_save_attr);
 +              device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
 +      } else {
 +              device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
        }
 -unblock_reqs:
 -      if (ufshcd_is_clkscaling_enabled(hba))
 -              devfreq_resume_device(hba->devfreq);
 -      scsi_unblock_requests(hba->host);
 +      device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
 +      ufshcd_cancel_gate_work(hba);
 +      cancel_work_sync(&hba->clk_gating.ungate_work);
 +      destroy_workqueue(hba->clk_gating.clk_gating_workq);
 +}
 +
 +static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
 +{
 +      ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
 +                       AUTO_HIBERN8_IDLE_TIMER_MASK,
 +                      AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
 +                      REG_AUTO_HIBERN8_IDLE_TIMER);
 +      /* Make sure the timer gets applied before further operations */
 +      mb();
  }
  
  /**
 - * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 - * Also, exit from hibern8 mode and set the link as active.
 + * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
 + *
   * @hba: per adapter instance
 - * @async: This indicates whether caller should ungate clocks asynchronously.
 + * @async: This indicates whether caller wants to exit hibern8 asynchronously.
 + *
 + * Exit from hibern8 mode and set the link as active.
 + *
 + * Return 0 on success, non-zero on failure.
   */
 -int ufshcd_hold(struct ufs_hba *hba, bool async)
 +static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
  {
        int rc = 0;
        unsigned long flags;
  
 -      if (!ufshcd_is_clkgating_allowed(hba))
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba))
                goto out;
 +
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      hba->clk_gating.active_reqs++;
 +      hba->hibern8_on_idle.active_reqs++;
 +
 +      if (ufshcd_eh_in_progress(hba)) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              return 0;
 +      }
  
  start:
 -      switch (hba->clk_gating.state) {
 -      case CLKS_ON:
 -              /*
 -               * Wait for the ungate work to complete if in progress.
 -               * Though the clocks may be in ON state, the link could
 -               * still be in hibner8 state if hibern8 is allowed
 -               * during clock gating.
 -               * Make sure we exit hibern8 state also in addition to
 -               * clocks being ON.
 -               */
 -              if (ufshcd_can_hibern8_during_gating(hba) &&
 -                  ufshcd_is_link_hibern8(hba)) {
 -                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 -                      flush_work(&hba->clk_gating.ungate_work);
 -                      spin_lock_irqsave(hba->host->host_lock, flags);
 -                      goto start;
 -              }
 +      switch (hba->hibern8_on_idle.state) {
 +      case HIBERN8_EXITED:
                break;
 -      case REQ_CLKS_OFF:
 -              if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
 -                      hba->clk_gating.state = CLKS_ON;
 +      case REQ_HIBERN8_ENTER:
 +              if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
 +                      hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +                      trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                              hba->hibern8_on_idle.state);
                        break;
                }
                /*
 -               * If we here, it means gating work is either done or
 -               * currently running. Hence, fall through to cancel gating
 -               * work and to enable clocks.
 +               * If we are here, it means Hibern8 enter work is either done or
 +               * currently running. Hence, fall through to cancel hibern8
 +               * work and exit hibern8.
                 */
 -      case CLKS_OFF:
 -              scsi_block_requests(hba->host);
 -              hba->clk_gating.state = REQ_CLKS_ON;
 -              schedule_work(&hba->clk_gating.ungate_work);
 +      case HIBERN8_ENTERED:
 +              __ufshcd_scsi_block_requests(hba);
 +              hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
 +              schedule_work(&hba->hibern8_on_idle.exit_work);
                /*
                 * fall through to check if we should wait for this
                 * work to be done or not.
                 */
 -      case REQ_CLKS_ON:
 +      case REQ_HIBERN8_EXIT:
                if (async) {
                        rc = -EAGAIN;
 -                      hba->clk_gating.active_reqs--;
 +                      hba->hibern8_on_idle.active_reqs--;
                        break;
 +              } else {
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +                      flush_work(&hba->hibern8_on_idle.exit_work);
 +                      /* Make sure state is HIBERN8_EXITED before returning */
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +                      goto start;
                }
 -
 -              spin_unlock_irqrestore(hba->host->host_lock, flags);
 -              flush_work(&hba->clk_gating.ungate_work);
 -              /* Make sure state is CLKS_ON before returning */
 -              spin_lock_irqsave(hba->host->host_lock, flags);
 -              goto start;
        default:
 -              dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 -                              __func__, hba->clk_gating.state);
 +              dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
 +                              __func__, hba->hibern8_on_idle.state);
                break;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  out:
        return rc;
  }
 -EXPORT_SYMBOL_GPL(ufshcd_hold);
  
 -static void ufshcd_gate_work(struct work_struct *work)
 +/* host lock must be held before calling this variant */
 +static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
 +{
 +      unsigned long delay_in_jiffies;
 +
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba))
 +              return;
 +
 +      hba->hibern8_on_idle.active_reqs--;
 +      BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
 +
 +      if (hba->hibern8_on_idle.active_reqs
 +              || hba->hibern8_on_idle.is_suspended
 +              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 +              || hba->lrb_in_use || hba->outstanding_tasks
 +              || hba->active_uic_cmd || hba->uic_async_done
 +              || ufshcd_eh_in_progress(hba) || no_sched)
 +              return;
 +
 +      hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
 +      trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +              hba->hibern8_on_idle.state);
 +      /*
 +       * Scheduling the delayed work after 1 jiffy lets it run any time
 +       * from 0 ms to 1000/HZ ms, which is not desirable for the hibern8
 +       * enter work as performance may suffer if it runs almost
 +       * immediately. Hence make sure the hibern8 enter work gets
 +       * scheduled at least 2 jiffies later (any time between 1000/HZ ms
 +       * and 2000/HZ ms).
 +       */
 +      delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
 +      if (delay_in_jiffies == 1)
 +              delay_in_jiffies++;
 +
 +      schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
 +                            delay_in_jiffies);
 +}
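
A small arithmetic sketch of the jiffy rounding above (editor's illustration, not part of the patch; DEMO_HZ is an assumed tick rate): with HZ = 100, a 10 ms delay converts to a single jiffy, which is then bumped to 2 so the enter work is guaranteed to wait at least one full tick.

#include <assert.h>

#define DEMO_HZ 100UL

/* Round-up millisecond-to-jiffy conversion, in the spirit of the kernel
 * helper for small values.
 */
static unsigned long demo_msecs_to_jiffies(unsigned long ms)
{
	return (ms * DEMO_HZ + 999UL) / 1000UL;
}

int main(void)
{
	unsigned long delay = demo_msecs_to_jiffies(10); /* -> 1 jiffy */

	if (delay == 1)
		delay++; /* schedule between 1000/HZ ms and 2000/HZ ms */
	assert(delay == 2);
	return 0;
}
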
 +
 +static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      __ufshcd_hibern8_release(hba, no_sched);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +
 +static void ufshcd_hibern8_enter_work(struct work_struct *work)
  {
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
 -                      clk_gating.gate_work.work);
 +                                         hibern8_on_idle.enter_work.work);
        unsigned long flags;
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->clk_gating.is_suspended) {
 -              hba->clk_gating.state = CLKS_ON;
 +      if (hba->hibern8_on_idle.is_suspended) {
 +              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
                goto rel_lock;
        }
  
 -      if (hba->clk_gating.active_reqs
 +      if (hba->hibern8_on_idle.active_reqs
                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
                || hba->lrb_in_use || hba->outstanding_tasks
                || hba->active_uic_cmd || hba->uic_async_done)
               goto rel_lock;
 
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 -      /* put the link into hibern8 mode before turning off clocks */
 -      if (ufshcd_can_hibern8_during_gating(hba)) {
 -              if (ufshcd_uic_hibern8_enter(hba)) {
 -                      hba->clk_gating.state = CLKS_ON;
 -                      goto out;
 -              }
 -              ufshcd_set_link_hibern8(hba);
 -      }
 -
 -      if (ufshcd_is_clkscaling_enabled(hba)) {
 -              devfreq_suspend_device(hba->devfreq);
 -              hba->clk_scaling.window_start_t = 0;
 +      if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
 +              /* Enter failed */
 +              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
 +              goto out;
        }
 -
 -      if (!ufshcd_is_link_active(hba))
 -              ufshcd_setup_clocks(hba, false);
 -      else
 -              /* If link is active, device ref_clk can't be switched off */
 -              __ufshcd_setup_clocks(hba, false, true);
 +      ufshcd_set_link_hibern8(hba);
  
        /*
 -       * In case you are here to cancel this work the gating state
 -       * would be marked as REQ_CLKS_ON. In this case keep the state
 -       * as REQ_CLKS_ON which would anyway imply that clocks are off
 -       * and a request to turn them on is pending. By doing this way,
 +       * If this work is being cancelled, the hibern8_on_idle.state would
 +       * be marked as REQ_HIBERN8_EXIT. In that case keep the state as
 +       * REQ_HIBERN8_EXIT, which anyway implies that we are in hibern8
 +       * and a request to exit from it is pending. This way we keep
        * the state machine intact and ultimately avoid running the
        * cancel work multiple times when new requests arrive before
        * the current cancel work is done.
         */
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->clk_gating.state == REQ_CLKS_OFF)
 -              hba->clk_gating.state = CLKS_OFF;
 -
 +      if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
 +              hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
 +      }
  rel_lock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  out:
        return;
  }
  
 -/* host lock must be held before calling this variant */
 -static void __ufshcd_release(struct ufs_hba *hba)
 +static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
 +                                          unsigned long delay_ms)
  {
 -      if (!ufshcd_is_clkgating_allowed(hba))
 -              return;
 -
 -      hba->clk_gating.active_reqs--;
 -
 -      if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
 -              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 -              || hba->lrb_in_use || hba->outstanding_tasks
 -              || hba->active_uic_cmd || hba->uic_async_done)
 -              return;
 -
 -      hba->clk_gating.state = REQ_CLKS_OFF;
 -      schedule_delayed_work(&hba->clk_gating.gate_work,
 -                      msecs_to_jiffies(hba->clk_gating.delay_ms));
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_hold_all(hba);
 +      ufshcd_scsi_block_requests(hba);
 +      down_write(&hba->lock);
 +      /* wait for all the outstanding requests to finish */
 +      ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 +      ufshcd_set_auto_hibern8_timer(hba, delay_ms);
 +      up_write(&hba->lock);
 +      ufshcd_scsi_unblock_requests(hba);
 +      ufshcd_release_all(hba);
 +      pm_runtime_put_sync(hba->dev);
  }
  
 -void ufshcd_release(struct ufs_hba *hba)
 +static void ufshcd_hibern8_exit_work(struct work_struct *work)
  {
 +      int ret;
        unsigned long flags;
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                         hibern8_on_idle.exit_work);
 +
 +      cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      __ufshcd_release(hba);
 +      if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
 +           || ufshcd_is_link_active(hba)) {
 +              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              goto unblock_reqs;
 +      }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      /* Exit from hibern8 */
 +      if (ufshcd_is_link_hibern8(hba)) {
 +              hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
 +              ufshcd_hold(hba, false);
 +              ret = ufshcd_uic_hibern8_exit(hba);
 +              hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
 +              ufshcd_release(hba, false);
 +              if (!ret) {
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +                      ufshcd_set_link_active(hba);
 +                      hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +                      trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                              hba->hibern8_on_idle.state);
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              }
 +      }
 +unblock_reqs:
 +      ufshcd_scsi_unblock_requests(hba);
  }
 -EXPORT_SYMBOL_GPL(ufshcd_release);
  
 -static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 +static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
                struct device_attribute *attr, char *buf)
  {
        struct ufs_hba *hba = dev_get_drvdata(dev);
  
 -      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
 +      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
  }
  
 -static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
 +static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
  {
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned long flags, value;
 +      bool change = true;
  
        if (kstrtoul(buf, 0, &value))
                return -EINVAL;
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      hba->clk_gating.delay_ms = value;
 +      if (hba->hibern8_on_idle.delay_ms == value)
 +              change = false;
 +
 +      if (value >= hba->clk_gating.delay_ms_pwr_save ||
 +          value >= hba->clk_gating.delay_ms_perf) {
 +                      dev_err(hba->dev, "hibern8_on_idle_delay (%lu) cannot be >= clkgate_delay_ms_pwr_save (%lu) or clkgate_delay_ms_perf (%lu)\n",
 +                      value, hba->clk_gating.delay_ms_pwr_save,
 +                      hba->clk_gating.delay_ms_perf);
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              return -EINVAL;
 +      }
 +
 +      hba->hibern8_on_idle.delay_ms = value;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      /* Update auto hibern8 timer value if supported */
 +      if (change && ufshcd_is_auto_hibern8_supported(hba) &&
 +          hba->hibern8_on_idle.is_enabled)
 +              __ufshcd_set_auto_hibern8_timer(hba,
 +                                              hba->hibern8_on_idle.delay_ms);
 +
        return count;
  }
  
 -static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 +static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      if (!ufshcd_is_clkgating_allowed(hba))
 -              return;
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
  
 -      hba->clk_gating.delay_ms = 150;
 -      INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 -      INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 +      return snprintf(buf, PAGE_SIZE, "%d\n",
 +                      hba->hibern8_on_idle.is_enabled);
 +}
  
 -      hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
 -      hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
 -      sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
 -      hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
 -      hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 -      if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
 -              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
 +static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags;
 +      u32 value;
 +
 +      if (kstrtou32(buf, 0, &value))
 +              return -EINVAL;
 +
 +      value = !!value;
 +      if (value == hba->hibern8_on_idle.is_enabled)
 +              goto out;
 +
 +      /* Update auto hibern8 timer value if supported */
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              __ufshcd_set_auto_hibern8_timer(hba,
 +                      value ? hba->hibern8_on_idle.delay_ms : value);
 +              goto update;
 +      }
 +
 +      if (value) {
 +              /*
 +               * As clock gating work would wait for the hibern8 enter work
 +               * to finish, clocks would remain on during hibern8 enter work.
 +               */
 +              ufshcd_hold(hba, false);
 +              ufshcd_release_all(hba);
 +      } else {
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              hba->hibern8_on_idle.active_reqs++;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      }
 +
 +update:
 +      hba->hibern8_on_idle.is_enabled = value;
 +out:
 +      return count;
  }
  
 -static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 +static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
  {
 -      if (!ufshcd_is_clkgating_allowed(hba))
 +      /* initialize the state variable here */
 +      hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
 +          !ufshcd_is_auto_hibern8_supported(hba))
                return;
 -      device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
 -      cancel_work_sync(&hba->clk_gating.ungate_work);
 -      cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 +
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              hba->hibern8_on_idle.delay_ms = 1;
 +              hba->hibern8_on_idle.state = AUTO_HIBERN8;
 +              /*
 +               * Disable SW hibern8 enter on idle in case
 +               * auto hibern8 is supported
 +               */
 +              hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
 +      } else {
 +              hba->hibern8_on_idle.delay_ms = 10;
 +              INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
 +                                ufshcd_hibern8_enter_work);
 +              INIT_WORK(&hba->hibern8_on_idle.exit_work,
 +                        ufshcd_hibern8_exit_work);
 +      }
 +
 +      hba->hibern8_on_idle.is_enabled = true;
 +
 +      hba->hibern8_on_idle.delay_attr.show =
 +                                      ufshcd_hibern8_on_idle_delay_show;
 +      hba->hibern8_on_idle.delay_attr.store =
 +                                      ufshcd_hibern8_on_idle_delay_store;
 +      sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
 +      hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
 +      hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
 +
 +      hba->hibern8_on_idle.enable_attr.show =
 +                                      ufshcd_hibern8_on_idle_enable_show;
 +      hba->hibern8_on_idle.enable_attr.store =
 +                                      ufshcd_hibern8_on_idle_enable_store;
 +      sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
 +      hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
 +      hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
 +}
 +
 +static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
 +{
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
 +          !ufshcd_is_auto_hibern8_supported(hba))
 +              return;
 +      device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
 +      device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
 +}
 +
 +static void ufshcd_hold_all(struct ufs_hba *hba)
 +{
 +      ufshcd_hold(hba, false);
 +      ufshcd_hibern8_hold(hba, false);
 +}
 +
 +static void ufshcd_release_all(struct ufs_hba *hba)
 +{
 +      ufshcd_hibern8_release(hba, false);
 +      ufshcd_release(hba, false);
  }
  
  /* Must be called with host lock acquired */
  static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
  {
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      bool queue_resume_work = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return;
 +
 +      if (!hba->clk_scaling.active_reqs++)
 +              queue_resume_work = true;
 +
 +      if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
                return;
  
 +      if (queue_resume_work)
 +              queue_work(hba->clk_scaling.workq,
 +                         &hba->clk_scaling.resume_work);
 +
 +      if (!hba->clk_scaling.window_start_t) {
 +              hba->clk_scaling.window_start_t = jiffies;
 +              hba->clk_scaling.tot_busy_t = 0;
 +              hba->clk_scaling.is_busy_started = false;
 +      }
 +
        if (!hba->clk_scaling.is_busy_started) {
                hba->clk_scaling.busy_start_t = ktime_get();
                hba->clk_scaling.is_busy_started = true;
@@@ -2313,7 -797,7 +2313,7 @@@ static void ufshcd_clk_scaling_update_b
  {
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
  
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      if (!ufshcd_is_clkscaling_supported(hba))
                return;
  
        if (!hba->outstanding_reqs && scaling->is_busy_started) {
                scaling->is_busy_started = false;
        }
  }
 +
  /**
   * ufshcd_send_command - Send SCSI or device management commands
   * @hba: per adapter instance
   * @task_tag: Task tag of the command
   */
  static inline
 -void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 +int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
  {
 +      int ret = 0;
 +
 +      hba->lrb[task_tag].issue_time_stamp = ktime_get();
 +      hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
        ufshcd_clk_scaling_start_busy(hba);
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      /* Make sure that doorbell is committed immediately */
 +      wmb();
 +      ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
 +      ufshcd_update_tag_stats(hba, task_tag);
 +      return ret;
  }
  
  /**
@@@ -2362,7 -836,7 +2362,7 @@@ static inline void ufshcd_copy_sense_da
  
                memcpy(lrbp->sense_buffer,
                        lrbp->ucd_rsp_ptr->sr.sense_data,
 -                      min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
 +                      min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
        }
  }
  
@@@ -2380,7 -854,8 +2380,8 @@@ int ufshcd_copy_query_response(struct u
        memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
  
        /* Get the descriptor */
-       if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+       if (hba->dev_cmd.query.descriptor &&
+           lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
                u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
                                GENERAL_UPIU_REQUEST_SIZE;
                u16 resp_len;
@@@ -2458,7 -933,6 +2459,7 @@@ ufshcd_dispatch_uic_cmd(struct ufs_hba 
  
        hba->active_uic_cmd = uic_cmd;
  
 +      ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
        /* Write Args */
        ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
        ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@@ -2489,11 -963,6 +2490,11 @@@ ufshcd_wait_for_uic_cmd(struct ufs_hba 
        else
                ret = -ETIMEDOUT;
  
 +      if (ret)
 +              ufsdbg_set_err_state(hba);
 +
 +      ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
 +
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->active_uic_cmd = NULL;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
   * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
   * @hba: per adapter instance
   * @uic_cmd: UIC command
 + * @completion: initialize the completion only if this is set to true
   *
  * Identical to ufshcd_send_uic_cmd() except for the mutex handling. Must be called
   * with mutex held and host_lock locked.
   * Returns 0 only if success.
   */
  static int
 -__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
 +                    bool completion)
  {
        if (!ufshcd_ready_for_uic_cmd(hba)) {
                dev_err(hba->dev,
                return -EIO;
        }
  
 -      init_completion(&uic_cmd->done);
 +      if (completion)
 +              init_completion(&uic_cmd->done);
  
        ufshcd_dispatch_uic_cmd(hba, uic_cmd);
  
@@@ -2542,25 -1008,19 +2543,25 @@@ ufshcd_send_uic_cmd(struct ufs_hba *hba
        int ret;
        unsigned long flags;
  
 -      ufshcd_hold(hba, false);
 +      hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
 +      ufshcd_hold_all(hba);
        mutex_lock(&hba->uic_cmd_mutex);
        ufshcd_add_delay_before_dme_cmd(hba);
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
 +      ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (!ret)
                ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
  
 +      ufshcd_save_tstamp_of_last_dme_cmd(hba);
        mutex_unlock(&hba->uic_cmd_mutex);
 +      ufshcd_release_all(hba);
 +      hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_UIC, 0, &ret);
  
 -      ufshcd_release(hba);
        return ret;
  }
  
@@@ -2596,7 -1056,6 +2597,7 @@@ static int ufshcd_map_sg(struct ufshcd_
                                cpu_to_le32(lower_32_bits(sg->dma_address));
                        prd_table[i].upper_addr =
                                cpu_to_le32(upper_32_bits(sg->dma_address));
 +                      prd_table[i].reserved = 0;
                }
        } else {
                lrbp->utr_descriptor_ptr->prd_table_length = 0;
@@@ -2647,52 -1106,15 +2648,52 @@@ static void ufshcd_disable_intr(struct 
        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
  }
  
 +static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
 +              struct ufshcd_lrb *lrbp)
 +{
 +      struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
 +      u8 cc_index = 0;
 +      bool enable = false;
 +      u64 dun = 0;
 +      int ret;
 +
 +      /*
 +       * Call vendor-specific code to get the crypto info for this request:
 +       * the enable flag, the crypto configuration index and the DUN.
 +       * If crypto is bypassed (not enabled), don't bother setting the
 +       * other fields.
 +       */
 +      ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
 +      if (ret) {
 +              if (ret != -EAGAIN) {
 +                      dev_err(hba->dev,
 +                              "%s: failed to setup crypto request (%d)\n",
 +                              __func__, ret);
 +              }
 +
 +              return ret;
 +      }
 +
 +      if (!enable)
 +              goto out;
 +
 +      req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
 +      req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
 +      req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
 +out:
 +      return 0;
 +}
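As an aside, a minimal standalone sketch (plain C, not driver code) of how the 64-bit DUN is split across the two 32-bit header words above; split_dun() and the sample value are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit Data Unit Number into low/high 32-bit header words. */
static void split_dun(uint64_t dun, uint32_t *dword_1, uint32_t *dword_3)
{
	*dword_1 = (uint32_t)(dun & 0xFFFFFFFFu);         /* low 32 bits  */
	*dword_3 = (uint32_t)((dun >> 32) & 0xFFFFFFFFu); /* high 32 bits */
}

int main(void)
{
	uint32_t lo, hi;

	split_dun(0x1122334455667788ULL, &lo, &hi);
	printf("dword_1=0x%08x dword_3=0x%08x\n", (unsigned)lo, (unsigned)hi);
	return 0;
}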
 +
  /**
   * ufshcd_prepare_req_desc_hdr() - Fills the requests header
   * descriptor according to request
 + * @hba: per adapter instance
   * @lrbp: pointer to local reference block
   * @upiu_flags: flags required in the header
   * @cmd_dir: requests data direction
   */
 -static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
 -              u32 *upiu_flags, enum dma_data_direction cmd_dir)
 +static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
 +      struct ufshcd_lrb *lrbp, u32 *upiu_flags,
 +      enum dma_data_direction cmd_dir)
  {
        struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
        u32 data_direction;
  
        /* Transfer request descriptor header fields */
        req_desc->header.dword_0 = cpu_to_le32(dword_0);
 -
 +      /* dword_1 is reserved, hence it is set to 0 */
 +      req_desc->header.dword_1 = 0;
        /*
         * assigning invalid value for command status. Controller
         * updates OCS on command completion, with the command
         */
        req_desc->header.dword_2 =
                cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
 +      /* dword_3 is reserved, hence it is set to 0 */
 +      req_desc->header.dword_3 = 0;
 +
 +      req_desc->prd_table_length = 0;
 +
 +      if (ufshcd_is_crypto_supported(hba))
 +              return ufshcd_prepare_crypto_utrd(hba, lrbp);
 +
 +      return 0;
  }
  
  /**
@@@ -2746,7 -1158,6 +2747,7 @@@ stati
  void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
  {
        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
 +      unsigned short cdb_len;
  
        /* command descriptor fields */
        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
        ucd_req_ptr->sc.exp_data_transfer_len =
                cpu_to_be32(lrbp->cmd->sdb.length);
  
 -      memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
 -              (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
 +      cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
 +      memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
 +      if (cdb_len < MAX_CDB_SIZE)
 +              memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
 +                     (MAX_CDB_SIZE - cdb_len));
 +      memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  }
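A minimal standalone sketch of the copy-and-pad idiom used for the CDB above: copy at most the fixed CDB size and zero the remainder so no stale bytes reach the device. SKETCH_CDB_SIZE and copy_cdb() are illustrative stand-ins, assuming a 16-byte CDB field.

#include <stdint.h>
#include <string.h>

#define SKETCH_CDB_SIZE 16	/* assumed CDB field size, illustration only */

void copy_cdb(uint8_t *dst, const uint8_t *src, size_t src_len)
{
	size_t len = src_len < SKETCH_CDB_SIZE ? src_len : SKETCH_CDB_SIZE;

	memcpy(dst, src, len);
	if (len < SKETCH_CDB_SIZE)
		memset(dst + len, 0, SKETCH_CDB_SIZE - len);
}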
  
  /**
@@@ -2803,7 -1210,6 +2804,7 @@@ static void ufshcd_prepare_utp_query_re
        if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
                memcpy(descp, query->descriptor, len);
  
 +      memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  }
  
  static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
        ucd_req_ptr->header.dword_0 =
                UPIU_HEADER_DWORD(
                        UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
 +      /* clear rest of the fields of basic header */
 +      ucd_req_ptr->header.dword_1 = 0;
 +      ucd_req_ptr->header.dword_2 = 0;
 +
 +      memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  }
  
  /**
@@@ -2836,16 -1237,15 +2837,16 @@@ static int ufshcd_compose_upiu(struct u
        switch (lrbp->command_type) {
        case UTP_CMD_TYPE_SCSI:
                if (likely(lrbp->cmd)) {
 -                      ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
 -                                      lrbp->cmd->sc_data_direction);
 +                      ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
 +                              &upiu_flags, lrbp->cmd->sc_data_direction);
                        ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
                } else {
                        ret = -EINVAL;
                }
                break;
        case UTP_CMD_TYPE_DEV_MANAGE:
 -              ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
 +              ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
 +                      DMA_NONE);
                if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
                        ufshcd_prepare_utp_query_req_upiu(
                                        hba, lrbp, upiu_flags);
@@@ -2897,61 -1297,6 +2898,61 @@@ static inline u16 ufshcd_upiu_wlun_to_s
  }
  
  /**
 + * ufshcd_get_write_lock - synchronize between shutdown, scaling &
 + * arrival of requests
 + * @hba: ufs host
 + *
 + * The lock is predominantly held by the shutdown context, thus ensuring
 + * that no requests from any other context can sneak through.
 + */
 +static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
 +{
 +      down_write(&hba->lock);
 +}
 +
 +/**
 + * ufshcd_get_read_lock - synchronize between shutdown, scaling &
 + * arrival of requests
 + * @hba: ufs host
 + *
 + * Returns 1 if the lock was acquired, 0 if the request may proceed
 + * without it, < 0 on contention or error
 + *
 + * After shutdown is initiated, only requests directed to the well-known
 + * device LUN are allowed through. The existing synchronization between
 + * scaling and request issue is kept as is, and this restructuring also
 + * syncs shutdown with both of them.
 + */
 +static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
 +{
 +      int err = 0;
 +
 +      err = down_read_trylock(&hba->lock);
 +      if (err > 0)
 +              goto out;
 +      /* let requests for the well-known device LUN go through */
 +      if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
 +              return 0;
 +      else if (!ufshcd_is_shutdown_ongoing(hba))
 +              return -EAGAIN;
 +      else
 +              return -EPERM;
 +
 +out:
 +      return err;
 +}
 +
 +/**
 + * ufshcd_put_read_lock - synchronize between shutdown, scaling &
 + * arrival of requests
 + * @hba: ufs host
 + *
 + * Returns none
 + */
 +static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
 +{
 +      up_read(&hba->lock);
 +}
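For context, a standalone pthread-based sketch (illustrative only, not driver code) of the same synchronization idea: the I/O fast path only try-acquires the shared lock and reports busy on contention, while shutdown takes it exclusively so no new requests can slip in.

#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t issue_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Fast path: never block; ask the caller to retry on contention. */
int try_issue_request(void)
{
	if (pthread_rwlock_tryrdlock(&issue_lock) != 0)
		return -EAGAIN;		/* caller should requeue and retry */
	/* ... build and issue the request here ... */
	pthread_rwlock_unlock(&issue_lock);
	return 0;
}

/* Shutdown path: wait for in-flight readers, then hold the lock for good. */
void begin_shutdown(void)
{
	pthread_rwlock_wrlock(&issue_lock);
}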
 +
 +/**
   * ufshcd_queuecommand - main entry point for SCSI requests
   * @cmd: command from SCSI Midlayer
   * @done: call back function
@@@ -2965,42 -1310,12 +2966,42 @@@ static int ufshcd_queuecommand(struct S
        unsigned long flags;
        int tag;
        int err = 0;
 +      bool has_read_lock = false;
  
        hba = shost_priv(host);
  
 +      if (!cmd || !cmd->request || !hba)
 +              return -EINVAL;
 +
        tag = cmd->request->tag;
 +      if (!ufshcd_valid_tag(hba, tag)) {
 +              dev_err(hba->dev,
 +                      "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
 +                      __func__, tag, cmd, cmd->request);
 +              BUG();
 +      }
 +
 +      err = ufshcd_get_read_lock(hba, cmd->device->lun);
 +      if (unlikely(err < 0)) {
 +              if (err == -EPERM) {
 +                      set_host_byte(cmd, DID_ERROR);
 +                      cmd->scsi_done(cmd);
 +                      return 0;
 +              }
 +              if (err == -EAGAIN)
 +                      return SCSI_MLQUEUE_HOST_BUSY;
 +      } else if (err == 1) {
 +              has_read_lock = true;
 +      }
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +      /* if error handling is in progress, return host busy */
 +      if (ufshcd_eh_in_progress(hba)) {
 +              err = SCSI_MLQUEUE_HOST_BUSY;
 +              goto out_unlock;
 +      }
 +
        switch (hba->ufshcd_state) {
        case UFSHCD_STATE_OPERATIONAL:
                break;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 +      hba->req_abort_count = 0;
 +
        /* acquire the tag to make sure device cmds don't use it */
        if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
                /*
                goto out;
        }
  
 +      hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
        err = ufshcd_hold(hba, true);
        if (err) {
                err = SCSI_MLQUEUE_HOST_BUSY;
                clear_bit_unlock(tag, &hba->lrb_in_use);
                goto out;
        }
 +      if (ufshcd_is_clkgating_allowed(hba))
 +              WARN_ON(hba->clk_gating.state != CLKS_ON);
 +
 +      err = ufshcd_hibern8_hold(hba, true);
 +      if (err) {
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              err = SCSI_MLQUEUE_HOST_BUSY;
 +              hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
 +              ufshcd_release(hba, true);
 +              goto out;
 +      }
 +      if (ufshcd_is_hibern8_on_idle_allowed(hba))
 +              WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
 +
 +      /* Vote PM QoS for the request */
 +      ufshcd_vops_pm_qos_req_start(hba, cmd->request);
  
        /* IO svc time latency histogram */
 -      if (hba != NULL && cmd->request != NULL) {
 -              if (hba->latency_hist_enabled &&
 -                  (cmd->request->cmd_type == REQ_TYPE_FS)) {
 -                      cmd->request->lat_hist_io_start = ktime_get();
 -                      cmd->request->lat_hist_enabled = 1;
 -              } else
 -                      cmd->request->lat_hist_enabled = 0;
 +      if (hba->latency_hist_enabled &&
 +          (cmd->request->cmd_type == REQ_TYPE_FS)) {
 +              cmd->request->lat_hist_io_start = ktime_get();
 +              cmd->request->lat_hist_enabled = 1;
 +      } else {
 +              cmd->request->lat_hist_enabled = 0;
        }
  
        WARN_ON(hba->clk_gating.state != CLKS_ON);
  
        WARN_ON(lrbp->cmd);
        lrbp->cmd = cmd;
 -      lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
 +      lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
        lrbp->sense_buffer = cmd->sense_buffer;
        lrbp->task_tag = tag;
        lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
        lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
        lrbp->command_type = UTP_CMD_TYPE_SCSI;
 +      lrbp->req_abort_skip = false;
  
        /* form UPIU before issuing the command */
 -      ufshcd_compose_upiu(hba, lrbp);
 +      err = ufshcd_compose_upiu(hba, lrbp);
 +      if (err) {
 +              if (err != -EAGAIN)
 +                      dev_err(hba->dev,
 +                              "%s: failed to compose upiu %d\n",
 +                              __func__, err);
 +
 +              lrbp->cmd = NULL;
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +              goto out;
 +      }
 +
        err = ufshcd_map_sg(lrbp);
        if (err) {
                lrbp->cmd = NULL;
                clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +              goto out;
 +      }
 +
 +      err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
 +      if (err) {
 +              if (err != -EAGAIN)
 +                      dev_err(hba->dev,
 +                              "%s: failed to configure crypto engine %d\n",
 +                              __func__, err);
 +
 +              scsi_dma_unmap(lrbp->cmd);
 +              lrbp->cmd = NULL;
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +
                goto out;
        }
  
 +      /* Make sure descriptors are ready before ringing the doorbell */
 +      wmb();
        /* issue command to the controller */
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_send_command(hba, tag);
 +
 +      err = ufshcd_send_command(hba, tag);
 +      if (err) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              scsi_dma_unmap(lrbp->cmd);
 +              lrbp->cmd = NULL;
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +              ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
 +              dev_err(hba->dev, "%s: failed sending command, %d\n",
 +                                                      __func__, err);
 +              err = DID_ERROR;
 +              goto out;
 +      }
 +
  out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  out:
 +      if (has_read_lock)
 +              ufshcd_put_read_lock(hba);
        return err;
  }
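Each failure leg in ufshcd_queuecommand() above unwinds exactly the resources taken so far (DMA mapping, lrb slot, clock/hibern8 votes, PM QoS, crypto config). A standalone sketch of that staged goto-unwind shape; every helper name below is a hypothetical stand-in, not a driver API.

/* Hypothetical stand-ins for the real resource-taking steps. */
static int take_slot(void)      { return 0; }
static int map_buffers(void)    { return 0; }
static int ring_doorbell(void)  { return 0; }
static void unmap_buffers(void) { }
static void release_slot(void)  { }

int issue_one_request(void)
{
	int err;

	err = take_slot();
	if (err)
		return err;

	err = map_buffers();
	if (err)
		goto put_slot;

	err = ring_doorbell();
	if (err)
		goto unmap;

	return 0;	/* success: the completion path releases resources later */

unmap:
	unmap_buffers();
put_slot:
	release_slot();
	return err;
}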
  
@@@ -3182,7 -1428,7 +3183,7 @@@ ufshcd_clear_cmd(struct ufs_hba *hba, i
         */
        err = ufshcd_wait_for_register(hba,
                        REG_UTP_TRANSFER_REQ_DOOR_BELL,
 -                      mask, ~mask, 1000, 1000);
 +                      mask, ~mask, 1000, 1000, true);
  
        return err;
  }
@@@ -3209,7 -1455,6 +3210,7 @@@ ufshcd_dev_cmd_completion(struct ufs_hb
        int resp;
        int err = 0;
  
 +      hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
        resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
  
        switch (resp) {
@@@ -3262,22 -1507,11 +3263,22 @@@ static int ufshcd_wait_for_dev_cmd(stru
  
        if (!time_left) {
                err = -ETIMEDOUT;
 +              dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
 +                      __func__, lrbp->task_tag);
                if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
 -                      /* sucessfully cleared the command, retry if needed */
 +                      /* successfully cleared the command, retry if needed */
                        err = -EAGAIN;
 +              /*
 +               * in case of an error, after clearing the doorbell,
 +               * we also need to clear the outstanding_request
 +               * field in hba
 +               */
 +              ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
        }
  
 +      if (err)
 +              ufsdbg_set_err_state(hba);
 +
        return err;
  }
  
@@@ -3338,15 -1572,6 +3339,15 @@@ static int ufshcd_exec_dev_cmd(struct u
        unsigned long flags;
  
        /*
 +       * May get invoked from shutdown and IOCTL contexts.
 +       * In the shutdown context it is invoked with the lock already held;
 +       * in the error recovery context it may also come with the lock held.
 +       */
 +
 +      if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
 +              down_read(&hba->lock);
 +
 +      /*
         * Get free slot, sleep if slots are unavailable.
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by SCSI request timeout.
  
        hba->dev_cmd.complete = &wait;
  
 +      /* Make sure descriptors are ready before ringing the doorbell */
 +      wmb();
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_send_command(hba, tag);
 +      err = ufshcd_send_command(hba, tag);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 -
 +      if (err) {
 +              dev_err(hba->dev, "%s: failed sending command, %d\n",
 +                                                      __func__, err);
 +              goto out_put_tag;
 +      }
        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
  
  out_put_tag:
        ufshcd_put_dev_cmd_tag(hba, tag);
        wake_up(&hba->dev_cmd.tag_wq);
 +      if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
 +              up_read(&hba->lock);
        return err;
  }
  
@@@ -3396,12 -1613,6 +3397,12 @@@ static inline void ufshcd_init_query(st
                struct ufs_query_req **request, struct ufs_query_res **response,
                enum query_opcode opcode, u8 idn, u8 index, u8 selector)
  {
 +      int idn_t = (int)idn;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
 +      idn = idn_t;
 +
        *request = &hba->dev_cmd.query.request;
        *response = &hba->dev_cmd.query.response;
        memset(*request, 0, sizeof(struct ufs_query_req));
        (*request)->upiu_req.idn = idn;
        (*request)->upiu_req.index = index;
        (*request)->upiu_req.selector = selector;
 +
 +      ufshcd_update_query_stats(hba, opcode, idn);
 +}
 +
 +static int ufshcd_query_flag_retry(struct ufs_hba *hba,
 +      enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
 +{
 +      int ret;
 +      int retries;
 +
 +      for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
 +              ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
 +              if (ret)
 +                      dev_dbg(hba->dev,
 +                              "%s: failed with error %d, retries %d\n",
 +                              __func__, ret, retries);
 +              else
 +                      break;
 +      }
 +
 +      if (ret)
 +              dev_err(hba->dev,
 +                      "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
 +                      __func__, opcode, idn, ret, retries);
 +      return ret;
  }
  
  /**
   *
   * Returns 0 for success, non-zero in case of failure
   */
 -static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 +int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
                        enum flag_idn idn, bool *flag_res)
  {
        struct ufs_query_req *request = NULL;
        struct ufs_query_res *response = NULL;
        int err, index = 0, selector = 0;
 +      int timeout = QUERY_REQ_TIMEOUT;
  
        BUG_ON(!hba);
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        mutex_lock(&hba->dev_cmd.lock);
        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
                        selector);
                goto out_unlock;
        }
  
 -      err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 +      err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
  
        if (err) {
                dev_err(hba->dev,
                        "%s: Sending flag query for idn %d failed, err = %d\n",
 -                      __func__, idn, err);
 +                      __func__, request->upiu_req.idn, err);
                goto out_unlock;
        }
  
  
  out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
 +EXPORT_SYMBOL(ufshcd_query_flag);
  
  /**
   * ufshcd_query_attr - API function for sending attribute requests
   *
   * Returns 0 for success, non-zero in case of failure
  */
 -static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 +int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
                        enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
  {
        struct ufs_query_req *request = NULL;
  
        BUG_ON(!hba);
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        if (!attr_val) {
                dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
                                __func__, opcode);
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  
        if (err) {
 -              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
 -                              __func__, opcode, idn, err);
 +              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
 +                              __func__, opcode,
 +                              request->upiu_req.idn, index, err);
                goto out_unlock;
        }
  
  out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
  out:
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
 +EXPORT_SYMBOL(ufshcd_query_attr);
  
  /**
 - * ufshcd_query_descriptor - API function for sending descriptor requests
 - * hba: per-adapter instance
 - * opcode: attribute opcode
 - * idn: attribute idn to access
 - * index: index field
 - * selector: selector field
 - * desc_buf: the buffer that contains the descriptor
 - * buf_len: length parameter passed to the device
 + * ufshcd_query_attr_retry() - API function for sending query
 + * attribute with retries
 + * @hba: per-adapter instance
 + * @opcode: attribute opcode
 + * @idn: attribute idn to access
 + * @index: index field
 + * @selector: selector field
 + * @attr_val: the attribute value after the query request
 + * completes
   *
 - * Returns 0 for success, non-zero in case of failure.
 - * The buf_len parameter will contain, on return, the length parameter
 - * received on the response.
 - */
 -static int ufshcd_query_descriptor(struct ufs_hba *hba,
 + * Returns 0 for success, non-zero in case of failure
 + */
 +static int ufshcd_query_attr_retry(struct ufs_hba *hba,
 +      enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
 +      u32 *attr_val)
 +{
 +      int ret = 0;
 +      u32 retries;
 +
 +      for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 +              ret = ufshcd_query_attr(hba, opcode, idn, index,
 +                                              selector, attr_val);
 +              if (ret)
 +                      dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
 +                              __func__, ret, retries);
 +              else
 +                      break;
 +      }
 +
 +      if (ret)
 +              dev_err(hba->dev,
 +                      "%s: query attribute, idn %d, failed with error %d after %d retries\n",
 +                      __func__, idn, ret, retries);
 +      return ret;
 +}
 +
 +static int __ufshcd_query_descriptor(struct ufs_hba *hba,
                        enum query_opcode opcode, enum desc_idn idn, u8 index,
                        u8 selector, u8 *desc_buf, int *buf_len)
  {
  
        BUG_ON(!hba);
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        if (!desc_buf) {
                dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
                                __func__, opcode);
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  
        if (err) {
 -              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
 -                              __func__, opcode, idn, err);
 +              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
 +                              __func__, opcode,
 +                              request->upiu_req.idn, index, err);
                goto out_unlock;
        }
  
  out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
  out:
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
 +      return err;
 +}
 +
 +/**
 + * ufshcd_query_descriptor - API function for sending descriptor requests
 + * @hba: per-adapter instance
 + * @opcode: attribute opcode
 + * @idn: attribute idn to access
 + * @index: index field
 + * @selector: selector field
 + * @desc_buf: the buffer that contains the descriptor
 + * @buf_len: length parameter passed to the device
 + *
 + * Returns 0 for success, non-zero in case of failure.
 + * The buf_len parameter will contain, on return, the length parameter
 + * received on the response.
 + */
 +int ufshcd_query_descriptor(struct ufs_hba *hba,
 +                      enum query_opcode opcode, enum desc_idn idn, u8 index,
 +                      u8 selector, u8 *desc_buf, int *buf_len)
 +{
 +      int err;
 +      int retries;
 +
 +      for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 +              err = __ufshcd_query_descriptor(hba, opcode, idn, index,
 +                                              selector, desc_buf, buf_len);
 +              if (!err || err == -EINVAL)
 +                      break;
 +      }
 +
        return err;
  }
 +EXPORT_SYMBOL(ufshcd_query_descriptor);
  
  /**
   * ufshcd_read_desc_param - read the specified descriptor parameter
@@@ -3751,38 -1877,15 +3752,38 @@@ static int ufshcd_read_desc_param(struc
                                      desc_id, desc_index, 0, desc_buf,
                                      &buff_len);
  
 -      if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
 -          (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
 -           ufs_query_desc_max_size[desc_id])
 -          || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
 -              dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
 -                      __func__, desc_id, param_offset, buff_len, ret);
 -              if (!ret)
 -                      ret = -EINVAL;
 +      if (ret) {
 +              dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
 +                      __func__, desc_id, desc_index, param_offset, ret);
 +
 +              goto out;
 +      }
 +
 +      /* Sanity check */
 +      if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
 +              dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
 +                      __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
 +              ret = -EINVAL;
 +              goto out;
 +      }
  
 +      /*
 +       * While reading variable size descriptors (like string descriptor),
 +       * some UFS devices may report the "LENGTH" (field in "Transaction
 +       * Specific fields" of Query Response UPIU) same as what was requested
 +       * in Query Request UPIU instead of reporting the actual size of the
 +       * variable size descriptor.
 +       * It is safe to ignore the "LENGTH" field for variable size
 +       * descriptors, as we can always derive the length of the descriptor
 +       * from the descriptor header fields. Hence this change imposes the
 +       * length match check only for fixed size descriptors (for which we
 +       * always request the correct size as part of Query Request UPIU).
 +       */
 +      if ((desc_id != QUERY_DESC_IDN_STRING) &&
 +          (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
 +              dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
 +                      __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
 +              ret = -EINVAL;
                goto out;
        }
  
@@@ -3810,82 -1913,6 +3811,82 @@@ static inline int ufshcd_read_power_des
        return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
  }
  
 +int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
 +{
 +      return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
 +}
 +
 +/**
 + * ufshcd_read_string_desc - read string descriptor
 + * @hba: pointer to adapter instance
 + * @desc_index: descriptor index
 + * @buf: pointer to buffer where descriptor would be read
 + * @size: size of buf
 + * @ascii: if true, convert from UTF-16 to ASCII characters
 + *
 + * Return 0 in case of success, non-zero otherwise
 + */
 +int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
 +                              u32 size, bool ascii)
 +{
 +      int err = 0;
 +
 +      err = ufshcd_read_desc(hba,
 +                              QUERY_DESC_IDN_STRING, desc_index, buf, size);
 +
 +      if (err) {
 +              dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
 +                      __func__, QUERY_REQ_RETRIES, err);
 +              goto out;
 +      }
 +
 +      if (ascii) {
 +              int desc_len;
 +              int ascii_len;
 +              int i;
 +              char *buff_ascii;
 +
 +              desc_len = buf[0];
 +              /* remove header and divide by 2 to move from UTF16 to UTF8 */
 +              ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
 +              if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
 +                      dev_err(hba->dev, "%s: allocated buffer size is too small\n",
 +                                      __func__);
 +                      err = -ENOMEM;
 +                      goto out;
 +              }
 +
 +              buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
 +              if (!buff_ascii) {
 +                      dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
 +                                      __func__, ascii_len);
 +                      err = -ENOMEM;
 +                      goto out_free_buff;
 +              }
 +
 +              /*
 +               * the descriptor contains string in UTF16 format
 +               * we need to convert to utf-8 so it can be displayed
 +               */
 +              utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
 +                              desc_len - QUERY_DESC_HDR_SIZE,
 +                              UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
 +
 +              /* replace non-printable or non-ASCII characters with spaces */
 +              for (i = 0; i < ascii_len; i++)
 +                      ufshcd_remove_non_printable(&buff_ascii[i]);
 +
 +              memset(buf + QUERY_DESC_HDR_SIZE, 0,
 +                              size - QUERY_DESC_HDR_SIZE);
 +              memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
 +              buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
 +out_free_buff:
 +              kfree(buff_ascii);
 +      }
 +out:
 +      return err;
 +}
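The ASCII path above relies on the kernel's utf16s_to_utf8s(); as a rough standalone illustration of that step, the simplified converter below maps UTF-16BE code units to printable ASCII and substitutes spaces for everything else. It only handles the Basic Latin range and is not the driver's implementation.

#include <ctype.h>
#include <stddef.h>

/* Simplified stand-in: UTF-16BE -> printable ASCII, spaces elsewhere. */
size_t utf16be_to_ascii(const unsigned char *src, size_t src_bytes,
			char *dst, size_t dst_size)
{
	size_t i, out = 0;

	if (dst_size == 0)
		return 0;

	for (i = 0; i + 1 < src_bytes && out + 1 < dst_size; i += 2) {
		unsigned int cp = (src[i] << 8) | src[i + 1];

		dst[out++] = (cp < 0x80 && isprint((int)cp)) ? (char)cp : ' ';
	}
	dst[out] = '\0';
	return out;
}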
 +
  /**
   * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
   * @hba: Pointer to adapter instance
@@@ -3906,7 -1933,7 +3907,7 @@@ static inline int ufshcd_read_unit_desc
         * Unit descriptors are only available for general purpose LUs (LUN id
         * from 0 to 7) and RPMB Well known LU.
         */
 -      if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
 +      if (!ufs_is_valid_unit_desc_lun(lun))
                return -EOPNOTSUPP;
  
        return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@@ -4048,19 -2075,12 +4049,19 @@@ static void ufshcd_host_memory_configur
                                cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
  
                hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
 +              hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
 +                              (i * sizeof(struct utp_transfer_req_desc));
                hba->lrb[i].ucd_req_ptr =
                        (struct utp_upiu_req *)(cmd_descp + i);
 +              hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
                hba->lrb[i].ucd_rsp_ptr =
                        (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
 +              hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
 +                              response_offset;
                hba->lrb[i].ucd_prdt_ptr =
                        (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
 +              hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
 +                              prdt_offset;
        }
  }
  
@@@ -4084,7 -2104,7 +4085,7 @@@ static int ufshcd_dme_link_startup(stru
  
        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret)
 -              dev_err(hba->dev,
 +              dev_dbg(hba->dev,
                        "dme-link-startup: error code %d\n", ret);
        return ret;
  }
@@@ -4120,13 -2140,6 +4121,13 @@@ static inline void ufshcd_add_delay_bef
        usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
  }
  
 +static inline void ufshcd_save_tstamp_of_last_dme_cmd(
 +                      struct ufs_hba *hba)
 +{
 +      if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
 +              hba->last_dme_cmd_tstamp = ktime_get();
 +}
 +
  /**
   * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
   * @hba: per adapter instance
@@@ -4147,10 -2160,6 +4148,10 @@@ int ufshcd_dme_set_attr(struct ufs_hba 
        };
        const char *set = action[!!peer];
        int ret;
 +      int retries = UFS_UIC_COMMAND_RETRIES;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
  
        uic_cmd.command = peer ?
                UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
        uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
        uic_cmd.argument3 = mib_val;
  
 -      ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 +      do {
 +              /* for peer attributes we retry upon failure */
 +              ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 +              if (ret)
 +                      dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
 +                              set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
 +      } while (ret && peer && --retries);
 +
        if (ret)
 -              dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
 -                      set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
 +              dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
 +                      set, UIC_GET_ATTR_ID(attr_sel), mib_val,
 +                      UFS_UIC_COMMAND_RETRIES - retries);
  
        return ret;
  }
@@@ -4194,7 -2195,6 +4195,7 @@@ int ufshcd_dme_get_attr(struct ufs_hba 
        };
        const char *get = action[!!peer];
        int ret;
 +      int retries = UFS_UIC_COMMAND_RETRIES;
        struct ufs_pa_layer_attr orig_pwr_info;
        struct ufs_pa_layer_attr temp_pwr_info;
        bool pwr_mode_change = false;
  
        uic_cmd.command = peer ?
                UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
 +
        uic_cmd.argument1 = attr_sel;
  
 -      ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 -      if (ret) {
 -              dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
 -                      get, UIC_GET_ATTR_ID(attr_sel), ret);
 -              goto out;
 -      }
 +      do {
 +              /* for peer attributes we retry upon failure */
 +              ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 +              if (ret)
 +                      dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
 +                              get, UIC_GET_ATTR_ID(attr_sel), ret);
 +      } while (ret && peer && --retries);
 +
 +      if (ret)
 +              dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
 +                      get, UIC_GET_ATTR_ID(attr_sel),
 +                      UFS_UIC_COMMAND_RETRIES - retries);
  
 -      if (mib_val)
 +      if (mib_val && !ret)
                *mib_val = uic_cmd.argument3;
  
        if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
@@@ -4275,7 -2265,6 +4276,7 @@@ static int ufshcd_uic_pwr_ctrl(struct u
        unsigned long flags;
        u8 status;
        int ret;
 +      bool reenable_intr = false;
  
        mutex_lock(&hba->uic_cmd_mutex);
        init_completion(&uic_async_done);
  
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->uic_async_done = &uic_async_done;
 -      ret = __ufshcd_send_uic_cmd(hba, cmd);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 -      if (ret) {
 -              dev_err(hba->dev,
 -                      "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
 -                      cmd->command, cmd->argument3, ret);
 -              goto out;
 +      if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
 +              ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
 +              /*
 +               * Make sure UIC command completion interrupt is disabled before
 +               * issuing UIC command.
 +               */
 +              wmb();
 +              reenable_intr = true;
        }
 -      ret = ufshcd_wait_for_uic_cmd(hba, cmd);
 +      ret = __ufshcd_send_uic_cmd(hba, cmd, false);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (ret) {
                dev_err(hba->dev,
                        "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
                        cmd->command, status);
                ret = (status != PWR_OK) ? status : -1;
        }
 +      ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
 +
  out:
 +      if (ret) {
 +              ufsdbg_set_err_state(hba);
 +              ufshcd_print_host_state(hba);
 +              ufshcd_print_pwr_info(hba);
 +              ufshcd_print_host_regs(hba);
 +              ufshcd_print_cmd_log(hba);
 +      }
 +
 +      ufshcd_save_tstamp_of_last_dme_cmd(hba);
        spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->active_uic_cmd = NULL;
        hba->uic_async_done = NULL;
 +      if (reenable_intr)
 +              ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        mutex_unlock(&hba->uic_cmd_mutex);
 +      return ret;
 +}
 +
 +int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
 +{
 +      unsigned long flags;
 +      int ret = 0;
 +      u32 tm_doorbell;
 +      u32 tr_doorbell;
 +      bool timeout = false, do_last_check = false;
 +      ktime_t start;
 +
 +      ufshcd_hold_all(hba);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      /*
 +       * Wait for all the outstanding tasks/transfer requests.
 +       * Verify by checking the doorbell registers are clear.
 +       */
 +      start = ktime_get();
 +      do {
 +              if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
 +                      ret = -EBUSY;
 +                      goto out;
 +              }
 +
 +              tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 +              tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +              if (!tm_doorbell && !tr_doorbell) {
 +                      timeout = false;
 +                      break;
 +              } else if (do_last_check) {
 +                      break;
 +              }
 +
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              schedule();
 +              if (ktime_to_us(ktime_sub(ktime_get(), start)) >
 +                  wait_timeout_us) {
 +                      timeout = true;
 +                      /*
 +                       * We might have been scheduled out for a long time,
 +                       * so make one last check whether the doorbells have
 +                       * been cleared by now.
 +                       */
 +                      do_last_check = true;
 +              }
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +      } while (tm_doorbell || tr_doorbell);
  
 +      if (timeout) {
 +              dev_err(hba->dev,
 +                      "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
 +                      __func__, tm_doorbell, tr_doorbell);
 +              ret = -EBUSY;
 +      }
 +out:
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufshcd_release_all(hba);
        return ret;
  }
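The doorbell wait above is a poll-with-deadline loop that performs one final check after it may have been scheduled out past the deadline. A standalone sketch of that shape; doorbells_clear() is a hypothetical predicate standing in for reading the two doorbell registers.

#include <errno.h>
#include <sched.h>
#include <stdbool.h>
#include <time.h>

static long long monotonic_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Poll until the predicate holds; allow one last check past the deadline. */
int wait_until_clear(bool (*doorbells_clear)(void), long long timeout_us)
{
	long long start = monotonic_us();
	bool do_last_check = false;

	for (;;) {
		if (doorbells_clear())
			return 0;
		if (do_last_check)
			return -EBUSY;	/* still busy after the final check */
		sched_yield();		/* mirrors schedule() in the driver loop */
		if (monotonic_us() - start > timeout_us)
			do_last_check = true;
	}
}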
  
@@@ -4423,149 -2339,33 +4424,149 @@@ static int ufshcd_uic_change_pwr_mode(s
        uic_cmd.command = UIC_CMD_DME_SET;
        uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
        uic_cmd.argument3 = mode;
 -      ufshcd_hold(hba, false);
 +      hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
 +      ufshcd_hold_all(hba);
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 -      ufshcd_release(hba);
 -
 +      hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
 +      ufshcd_release_all(hba);
  out:
        return ret;
  }
  
 -static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 +static int ufshcd_link_recovery(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      unsigned long flags;
 +
 +      /*
 +       * Check if there is any race with fatal error handling.
 +       * If so, wait for it to complete. Even though fatal error
 +       * handling does reset and restore in some cases, don't assume
 +       * anything about its outcome; we are just avoiding the race here.
 +       */
 +      do {
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              if (!(work_pending(&hba->eh_work) ||
 +                              hba->ufshcd_state == UFSHCD_STATE_RESET))
 +                      break;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
 +              flush_work(&hba->eh_work);
 +      } while (1);
 +
 +      /*
 +       * We don't know whether the previous reset actually reset the host
 +       * controller or not, so force a reset here to be sure.
 +       */
 +      hba->ufshcd_state = UFSHCD_STATE_ERROR;
 +      hba->force_host_reset = true;
 +      schedule_work(&hba->eh_work);
 +
 +      /* wait for the reset work to finish */
 +      do {
 +              if (!(work_pending(&hba->eh_work) ||
 +                              hba->ufshcd_state == UFSHCD_STATE_RESET))
 +                      break;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
 +              flush_work(&hba->eh_work);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +      } while (1);
 +
 +      if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
 +            ufshcd_is_link_active(hba)))
 +              ret = -ENOLINK;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      return ret;
 +}
 +
 +static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
  {
 +      int ret;
        struct uic_command uic_cmd = {0};
 +      ktime_t start = ktime_get();
  
        uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
 +      ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 +      trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
 +                           ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 +
 +      /*
 +       * Do full reinit if enter failed or if LINERESET was detected during
 +       * Hibern8 operation. After LINERESET, the link moves to the default
 +       * PWM-G1 mode, hence a full reinit is required to move the link back
 +       * to HS speeds.
 +       */
 +      if (ret || hba->full_init_linereset) {
 +              int err;
 +
 +              hba->full_init_linereset = false;
 +              ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
 +              dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
 +                      __func__, ret);
 +              /*
 +               * If link recovery fails then return the error code (-ENOLINK)
 +               * returned by ufshcd_link_recovery().
 +               * If link recovery succeeds then return -EAGAIN to attempt
 +               * the hibern8 enter again.
 +               */
 +              err = ufshcd_link_recovery(hba);
 +              if (err) {
 +                      dev_err(hba->dev, "%s: link recovery failed", __func__);
 +                      ret = err;
 +              } else {
 +                      ret = -EAGAIN;
 +              }
 +      } else {
 +              dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
 +                      ktime_to_us(ktime_get()));
 +      }
 +
 +      return ret;
 +}
 +
 +int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 +{
 +      int ret = 0, retries;
  
 -      return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 +      for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
 +              ret = __ufshcd_uic_hibern8_enter(hba);
 +              if (!ret)
 +                      goto out;
 +              else if (ret != -EAGAIN)
 +                      /* Unable to recover the link, so no point proceeding */
 +                      BUG();
 +      }
 +out:
 +      return ret;
  }
  
 -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 +int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
  {
        struct uic_command uic_cmd = {0};
        int ret;
 +      ktime_t start = ktime_get();
  
        uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 +      trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
 +                           ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 +
 +      /* Do full reinit if exit failed */
        if (ret) {
 -              ufshcd_set_link_off(hba);
 -              ret = ufshcd_host_reset_and_restore(hba);
 +              ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
 +              dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
 +                      __func__, ret);
 +              ret = ufshcd_link_recovery(hba);
 +              /* Unable to recover the link, so no point proceeding */
 +              if (ret)
 +                      BUG();
 +      } else {
 +              dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
 +                      ktime_to_us(ktime_get()));
 +              hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
 +              hba->ufs_stats.hibern8_exit_cnt++;
        }
  
        return ret;
@@@ -4598,8 -2398,8 +4599,8 @@@ static int ufshcd_get_max_pwr_mode(stru
        if (hba->max_pwr_info.is_valid)
                return 0;
  
 -      pwr_info->pwr_tx = FASTAUTO_MODE;
 -      pwr_info->pwr_rx = FASTAUTO_MODE;
 +      pwr_info->pwr_tx = FAST_MODE;
 +      pwr_info->pwr_rx = FAST_MODE;
        pwr_info->hs_rate = PA_HS_MODE_B;
  
        /* Get the connected lane count */
                                __func__, pwr_info->gear_rx);
                        return -EINVAL;
                }
 -              pwr_info->pwr_rx = SLOWAUTO_MODE;
 +              pwr_info->pwr_rx = SLOW_MODE;
        }
  
        ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
                                __func__, pwr_info->gear_tx);
                        return -EINVAL;
                }
 -              pwr_info->pwr_tx = SLOWAUTO_MODE;
 +              pwr_info->pwr_tx = SLOW_MODE;
        }
  
        hba->max_pwr_info.is_valid = true;
        return 0;
  }
  
 -static int ufshcd_change_power_mode(struct ufs_hba *hba,
 +int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode)
  {
 -      int ret;
 +      int ret = 0;
  
        /* if already configured to the requested pwr_mode */
 -      if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
 -          pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
 +      if (!hba->restore_needed &&
 +              pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
 +              pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
            pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
            pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
            pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
                return 0;
        }
  
 +      ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
 +      if (ret)
 +              return ret;
 +
        /*
         * Configure attributes for power mode change with below.
         * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
                                                pwr_mode->hs_rate);
  
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
 +                      DL_FC0ProtectionTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
 +                      DL_TC0ReplayTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
 +                      DL_AFC0ReqTimeOutVal_Default);
 +
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
 +                      DL_FC0ProtectionTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
 +                      DL_TC0ReplayTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
 +                      DL_AFC0ReqTimeOutVal_Default);
 +
        ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
                        | pwr_mode->pwr_tx);
  
        if (ret) {
 +              ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
                dev_err(hba->dev,
                        "%s: power mode change failed %d\n", __func__, ret);
        } else {
  
                memcpy(&hba->pwr_info, pwr_mode,
                        sizeof(struct ufs_pa_layer_attr));
 +              hba->ufs_stats.power_mode_change_cnt++;
        }
  
        return ret;
@@@ -4754,8 -2533,6 +4755,8 @@@ static int ufshcd_config_pwr_mode(struc
                memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
  
        ret = ufshcd_change_power_mode(hba, &final_params);
 +      if (!ret)
 +              ufshcd_print_pwr_info(hba);
  
        return ret;
  }
   */
  static int ufshcd_complete_dev_init(struct ufs_hba *hba)
  {
 -      int i, retries, err = 0;
 +      int i;
 +      int err;
        bool flag_res = 1;
  
 -      for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 -              /* Set the fDeviceInit flag */
 -              err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 -                                      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 -              if (!err || err == -ETIMEDOUT)
 -                      break;
 -              dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
 -      }
 +      err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 +              QUERY_FLAG_IDN_FDEVICEINIT, NULL);
        if (err) {
                dev_err(hba->dev,
                        "%s setting fDeviceInit flag failed with error %d\n",
                goto out;
        }
  
 -      /* poll for max. 100 iterations for fDeviceInit flag to clear */
 -      for (i = 0; i < 100 && !err && flag_res; i++) {
 -              for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 -                      err = ufshcd_query_flag(hba,
 -                                      UPIU_QUERY_OPCODE_READ_FLAG,
 -                                      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 -                      if (!err || err == -ETIMEDOUT)
 -                              break;
 -                      dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
 -                                      err);
 -              }
 -      }
 +      /* poll for max. 1000 iterations for fDeviceInit flag to clear */
 +      for (i = 0; i < 1000 && !err && flag_res; i++)
 +              err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 +                      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 +
        if (err)
                dev_err(hba->dev,
                        "%s reading fDeviceInit flag failed with error %d\n",
@@@ -4806,7 -2595,7 +4807,7 @@@ out
   * To bring UFS host controller to operational state,
   * 1. Enable required interrupts
   * 2. Configure interrupt aggregation
 - * 3. Program UTRL and UTMRL base addres
 + * 3. Program UTRL and UTMRL base address
   * 4. Configure run-stop-registers
   *
   * Returns 0 on success, non-zero value on failure
@@@ -4836,13 -2625,8 +4837,13 @@@ static int ufshcd_make_hba_operational(
                        REG_UTP_TASK_REQ_LIST_BASE_H);
  
        /*
 +       * Make sure base address and interrupt setup are updated before
 +       * enabling the run/stop registers below.
 +       */
 +      wmb();
 +
 +      /*
         * UCRDY, UTMRLDY and UTRLRDY bits must be 1
 -       * DEI, HEI bits must be 0
         */
        reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
        if (!(ufshcd_get_lists_status(reg))) {
  }
  
  /**
 + * ufshcd_hba_stop - Send controller to reset state
 + * @hba: per adapter instance
 + * @can_sleep: perform sleep or just spin
 + */
 +static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
 +{
 +      int err;
 +
 +      ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
 +      err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 +                                      CONTROLLER_ENABLE, CONTROLLER_DISABLE,
 +                                      10, 1, can_sleep);
 +      if (err)
 +              dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
 +}
 +
 +/**
   * ufshcd_hba_enable - initialize the controller
   * @hba: per adapter instance
   *
@@@ -4895,9 -2662,18 +4896,9 @@@ static int ufshcd_hba_enable(struct ufs
         * development and testing of this driver. msleep can be changed to
         * mdelay and retry count can be reduced based on the controller.
         */
 -      if (!ufshcd_is_hba_active(hba)) {
 -
 +      if (!ufshcd_is_hba_active(hba))
                /* change controller state to "reset state" */
 -              ufshcd_hba_stop(hba);
 -
 -              /*
 -               * This delay is based on the testing done with UFS host
 -               * controller FPGA. The delay can be changed based on the
 -               * host controller used.
 -               */
 -              msleep(5);
 -      }
 +              ufshcd_hba_stop(hba, true);
  
        /* UniPro link is disabled at this point */
        ufshcd_set_link_off(hba);
@@@ -4971,11 -2747,6 +4972,11 @@@ static int ufshcd_disable_tx_lcc(struc
        return err;
  }
  
 +static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
 +{
 +      return ufshcd_disable_tx_lcc(hba, false);
 +}
 +
  static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
  {
        return ufshcd_disable_tx_lcc(hba, true);
@@@ -4991,26 -2762,14 +4992,26 @@@ static int ufshcd_link_startup(struct u
  {
        int ret;
        int retries = DME_LINKSTARTUP_RETRIES;
 +      bool link_startup_again = false;
  
 +      /*
 +       * If the UFS device isn't active then we will have to issue link startup
 +       * twice to make sure the device state moves to active.
 +       */
 +      if (!ufshcd_is_ufs_dev_active(hba))
 +              link_startup_again = true;
 +
 +link_startup:
        do {
                ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
  
                ret = ufshcd_dme_link_startup(hba);
 +              if (ret)
 +                      ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
  
                /* check if device is detected by inter-connect layer */
                if (!ret && !ufshcd_is_device_present(hba)) {
 +                      ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
                        dev_err(hba->dev, "%s: Device not present\n", __func__);
                        ret = -ENXIO;
                        goto out;
                /* failed to get the link up... retire */
                goto out;
  
 +      if (link_startup_again) {
 +              link_startup_again = false;
 +              retries = DME_LINKSTARTUP_RETRIES;
 +              goto link_startup;
 +      }
 +
 +      /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
 +      ufshcd_init_pwr_info(hba);
 +      ufshcd_print_pwr_info(hba);
 +
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
                ret = ufshcd_disable_device_tx_lcc(hba);
                if (ret)
                        goto out;
        }
  
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
 +              ret = ufshcd_disable_host_tx_lcc(hba);
 +              if (ret)
 +                      goto out;
 +      }
 +
        /* Include any host controller configuration via UIC commands */
        ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
        if (ret)
  
        ret = ufshcd_make_hba_operational(hba);
  out:
 -      if (ret)
 +      if (ret) {
                dev_err(hba->dev, "link startup failed %d\n", ret);
 +              ufshcd_print_host_state(hba);
 +              ufshcd_print_pwr_info(hba);
 +              ufshcd_print_host_regs(hba);
 +      }
        return ret;
  }
  
@@@ -5082,7 -2821,7 +5083,7 @@@ static int ufshcd_verify_dev_init(struc
        int err = 0;
        int retries;
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        mutex_lock(&hba->dev_cmd.lock);
        for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
                dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
        }
        mutex_unlock(&hba->dev_cmd.lock);
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
  
        if (err)
                dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@@ -5120,10 -2859,10 +5121,10 @@@ static void ufshcd_set_queue_depth(stru
  
        lun_qdepth = hba->nutrs;
        ret = ufshcd_read_unit_desc_param(hba,
 -                                        ufshcd_scsi_to_upiu_lun(sdev->lun),
 -                                        UNIT_DESC_PARAM_LU_Q_DEPTH,
 -                                        &lun_qdepth,
 -                                        sizeof(lun_qdepth));
 +                        ufshcd_scsi_to_upiu_lun(sdev->lun),
 +                        UNIT_DESC_PARAM_LU_Q_DEPTH,
 +                        &lun_qdepth,
 +                        sizeof(lun_qdepth));
  
        /* Some WLUN doesn't support unit descriptor */
        if (ret == -EOPNOTSUPP)
@@@ -5253,9 -2992,6 +5254,9 @@@ static int ufshcd_slave_configure(struc
        blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
        blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
  
 +      sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
 +      sdev->use_rpm_auto = 1;
 +
        return 0;
  }
  
@@@ -5365,7 -3101,6 +5366,7 @@@ ufshcd_transfer_rsp_status(struct ufs_h
        int result = 0;
        int scsi_status;
        int ocs;
 +      bool print_prdt;
  
        /* overall command status of utrd */
        ocs = ufshcd_get_tr_ocs(lrbp);
        switch (ocs) {
        case OCS_SUCCESS:
                result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
 -
 +              hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
                switch (result) {
                case UPIU_TRANSACTION_RESPONSE:
                        /*
                        scsi_status = result & MASK_SCSI_STATUS;
                        result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
  
 -                      if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
 +                      /*
  +                       * Only BKOPs exception events are supported at the
  +                       * moment, so BKOPs exceptions can be ignored during
  +                       * power management callbacks. A BKOPs exception is
  +                       * not expected in the runtime suspend callback since
  +                       * urgent BKOPs is allowed there. During system
  +                       * suspend BKOPs is forcefully disabled anyway, and if
  +                       * urgent BKOPs is needed it will be re-enabled on
  +                       * system resume. A long term solution could be to
  +                       * abort the system suspend if the UFS device needs
  +                       * urgent BKOPs.
 +                       */
 +                      if (!hba->pm_op_in_progress &&
 +                          ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
                                schedule_work(&hba->eeh_work);
                        break;
                case UPIU_TRANSACTION_REJECT_UPIU:
        case OCS_MISMATCH_RESP_UPIU_SIZE:
        case OCS_PEER_COMM_FAILURE:
        case OCS_FATAL_ERROR:
 +      case OCS_DEVICE_FATAL_ERROR:
 +      case OCS_INVALID_CRYPTO_CONFIG:
 +      case OCS_GENERAL_CRYPTO_ERROR:
        default:
                result |= DID_ERROR << 16;
                dev_err(hba->dev,
 -              "OCS error from controller = %x\n", ocs);
 +                              "OCS error from controller = %x for tag %d\n",
 +                              ocs, lrbp->task_tag);
 +              /*
  +               * This is called in interrupt context, so avoid sleeping
  +               * while printing debug registers. Also print only the
  +               * minimum set of registers needed to debug the OCS failure.
 +               */
 +              __ufshcd_print_host_regs(hba, true);
 +              ufshcd_print_host_state(hba);
                break;
        } /* end of switch */
  
 +      if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
 +              print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
 +                      ocs == OCS_MISMATCH_DATA_BUF_SIZE);
 +              ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
 +      }
 +
 +      if ((host_byte(result) == DID_ERROR) ||
 +          (host_byte(result) == DID_ABORT))
 +              ufsdbg_set_err_state(hba);
 +
        return result;
  }
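The extra diagnostics added above key off host_byte(result), i.e. the DID_* host status packed into bits 16..23 of the SCSI result word. A small sketch of that decoding; the macros mirror the usual Linux SCSI result layout and are shown here purely for illustration:

/* DID_* host status lives in bits 16..23 of the SCSI result word */
#define DEMO_DID_OK	0x00
#define DEMO_DID_ABORT	0x05
#define DEMO_DID_ERROR	0x07
#define demo_host_byte(result)	(((result) >> 16) & 0xff)

static int demo_needs_err_state(int result)
{
	/* matches the check added above: DID_ERROR or DID_ABORT */
	return demo_host_byte(result) == DEMO_DID_ERROR ||
	       demo_host_byte(result) == DEMO_DID_ABORT;
}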
  
   * ufshcd_uic_cmd_compl - handle completion of uic command
   * @hba: per adapter instance
   * @intr_status: interrupt status generated by the controller
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 +static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
  {
 +      irqreturn_t retval = IRQ_NONE;
 +
        if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
                hba->active_uic_cmd->argument2 |=
                        ufshcd_get_uic_cmd_result(hba);
                hba->active_uic_cmd->argument3 =
                        ufshcd_get_dme_attr_val(hba);
                complete(&hba->active_uic_cmd->done);
 +              retval = IRQ_HANDLED;
 +      }
 +
 +      if (intr_status & UFSHCD_UIC_PWR_MASK) {
 +              if (hba->uic_async_done) {
 +                      complete(hba->uic_async_done);
 +                      retval = IRQ_HANDLED;
 +              } else if (ufshcd_is_auto_hibern8_supported(hba)) {
 +                      /*
  +                       * If the uic_async_done flag is not set then this
  +                       * is an auto hibern8 error interrupt.
  +                       * Perform a host reset followed by a full
  +                       * link recovery.
 +                       */
 +                      hba->ufshcd_state = UFSHCD_STATE_ERROR;
 +                      hba->force_host_reset = true;
 +                      dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
 +                              __func__, (intr_status & UIC_HIBERNATE_ENTER) ?
 +                              "Enter" : "Exit",
 +                              intr_status, ufshcd_get_upmcrs(hba));
 +                      __ufshcd_print_host_regs(hba, true);
 +                      ufshcd_print_host_state(hba);
 +                      schedule_work(&hba->eh_work);
 +                      retval = IRQ_HANDLED;
 +              }
        }
 +      return retval;
 +}
 +
 +/**
  + * ufshcd_abort_outstanding_transfer_requests - abort all outstanding transfer requests.
 + * @hba: per adapter instance
 + * @result: error result to inform scsi layer about
 + */
 +void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
 +{
 +      u8 index;
 +      struct ufshcd_lrb *lrbp;
 +      struct scsi_cmnd *cmd;
 +
 +      if (!hba->outstanding_reqs)
 +              return;
  
 -      if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
 -              complete(hba->uic_async_done);
 +      for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
 +              lrbp = &hba->lrb[index];
 +              cmd = lrbp->cmd;
 +              if (cmd) {
 +                      ufshcd_cond_add_cmd_trace(hba, index, "failed");
 +                      ufshcd_update_error_stats(hba,
 +                                      UFS_ERR_INT_FATAL_ERRORS);
 +                      scsi_dma_unmap(cmd);
 +                      cmd->result = result;
 +                      /* Clear pending transfer requests */
 +                      ufshcd_clear_cmd(hba, index);
 +                      ufshcd_outstanding_req_clear(hba, index);
 +                      clear_bit_unlock(index, &hba->lrb_in_use);
 +                      lrbp->complete_time_stamp = ktime_get();
 +                      update_req_stats(hba, lrbp);
 +                      /* Mark completed command as NULL in LRB */
 +                      lrbp->cmd = NULL;
 +                      ufshcd_release_all(hba);
 +                      if (cmd->request) {
 +                              /*
 +                               * As we are accessing the "request" structure,
 +                               * this must be called before calling
 +                               * ->scsi_done() callback.
 +                               */
 +                              ufshcd_vops_pm_qos_req_end(hba, cmd->request,
 +                                      true);
 +                              ufshcd_vops_crypto_engine_cfg_end(hba,
 +                                              lrbp, cmd->request);
 +                      }
 +                      /* Do not touch lrbp after scsi done */
 +                      cmd->scsi_done(cmd);
 +              } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 +                      if (hba->dev_cmd.complete) {
 +                              ufshcd_cond_add_cmd_trace(hba, index,
 +                                                      "dev_failed");
 +                              ufshcd_outstanding_req_clear(hba, index);
 +                              complete(hba->dev_cmd.complete);
 +                      }
 +              }
 +              if (ufshcd_is_clkscaling_supported(hba))
 +                      hba->clk_scaling.active_reqs--;
 +      }
  }
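The new helper above walks the outstanding-request bitmap with for_each_set_bit() and fails each in-flight command back to the SCSI layer. A userspace equivalent of that bitmap walk, with hypothetical names, for illustration:

#include <stdio.h>

static void demo_visit_outstanding_tags(unsigned long outstanding_reqs,
					unsigned int nutrs)
{
	for (unsigned int tag = 0; tag < nutrs; tag++) {
		if (!(outstanding_reqs & (1UL << tag)))
			continue;
		/* the driver would abort/complete the command at this tag */
		printf("tag %u is outstanding\n", tag);
	}
}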
  
  /**
 - * ufshcd_transfer_req_compl - handle SCSI and query command completion
 + * __ufshcd_transfer_req_compl - handle SCSI and query command completion
   * @hba: per adapter instance
 + * @completed_reqs: requests to complete
   */
 -static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 +static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 +                                      unsigned long completed_reqs)
  {
        struct ufshcd_lrb *lrbp;
        struct scsi_cmnd *cmd;
 -      unsigned long completed_reqs;
 -      u32 tr_doorbell;
        int result;
        int index;
        struct request *req;
  
 -      /* Resetting interrupt aggregation counters first and reading the
 -       * DOOR_BELL afterward allows us to handle all the completed requests.
 -       * In order to prevent other interrupts starvation the DB is read once
 -       * after reset. The down side of this solution is the possibility of
 -       * false interrupt if device completes another request after resetting
 -       * aggregation and before reading the DB.
 -       */
 -      if (ufshcd_is_intr_aggr_allowed(hba))
 -              ufshcd_reset_intr_aggr(hba);
 -
 -      tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 -      completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
 -
        for_each_set_bit(index, &completed_reqs, hba->nutrs) {
                lrbp = &hba->lrb[index];
                cmd = lrbp->cmd;
                if (cmd) {
 +                      ufshcd_cond_add_cmd_trace(hba, index, "complete");
 +                      ufshcd_update_tag_stats_completion(hba, cmd);
                        result = ufshcd_transfer_rsp_status(hba, lrbp);
                        scsi_dma_unmap(cmd);
                        cmd->result = result;
 +                      clear_bit_unlock(index, &hba->lrb_in_use);
 +                      lrbp->complete_time_stamp = ktime_get();
 +                      update_req_stats(hba, lrbp);
                        /* Mark completed command as NULL in LRB */
                        lrbp->cmd = NULL;
 -                      clear_bit_unlock(index, &hba->lrb_in_use);
 +                      hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
 +                      __ufshcd_release(hba, false);
 +                      __ufshcd_hibern8_release(hba, false);
 +                      if (cmd->request) {
 +                              /*
 +                               * As we are accessing the "request" structure,
 +                               * this must be called before calling
 +                               * ->scsi_done() callback.
 +                               */
 +                              ufshcd_vops_pm_qos_req_end(hba, cmd->request,
 +                                      false);
 +                              ufshcd_vops_crypto_engine_cfg_end(hba,
 +                                      lrbp, cmd->request);
 +                      }
 +
                        req = cmd->request;
                        if (req) {
                                /* Update IO svc time latency histogram */
                        }
                        /* Do not touch lrbp after scsi done */
                        cmd->scsi_done(cmd);
 -                      __ufshcd_release(hba);
                } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 -                      if (hba->dev_cmd.complete)
 +                      if (hba->dev_cmd.complete) {
 +                              ufshcd_cond_add_cmd_trace(hba, index,
 +                                              "dcmp");
                                complete(hba->dev_cmd.complete);
 +                      }
                }
 +              if (ufshcd_is_clkscaling_supported(hba))
 +                      hba->clk_scaling.active_reqs--;
        }
  
 -      /* clear corresponding bits of completed commands */
 -      hba->outstanding_reqs ^= completed_reqs;
 +      /* clear corresponding bits of completed commands */
 +      hba->outstanding_reqs ^= completed_reqs;
 +
 +      ufshcd_clk_scaling_update_busy(hba);
 +
  +      /* we might have freed some tags above */
 +      wake_up(&hba->dev_cmd.tag_wq);
 +}
 +
 +/**
 + * ufshcd_transfer_req_compl - handle SCSI and query command completion
 + * @hba: per adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
 + */
 +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 +{
 +      unsigned long completed_reqs;
 +      u32 tr_doorbell;
 +
 +      /* Resetting interrupt aggregation counters first and reading the
 +       * DOOR_BELL afterward allows us to handle all the completed requests.
 +       * In order to prevent other interrupts starvation the DB is read once
 +       * after reset. The down side of this solution is the possibility of
 +       * false interrupt if device completes another request after resetting
 +       * aggregation and before reading the DB.
 +       */
 +      if (ufshcd_is_intr_aggr_allowed(hba))
 +              ufshcd_reset_intr_aggr(hba);
  
 -      ufshcd_clk_scaling_update_busy(hba);
 +      tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
  
 -      /* we might have free'd some tags above */
 -      wake_up(&hba->dev_cmd.tag_wq);
 +      if (completed_reqs) {
 +              __ufshcd_transfer_req_compl(hba, completed_reqs);
 +              return IRQ_HANDLED;
 +      } else {
 +              return IRQ_NONE;
 +      }
  }
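The split above leaves the doorbell-based completion detection in ufshcd_transfer_req_compl(): a bit that is set in outstanding_reqs but already cleared by the controller in the transfer-request doorbell marks a completed request. A one-function sketch of that XOR trick, with illustrative names:

static unsigned long demo_completed_reqs(unsigned long tr_doorbell,
					 unsigned long outstanding_reqs)
{
	/*
	 * The driver only ever sets doorbell bits for requests it tracks in
	 * outstanding_reqs, so any bit that differs between the two words is
	 * a request the hardware has finished and cleared.
	 */
	return tr_doorbell ^ outstanding_reqs;
}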
  
  /**
@@@ -5703,7 -3273,7 +5704,7 @@@ static int ufshcd_disable_ee(struct ufs
  
        val = hba->ee_ctrl_mask & ~mask;
        val &= 0xFFFF; /* 2 bytes */
 -      err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 +      err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
        if (!err)
                hba->ee_ctrl_mask &= ~mask;
@@@ -5731,7 -3301,7 +5732,7 @@@ static int ufshcd_enable_ee(struct ufs_
  
        val = hba->ee_ctrl_mask | mask;
        val &= 0xFFFF; /* 2 bytes */
 -      err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 +      err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
        if (!err)
                hba->ee_ctrl_mask |= mask;
@@@ -5757,7 -3327,7 +5758,7 @@@ static int ufshcd_enable_auto_bkops(str
        if (hba->auto_bkops_enabled)
                goto out;
  
 -      err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 +      err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
                        QUERY_FLAG_IDN_BKOPS_EN, NULL);
        if (err) {
                dev_err(hba->dev, "%s: failed to enable bkops %d\n",
        }
  
        hba->auto_bkops_enabled = true;
 +      trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
  
        /* No need of URGENT_BKOPS exception from the device */
        err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@@ -5807,7 -3376,7 +5808,7 @@@ static int ufshcd_disable_auto_bkops(st
                goto out;
        }
  
 -      err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
 +      err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
                        QUERY_FLAG_IDN_BKOPS_EN, NULL);
        if (err) {
                dev_err(hba->dev, "%s: failed to disable bkops %d\n",
        }
  
        hba->auto_bkops_enabled = false;
 +      trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
  out:
        return err;
  }
@@@ -5846,7 -3414,7 +5847,7 @@@ static void ufshcd_force_reset_auto_bko
  
  static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
  {
 -      return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 +      return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
                        QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
  }
  
   */
  static int ufshcd_urgent_bkops(struct ufs_hba *hba)
  {
 -      return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 +      return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
  }
  
  static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
  {
 -      return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 +      return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
                        QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
  }
  
 +static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
 +{
 +      int err;
 +      u32 curr_status = 0;
 +
 +      if (hba->is_urgent_bkops_lvl_checked)
 +              goto enable_auto_bkops;
 +
 +      err = ufshcd_get_bkops_status(hba, &curr_status);
 +      if (err) {
 +              dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
 +                              __func__, err);
 +              goto out;
 +      }
 +
 +      /*
  +       * Some devices raise the urgent BKOPS exception even when the
  +       * BKOPS status doesn't indicate performance impacted or critical.
  +       * Handle such devices by determining their urgent BKOPS status
  +       * at runtime.
 +       */
 +      if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
 +              dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
 +                              __func__, curr_status);
 +              /* update the current status as the urgent bkops level */
 +              hba->urgent_bkops_lvl = curr_status;
 +              hba->is_urgent_bkops_lvl_checked = true;
 +      }
 +
 +enable_auto_bkops:
 +      err = ufshcd_enable_auto_bkops(hba);
 +out:
 +      if (err < 0)
 +              dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
 +                              __func__, err);
 +}
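The handler above lowers the urgent-BKOPS threshold when a device raises the exception at a status below the performance-impact level, so later urgent-BKOPS handling still triggers for that device. A compact sketch of that bookkeeping; the threshold value and names are assumptions for illustration:

#define DEMO_BKOPS_STATUS_PERF_IMPACT 0x02	/* assumed threshold value */

static void demo_note_urgent_bkops_lvl(unsigned int curr_status,
				       unsigned int *urgent_bkops_lvl,
				       int *lvl_checked)
{
	/* remember the lower status as this device's effective urgent level */
	if (!*lvl_checked && curr_status < DEMO_BKOPS_STATUS_PERF_IMPACT) {
		*urgent_bkops_lvl = curr_status;
		*lvl_checked = 1;
	}
}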
 +
  /**
   * ufshcd_exception_event_handler - handle exceptions raised by device
   * @work: pointer to work data
@@@ -5965,7 -3496,7 +5966,7 @@@ static void ufshcd_exception_event_hand
        hba = container_of(work, struct ufs_hba, eeh_work);
  
        pm_runtime_get_sync(hba->dev);
 -      scsi_block_requests(hba->host);
 +      ufshcd_scsi_block_requests(hba);
        err = ufshcd_get_ee_status(hba, &status);
        if (err) {
                dev_err(hba->dev, "%s: failed to get exception status %d\n",
        }
  
        status &= hba->ee_ctrl_mask;
 -      if (status & MASK_EE_URGENT_BKOPS) {
 -              err = ufshcd_urgent_bkops(hba);
 -              if (err < 0)
 -                      dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
 -                                      __func__, err);
 -      }
 +
 +      if (status & MASK_EE_URGENT_BKOPS)
 +              ufshcd_bkops_exception_event_handler(hba);
 +
  out:
 -      scsi_unblock_requests(hba->host);
 -      pm_runtime_put_sync(hba->dev);
 +      ufshcd_scsi_unblock_requests(hba);
 +      pm_runtime_put(hba->dev);
        return;
  }
  
 +/* Complete requests that have door-bell cleared */
 +static void ufshcd_complete_requests(struct ufs_hba *hba)
 +{
 +      ufshcd_transfer_req_compl(hba);
 +      ufshcd_tmc_handler(hba);
 +}
 +
 +/**
  + * ufshcd_quirk_dl_nac_errors - check whether error handling is required
  + *                            to recover from the DL NAC errors.
 + * @hba: per-adapter instance
 + *
 + * Returns true if error handling is required, false otherwise
 + */
 +static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +      bool err_handling = true;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      /*
  +       * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
  +       * device fatal error and/or DL NAC & REPLAY timeout errors.
 +       */
 +      if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
 +              goto out;
 +
 +      if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
 +          ((hba->saved_err & UIC_ERROR) &&
 +           (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
 +              /*
  +               * We have to do error recovery, but at least silence the
  +               * error logs.
 +               */
 +              hba->silence_err_logs = true;
 +              goto out;
 +      }
 +
 +      if ((hba->saved_err & UIC_ERROR) &&
 +          (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
 +              int err;
 +              /*
  +               * Wait for 50 ms to see whether any other errors show up.
 +               */
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              msleep(50);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +              /*
  +               * Now check whether any severe errors other than the DL NAC
  +               * error have been received.
 +               */
 +              if ((hba->saved_err & INT_FATAL_ERRORS) ||
 +                  ((hba->saved_err & UIC_ERROR) &&
 +                  (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
 +                      if (((hba->saved_err & INT_FATAL_ERRORS) ==
 +                              DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
 +                                      ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
 +                              hba->silence_err_logs = true;
 +                      goto out;
 +              }
 +
 +              /*
  +               * As DL NAC is the only error received so far, send out a NOP
  +               * command to confirm whether the link is still active.
  +               *   - If we don't get any response then do error recovery.
  +               *   - If we get a response then clear the DL NAC error bit.
 +               */
 +
 +              /* silence the error logs from NOP command */
 +              hba->silence_err_logs = true;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              err = ufshcd_verify_dev_init(hba);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              hba->silence_err_logs = false;
 +
 +              if (err) {
 +                      hba->silence_err_logs = true;
 +                      goto out;
 +              }
 +
 +              /* Link seems to be alive hence ignore the DL NAC errors */
 +              if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
 +                      hba->saved_err &= ~UIC_ERROR;
 +              /* clear NAC error */
 +              hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
 +              if (!hba->saved_uic_err) {
 +                      err_handling = false;
 +                      goto out;
 +              }
 +              /*
 +               * there seems to be some errors other than NAC, so do error
 +               * recovery
 +               */
 +              hba->silence_err_logs = true;
 +      }
 +out:
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return err_handling;
 +}
 +
  /**
   * ufshcd_err_handler - handle UFS errors that require s/w attention
   * @work: pointer to work structure
@@@ -6093,149 -3525,51 +6094,149 @@@ static void ufshcd_err_handler(struct w
  {
        struct ufs_hba *hba;
        unsigned long flags;
 -      u32 err_xfer = 0;
 -      u32 err_tm = 0;
 +      bool err_xfer = false, err_tm = false;
        int err = 0;
        int tag;
 +      bool needs_reset = false;
 +      bool clks_enabled = false;
  
        hba = container_of(work, struct ufs_hba, eh_work);
  
 -      pm_runtime_get_sync(hba->dev);
 -      ufshcd_hold(hba, false);
 -
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
 -              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufsdbg_set_err_state(hba);
 +
 +      if (hba->ufshcd_state == UFSHCD_STATE_RESET)
                goto out;
 +
 +      /*
  +       * Make sure the clocks are ON before we proceed with error
  +       * handling. In the majority of cases the error handler runs
  +       * with the clocks ON. However, the error handler may have been
  +       * scheduled due to an auto hibern8 error interrupt, in which
  +       * case the clocks could be gated, or in the process of being
  +       * gated, when the error handler runs.
 +       */
 +      if (unlikely((hba->clk_gating.state != CLKS_ON) &&
 +          ufshcd_is_auto_hibern8_supported(hba))) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
 +              ufshcd_hold(hba, false);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              clks_enabled = true;
        }
  
        hba->ufshcd_state = UFSHCD_STATE_RESET;
        ufshcd_set_eh_in_progress(hba);
  
        /* Complete requests that have door-bell cleared by h/w */
 -      ufshcd_transfer_req_compl(hba);
 -      ufshcd_tmc_handler(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufshcd_complete_requests(hba);
 +
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
 +              bool ret;
 +
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
 +              ret = ufshcd_quirk_dl_nac_errors(hba);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              if (!ret)
 +                      goto skip_err_handling;
 +      }
 +
 +      /*
  +       * Dump the controller state before resetting. The transfer request
  +       * state will be dumped as part of the request completion.
 +       */
 +      if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
 +              dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
 +                      __func__, hba->saved_err, hba->saved_uic_err);
 +              if (!hba->silence_err_logs) {
 +                      /* release lock as print host regs sleeps */
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +                      ufshcd_print_host_regs(hba);
 +                      ufshcd_print_host_state(hba);
 +                      ufshcd_print_pwr_info(hba);
 +                      ufshcd_print_tmrs(hba, hba->outstanding_tasks);
 +                      ufshcd_print_cmd_log(hba);
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +              }
 +      }
 +
 +      if ((hba->saved_err & INT_FATAL_ERRORS)
 +          || hba->saved_ce_err || hba->force_host_reset ||
 +          ((hba->saved_err & UIC_ERROR) &&
 +          (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
 +                                 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
 +                                 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
 +              needs_reset = true;
  
 +      /*
  +       * If a host reset is required then skip forcefully clearing the
  +       * pending transfers, because they will automatically be cleared
  +       * after link startup.
 +       */
 +      if (needs_reset)
 +              goto skip_pending_xfer_clear;
 +
 +      /* release lock as clear command might sleep */
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
        /* Clear pending transfer requests */
 -      for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
 -              if (ufshcd_clear_cmd(hba, tag))
 -                      err_xfer |= 1 << tag;
 +      for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
 +              if (ufshcd_clear_cmd(hba, tag)) {
 +                      err_xfer = true;
 +                      goto lock_skip_pending_xfer_clear;
 +              }
 +      }
  
        /* Clear pending task management requests */
 -      for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
 -              if (ufshcd_clear_tm_cmd(hba, tag))
 -                      err_tm |= 1 << tag;
 +      for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
 +              if (ufshcd_clear_tm_cmd(hba, tag)) {
 +                      err_tm = true;
 +                      goto lock_skip_pending_xfer_clear;
 +              }
 +      }
  
 -      /* Complete the requests that are cleared by s/w */
 +lock_skip_pending_xfer_clear:
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_transfer_req_compl(hba);
 -      ufshcd_tmc_handler(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 +      /* Complete the requests that are cleared by s/w */
 +      ufshcd_complete_requests(hba);
 +
 +      if (err_xfer || err_tm)
 +              needs_reset = true;
 +
 +skip_pending_xfer_clear:
        /* Fatal errors need reset */
 -      if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
 -                      ((hba->saved_err & UIC_ERROR) &&
 -                       (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
 +      if (needs_reset) {
 +              unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
 +
 +              if (hba->saved_err & INT_FATAL_ERRORS)
 +                      ufshcd_update_error_stats(hba,
 +                                                UFS_ERR_INT_FATAL_ERRORS);
 +              if (hba->saved_ce_err)
 +                      ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
 +
 +              if (hba->saved_err & UIC_ERROR)
 +                      ufshcd_update_error_stats(hba,
 +                                                UFS_ERR_INT_UIC_ERROR);
 +
 +              if (err_xfer || err_tm)
 +                      ufshcd_update_error_stats(hba,
 +                                                UFS_ERR_CLEAR_PEND_XFER_TM);
 +
 +              /*
  +               * ufshcd_reset_and_restore() does the link reinitialization,
  +               * which needs at least one empty doorbell slot to send the
  +               * device management commands (NOP and query commands).
  +               * If no slot is empty at this moment, forcefully free up the
  +               * last slot.
 +               */
 +              if (hba->outstanding_reqs == max_doorbells)
 +                      __ufshcd_transfer_req_compl(hba,
 +                                                  (1UL << (hba->nutrs - 1)));
 +
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
                err = ufshcd_reset_and_restore(hba);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
                if (err) {
                        dev_err(hba->dev, "%s: reset and restore failed\n",
                                        __func__);
                scsi_report_bus_reset(hba->host, 0);
                hba->saved_err = 0;
                hba->saved_uic_err = 0;
 +              hba->saved_ce_err = 0;
 +              hba->force_host_reset = false;
        }
 +
 +skip_err_handling:
 +      if (!needs_reset) {
 +              hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 +              if (hba->saved_err || hba->saved_uic_err)
 +                      dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
 +                          __func__, hba->saved_err, hba->saved_uic_err);
 +      }
 +
 +      hba->silence_err_logs = false;
 +
 +      if (clks_enabled) {
 +              __ufshcd_release(hba, false);
 +              hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
 +      }
 +out:
        ufshcd_clear_eh_in_progress(hba);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +
 +static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
 +              u32 reg)
 +{
 +      reg_hist->reg[reg_hist->pos] = reg;
 +      reg_hist->tstamp[reg_hist->pos] = ktime_get();
 +      reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
 +}
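ufshcd_update_uic_reg_hist() above is a fixed-size ring buffer: the write position wraps modulo the history length so only the newest samples are retained. A standalone sketch of the same structure with hypothetical names:

#include <time.h>

#define DEMO_HIST_LEN 8		/* the driver uses UIC_ERR_REG_HIST_LENGTH */

struct demo_reg_hist {
	unsigned int reg[DEMO_HIST_LEN];
	time_t tstamp[DEMO_HIST_LEN];
	int pos;
};

static void demo_reg_hist_push(struct demo_reg_hist *h, unsigned int reg)
{
	h->reg[h->pos] = reg;
	h->tstamp[h->pos] = time(NULL);	/* the driver records ktime_get() */
	h->pos = (h->pos + 1) % DEMO_HIST_LEN;
}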
 +
 +static void ufshcd_rls_handler(struct work_struct *work)
 +{
 +      struct ufs_hba *hba;
 +      int ret = 0;
 +      u32 mode;
 +
 +      hba = container_of(work, struct ufs_hba, rls_work);
 +      ufshcd_scsi_block_requests(hba);
 +      pm_runtime_get_sync(hba->dev);
 +      ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 +      if (ret) {
 +              dev_err(hba->dev,
 +                      "Timed out (%d) waiting for DB to clear\n",
 +                      ret);
 +              goto out;
 +      }
 +
 +      ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
 +      if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
 +              hba->restore_needed = true;
 +
 +      if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
 +              hba->restore_needed = true;
 +
 +      ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
 +      if (hba->pwr_info.gear_rx != mode)
 +              hba->restore_needed = true;
 +
 +      ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
 +      if (hba->pwr_info.gear_tx != mode)
 +              hba->restore_needed = true;
 +
 +      if (hba->restore_needed)
 +              ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
 +
 +      if (ret)
 +              dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
 +                      __func__, ret);
 +      else
 +              hba->restore_needed = false;
  
  out:
 -      scsi_unblock_requests(hba->host);
 -      ufshcd_release(hba);
 +      ufshcd_scsi_unblock_requests(hba);
        pm_runtime_put_sync(hba->dev);
  }
  
  /**
   * ufshcd_update_uic_error - check and set fatal UIC error flags.
   * @hba: per-adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_update_uic_error(struct ufs_hba *hba)
 +static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
  {
        u32 reg;
 +      irqreturn_t retval = IRQ_NONE;
 +
 +      /* PHY layer lane error */
 +      reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
 +      if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
 +          (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
 +              /*
 +               * To know whether this error is fatal or not, DB timeout
 +               * must be checked but this error is handled separately.
 +               */
 +              dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
 +                              __func__, reg);
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
 +
 +              /*
 +               * Don't ignore LINERESET indication during hibern8
 +               * enter operation.
 +               */
 +              if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
 +                      struct uic_command *cmd = hba->active_uic_cmd;
 +
 +                      if (cmd) {
 +                              if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
 +                                      dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
 +                                              __func__, reg);
 +                                      hba->full_init_linereset = true;
 +                              }
 +                      }
 +                      if (!hba->full_init_linereset)
 +                              schedule_work(&hba->rls_work);
 +              }
 +              retval |= IRQ_HANDLED;
 +      }
  
        /* PA_INIT_ERROR is fatal and needs UIC reset */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
 -      if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
 -              hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
 +      if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
 +          (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
 +
 +              if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
 +                      hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
 +              } else if (hba->dev_quirks &
 +                         UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
 +                      if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
 +                              hba->uic_error |=
 +                                      UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
 +                      else if (reg &
 +                               UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
 +                              hba->uic_error |=
 +                                      UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
 +              }
 +              retval |= IRQ_HANDLED;
 +      }
  
        /* UIC NL/TL/DME errors needs software retry */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
 -      if (reg)
 +      if ((reg & UIC_NETWORK_LAYER_ERROR) &&
 +          (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
                hba->uic_error |= UFSHCD_UIC_NL_ERROR;
 +              retval |= IRQ_HANDLED;
 +      }
  
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
 -      if (reg)
 +      if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
 +          (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
                hba->uic_error |= UFSHCD_UIC_TL_ERROR;
 +              retval |= IRQ_HANDLED;
 +      }
  
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
 -      if (reg)
 +      if ((reg & UIC_DME_ERROR) &&
 +          (reg & UIC_DME_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
                hba->uic_error |= UFSHCD_UIC_DME_ERROR;
 +              retval |= IRQ_HANDLED;
 +      }
  
        dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
                        __func__, hba->uic_error);
 +      return retval;
  }
  
  /**
   * ufshcd_check_errors - Check for errors that need s/w attention
   * @hba: per-adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_check_errors(struct ufs_hba *hba)
 +static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
  {
        bool queue_eh_work = false;
 +      irqreturn_t retval = IRQ_NONE;
  
 -      if (hba->errors & INT_FATAL_ERRORS)
 +      if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
                queue_eh_work = true;
  
        if (hba->errors & UIC_ERROR) {
                hba->uic_error = 0;
 -              ufshcd_update_uic_error(hba);
 +              retval = ufshcd_update_uic_error(hba);
                if (hba->uic_error)
                        queue_eh_work = true;
        }
  
        if (queue_eh_work) {
 +              /*
  +               * Update the transfer error masks to sticky bits; do this
  +               * irrespective of the current ufshcd_state.
 +               */
 +              hba->saved_err |= hba->errors;
 +              hba->saved_uic_err |= hba->uic_error;
 +              hba->saved_ce_err |= hba->ce_error;
 +
                /* handle fatal errors only when link is functional */
                if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
 -                      /* block commands from scsi mid-layer */
 -                      scsi_block_requests(hba->host);
 -
 -                      /* transfer error masks to sticky bits */
 -                      hba->saved_err |= hba->errors;
 -                      hba->saved_uic_err |= hba->uic_error;
 +                      /*
 +                       * Set error handling in progress flag early so that we
 +                       * don't issue new requests any more.
 +                       */
 +                      ufshcd_set_eh_in_progress(hba);
  
                        hba->ufshcd_state = UFSHCD_STATE_ERROR;
                        schedule_work(&hba->eh_work);
                }
 +              retval |= IRQ_HANDLED;
        }
        /*
         * if (!queue_eh_work) -
         * itself without s/w intervention or errors that will be
         * handled by the SCSI core layer.
         */
 +      return retval;
  }
  
  /**
   * ufshcd_tmc_handler - handle task management function completion
   * @hba: per adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_tmc_handler(struct ufs_hba *hba)
 +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
  {
        u32 tm_doorbell;
  
        tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
        hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
 -      wake_up(&hba->tm_wq);
 +      if (hba->tm_condition) {
 +              wake_up(&hba->tm_wq);
 +              return IRQ_HANDLED;
 +      } else {
 +              return IRQ_NONE;
 +      }
  }
  
  /**
   * ufshcd_sl_intr - Interrupt service routine
   * @hba: per adapter instance
   * @intr_status: contains interrupts generated by the controller
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 +static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  {
 +      irqreturn_t retval = IRQ_NONE;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_INTR, intr_status, &intr_status);
 +
 +      ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
 +
        hba->errors = UFSHCD_ERROR_MASK & intr_status;
 -      if (hba->errors)
 -              ufshcd_check_errors(hba);
 +      if (hba->errors || hba->ce_error)
 +              retval |= ufshcd_check_errors(hba);
  
        if (intr_status & UFSHCD_UIC_MASK)
 -              ufshcd_uic_cmd_compl(hba, intr_status);
 +              retval |= ufshcd_uic_cmd_compl(hba, intr_status);
  
        if (intr_status & UTP_TASK_REQ_COMPL)
 -              ufshcd_tmc_handler(hba);
 +              retval |= ufshcd_tmc_handler(hba);
  
        if (intr_status & UTP_TRANSFER_REQ_COMPL)
 -              ufshcd_transfer_req_compl(hba);
 +              retval |= ufshcd_transfer_req_compl(hba);
 +
 +      return retval;
  }
  
  /**
   * @irq: irq number
   * @__hba: pointer to adapter instance
   *
 - * Returns IRQ_HANDLED - If interrupt is valid
 - *            IRQ_NONE - If invalid interrupt
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
  static irqreturn_t ufshcd_intr(int irq, void *__hba)
  {
 -      u32 intr_status;
 +      u32 intr_status, enabled_intr_status;
        irqreturn_t retval = IRQ_NONE;
        struct ufs_hba *hba = __hba;
 +      int retries = hba->nutrs;
  
        spin_lock(hba->host->host_lock);
        intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 +      hba->ufs_stats.last_intr_status = intr_status;
 +      hba->ufs_stats.last_intr_ts = ktime_get();
 +      /*
  +       * There can be at most hba->nutrs requests in flight. In the worst
  +       * case the requests finish one by one right after the interrupt
  +       * status is read, so keep re-reading the interrupt status in a loop
  +       * and handle the requests until all of them are processed before
  +       * returning.
 +       */
 +      do {
 +              enabled_intr_status =
 +                      intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 +              if (intr_status)
 +                      ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
 +              if (enabled_intr_status)
 +                      retval |= ufshcd_sl_intr(hba, enabled_intr_status);
  
 -      if (intr_status) {
 -              ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
 -              ufshcd_sl_intr(hba, intr_status);
 -              retval = IRQ_HANDLED;
 +              intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 +      } while (intr_status && --retries);
 +
 +      if (retval == IRQ_NONE) {
 +              dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
 +                                      __func__, intr_status);
 +              ufshcd_hex_dump("host regs: ", hba->mmio_base,
 +                                      UFSHCI_REG_SPACE_SIZE);
        }
 +
        spin_unlock(hba->host->host_lock);
        return retval;
  }
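The reworked ISR above re-reads the interrupt status after each pass, bounded by hba->nutrs iterations, so completions that arrive while earlier ones are being serviced are not missed. A minimal sketch of that bounded read-ack-handle loop; the register accessors are stand-ins, not the driver's:

static int demo_isr_loop(unsigned int (*read_status)(void),
			 void (*ack_status)(unsigned int),
			 void (*service)(unsigned int),
			 int max_passes)
{
	unsigned int status = read_status();
	int handled = 0;

	do {
		if (status) {
			ack_status(status);	/* clear what we have seen */
			service(status);	/* then handle it */
			handled = 1;
		}
		status = read_status();		/* pick up late arrivals */
	} while (status && --max_passes);

	return handled;
}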
@@@ -6595,7 -3737,7 +6596,7 @@@ static int ufshcd_clear_tm_cmd(struct u
        /* poll for max. 1 sec to clear door bell register by h/w */
        err = ufshcd_wait_for_register(hba,
                        REG_UTP_TASK_REQ_DOOR_BELL,
 -                      mask, 0, 1000, 1000);
 +                      mask, 0, 1000, 1000, true);
  out:
        return err;
  }
@@@ -6629,8 -3771,7 +6630,8 @@@ static int ufshcd_issue_tm_cmd(struct u
         * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
         */
        wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 -      ufshcd_hold(hba, false);
 +      hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
 +      ufshcd_hold_all(hba);
  
        spin_lock_irqsave(host->host_lock, flags);
        task_req_descp = hba->utmrdl_base_addr;
  
        /* send command to the controller */
        __set_bit(free_slot, &hba->outstanding_tasks);
 +
 +      /* Make sure descriptors are ready before ringing the task doorbell */
 +      wmb();
 +
        ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
 +      /* Make sure that doorbell is committed immediately */
 +      wmb();
  
        spin_unlock_irqrestore(host->host_lock, flags);
  
        clear_bit(free_slot, &hba->tm_condition);
        ufshcd_put_tm_slot(hba, free_slot);
        wake_up(&hba->tm_tag_wq);
 +      hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
  
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
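The wmb() calls added in the task-management path above order the descriptor writes ahead of the MMIO doorbell write, so the controller cannot observe the doorbell before the descriptor is visible in memory. A sketch of that ordering with stand-in helpers:

static void demo_ring_doorbell(volatile unsigned int *doorbell_reg,
			       unsigned int slot_bit,
			       void (*write_barrier)(void))
{
	/* the task descriptor for this slot is assumed to be filled already */
	write_barrier();		/* descriptor writes before the doorbell */
	*doorbell_reg = slot_bit;	/* ring the doorbell */
	write_barrier();		/* push the MMIO write out promptly */
}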
  
@@@ -6715,7 -3849,6 +6716,7 @@@ static int ufshcd_eh_device_reset_handl
        hba = shost_priv(host);
        tag = cmd->request->tag;
  
 +      ufshcd_print_cmd_log(hba);
        lrbp = &hba->lrb[tag];
        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
        spin_lock_irqsave(host->host_lock, flags);
        ufshcd_transfer_req_compl(hba);
        spin_unlock_irqrestore(host->host_lock, flags);
 +
  out:
 +      hba->req_abort_count = 0;
        if (!err) {
                err = SUCCESS;
        } else {
        return err;
  }
  
 +static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
 +{
 +      struct ufshcd_lrb *lrbp;
 +      int tag;
 +
 +      for_each_set_bit(tag, &bitmap, hba->nutrs) {
 +              lrbp = &hba->lrb[tag];
 +              lrbp->req_abort_skip = true;
 +      }
 +}
 +
  /**
   * ufshcd_abort - abort a specific command
   * @cmd: SCSI command pointer
@@@ -6785,87 -3905,31 +6786,87 @@@ static int ufshcd_abort(struct scsi_cmn
        host = cmd->device->host;
        hba = shost_priv(host);
        tag = cmd->request->tag;
 +      if (!ufshcd_valid_tag(hba, tag)) {
 +              dev_err(hba->dev,
 +                      "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
 +                      __func__, tag, cmd, cmd->request);
 +              BUG();
 +      }
  
 -      ufshcd_hold(hba, false);
 +      lrbp = &hba->lrb[tag];
 +
 +      ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
 +
 +      /*
  +       * Task abort to the device W-LUN is illegal. When this command
  +       * fails due to the spec violation, the next step of SCSI error
  +       * handling would be to send a LU reset, which is again a spec
  +       * violation. To avoid these unnecessary/illegal steps we skip
  +       * straight to the last error handling stage: reset and restore.
 +       */
 +      if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
 +              return ufshcd_eh_host_reset_handler(cmd);
 +
 +      ufshcd_hold_all(hba);
 +      reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        /* If command is already aborted/completed, return SUCCESS */
 -      if (!(test_bit(tag, &hba->outstanding_reqs)))
 +      if (!(test_bit(tag, &hba->outstanding_reqs))) {
 +              dev_err(hba->dev,
 +                      "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
 +                      __func__, tag, hba->outstanding_reqs, reg);
                goto out;
 +      }
  
 -      reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        if (!(reg & (1 << tag))) {
                dev_err(hba->dev,
                "%s: cmd was completed, but without a notifying intr, tag = %d",
                __func__, tag);
        }
  
 -      lrbp = &hba->lrb[tag];
 +      /* Print Transfer Request of aborted task */
 +      dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
 +
 +      /*
 +       * Print detailed info about aborted request.
 +       * As more than one request might get aborted at the same time,
 +       * print full information only for the first aborted request in order
 +       * to reduce repeated printouts. For other aborted requests only print
 +       * basic details.
 +       */
 +      scsi_print_command(cmd);
 +      if (!hba->req_abort_count) {
 +              ufshcd_print_fsm_state(hba);
 +              ufshcd_print_host_regs(hba);
 +              ufshcd_print_host_state(hba);
 +              ufshcd_print_pwr_info(hba);
 +              ufshcd_print_trs(hba, 1 << tag, true);
 +      } else {
 +              ufshcd_print_trs(hba, 1 << tag, false);
 +      }
 +      hba->req_abort_count++;
  +
 +      /* Skip task abort in case previous aborts failed and report failure */
 +      if (lrbp->req_abort_skip) {
 +              err = -EIO;
 +              goto out;
 +      }
 +
        for (poll_cnt = 100; poll_cnt; poll_cnt--) {
                err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
                                UFS_QUERY_TASK, &resp);
                if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
                        /* cmd pending in the device */
 +                      dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
 +                              __func__, tag);
                        break;
                } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
                        /*
                         * cmd not pending in the device, check if it is
                         * in transition.
                         */
 +                      dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
 +                              __func__, tag);
                        reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
                        if (reg & (1 << tag)) {
                                /* sleep for max. 200us to stabilize */
                                continue;
                        }
                        /* command completed already */
 +                      dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
 +                              __func__, tag);
                        goto out;
                } else {
 +                      dev_err(hba->dev,
 +                              "%s: no response from device. tag = %d, err %d",
 +                              __func__, tag, err);
                        if (!err)
                                err = resp; /* service response error */
                        goto out;
        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
                        UFS_ABORT_TASK, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
 -              if (!err)
 +              if (!err) {
                        err = resp; /* service response error */
 +                      dev_err(hba->dev, "%s: issued. tag = %d, err %d",
 +                              __func__, tag, err);
 +              }
                goto out;
        }
  
        err = ufshcd_clear_cmd(hba, tag);
 -      if (err)
 +      if (err) {
 +              dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
 +                      __func__, tag, err);
                goto out;
 +      }
  
        scsi_dma_unmap(cmd);
  
        spin_lock_irqsave(host->host_lock, flags);
 -      __clear_bit(tag, &hba->outstanding_reqs);
 +      ufshcd_outstanding_req_clear(hba, tag);
        hba->lrb[tag].cmd = NULL;
        spin_unlock_irqrestore(host->host_lock, flags);
  
                err = SUCCESS;
        } else {
                dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
 +              ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
                err = FAILED;
        }
  
        /*
 -       * This ufshcd_release() corresponds to the original scsi cmd that got
 -       * aborted here (as we won't get any IRQ for it).
 +       * This ufshcd_release_all() corresponds to the original scsi cmd that
 +       * got aborted here (as we won't get any IRQ for it).
         */
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
  
@@@ -6953,12 -4005,9 +6954,12 @@@ static int ufshcd_host_reset_and_restor
  
        /* Reset the host controller */
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_hba_stop(hba);
 +      ufshcd_hba_stop(hba, false);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 +      /* scale up clocks to max frequency before full reinitialization */
 +      ufshcd_set_clk_freq(hba, true);
 +
        err = ufshcd_hba_enable(hba);
        if (err)
                goto out;
        /* Establish the link again and restore the device */
        err = ufshcd_probe_hba(hba);
  
 -      if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
 +      if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
                err = -EIO;
 +              goto out;
 +      }
 +
 +      if (!err) {
 +              err = ufshcd_vops_crypto_engine_reset(hba);
 +              if (err) {
 +                      dev_err(hba->dev,
 +                              "%s: failed to reset crypto engine %d\n",
 +                              __func__, err);
 +                      goto out;
 +              }
 +      }
 +
  out:
        if (err)
                dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@@ -7004,26 -4040,10 +7005,26 @@@ static int ufshcd_reset_and_restore(str
        int retries = MAX_HOST_RESET_RETRIES;
  
        do {
 +              err = ufshcd_vops_full_reset(hba);
 +              if (err)
 +                      dev_warn(hba->dev, "%s: full reset returned %d\n",
 +                               __func__, err);
 +
 +              err = ufshcd_reset_device(hba);
 +              if (err)
 +                      dev_warn(hba->dev, "%s: device reset failed. err %d\n",
 +                               __func__, err);
 +
                err = ufshcd_host_reset_and_restore(hba);
        } while (err && --retries);
  
        /*
  +       * There is no point in proceeding if recovery has failed even
  +       * after multiple retries.
 +       */
 +      if (err)
 +              BUG();
 +      /*
         * After reset the door-bell might be cleared, complete
         * outstanding requests in s/w here.
         */
   */
  static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
  {
 -      int err;
 +      int err = SUCCESS;
        unsigned long flags;
        struct ufs_hba *hba;
  
        hba = shost_priv(cmd->device->host);
  
 -      ufshcd_hold(hba, false);
        /*
         * Check if there is any race with fatal error handling.
         * If so, wait for it to complete. Even though fatal error
                                hba->ufshcd_state == UFSHCD_STATE_RESET))
                        break;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
 -              dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
 +              dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
                flush_work(&hba->eh_work);
        } while (1);
  
 -      hba->ufshcd_state = UFSHCD_STATE_RESET;
 -      ufshcd_set_eh_in_progress(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      /*
  +       * We don't know whether the previous reset actually reset the host
  +       * controller, so force a reset here to be sure.
 +       */
 +      hba->ufshcd_state = UFSHCD_STATE_ERROR;
 +      hba->force_host_reset = true;
 +      schedule_work(&hba->eh_work);
  
 -      err = ufshcd_reset_and_restore(hba);
 +      /* wait for the reset work to finish */
 +      do {
 +              if (!(work_pending(&hba->eh_work) ||
 +                              hba->ufshcd_state == UFSHCD_STATE_RESET))
 +                      break;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
 +              flush_work(&hba->eh_work);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +      } while (1);
  
 -      spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (!err) {
 -              err = SUCCESS;
 -              hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 -      } else {
 +      if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
 +            ufshcd_is_link_active(hba))) {
                err = FAILED;
                hba->ufshcd_state = UFSHCD_STATE_ERROR;
        }
 -      ufshcd_clear_eh_in_progress(hba);
 +
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 -      ufshcd_release(hba);
        return err;
  }
  
@@@ -7203,9 -4215,9 +7204,9 @@@ static void ufshcd_init_icc_levels(stru
        dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
                        __func__, hba->init_prefetch_data.icc_level);
  
 -      ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 -                      QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 -                      &hba->init_prefetch_data.icc_level);
 +      ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 +              QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 +              &hba->init_prefetch_data.icc_level);
  
        if (ret)
                dev_err(hba->dev,
  }
  
  /**
 + * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 + * @hba: per-adapter instance
 + *
 + * PA_TActivate parameter can be tuned manually if UniPro version is less than
 + * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
 + * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 + * the hibern8 exit latency.
 + *
 + * Returns zero on success, non-zero error value on failure.
 + */
 +static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
 +
 +      if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
 +              return 0;
 +
 +      ret = ufshcd_dme_peer_get(hba,
 +                                UIC_ARG_MIB_SEL(
 +                                      RX_MIN_ACTIVATETIME_CAPABILITY,
 +                                      UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
 +                                &peer_rx_min_activatetime);
 +      if (ret)
 +              goto out;
 +
 +      /* make sure proper unit conversion is applied */
 +      tuned_pa_tactivate =
 +              ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
 +               / PA_TACTIVATE_TIME_UNIT_US);
 +      ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
 +                           tuned_pa_tactivate);
 +
 +out:
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 + * @hba: per-adapter instance
 + *
 + * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
 + * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
 + * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 + * This optimal value can help reduce the hibern8 exit latency.
 + *
 + * Returns zero on success, non-zero error value on failure.
 + */
 +static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
 +      u32 max_hibern8_time, tuned_pa_hibern8time;
 +
 +      ret = ufshcd_dme_get(hba,
 +                           UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
 +                                      UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
 +                                &local_tx_hibern8_time_cap);
 +      if (ret)
 +              goto out;
 +
 +      ret = ufshcd_dme_peer_get(hba,
 +                                UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
 +                                      UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
 +                                &peer_rx_hibern8_time_cap);
 +      if (ret)
 +              goto out;
 +
 +      max_hibern8_time = max(local_tx_hibern8_time_cap,
 +                             peer_rx_hibern8_time_cap);
 +      /* make sure proper unit conversion is applied */
 +      tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
 +                              / PA_HIBERN8_TIME_UNIT_US);
 +      ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
 +                           tuned_pa_hibern8time);
 +out:
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 + * less than device PA_TACTIVATE time.
 + * @hba: per-adapter instance
 + *
 + * Some UFS devices require host PA_TACTIVATE to be lower than device
 + * PA_TACTIVATE; for such devices the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE
 + * quirk needs to be enabled.
 + *
 + * Returns zero on success, non-zero error value on failure.
 + */
 +static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      u32 granularity, peer_granularity;
 +      u32 pa_tactivate, peer_pa_tactivate;
 +      u32 pa_tactivate_us, peer_pa_tactivate_us;
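 +      /* microseconds per PA_GRANULARITY step, indexed by (granularity - 1) */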
 +      u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
 +
 +      ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
 +                                &granularity);
 +      if (ret)
 +              goto out;
 +
 +      ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
 +                                &peer_granularity);
 +      if (ret)
 +              goto out;
 +
 +      if ((granularity < PA_GRANULARITY_MIN_VAL) ||
 +          (granularity > PA_GRANULARITY_MAX_VAL)) {
 +              dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
 +                      __func__, granularity);
 +              return -EINVAL;
 +      }
 +
 +      if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
 +          (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
 +              dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
 +                      __func__, peer_granularity);
 +              return -EINVAL;
 +      }
 +
 +      ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
 +      if (ret)
 +              goto out;
 +
 +      ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
 +                                &peer_pa_tactivate);
 +      if (ret)
 +              goto out;
 +
 +      pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
 +      peer_pa_tactivate_us = peer_pa_tactivate *
 +                           gran_to_us_table[peer_granularity - 1];
 +
 +      if (pa_tactivate_us > peer_pa_tactivate_us) {
 +              u32 new_peer_pa_tactivate;
 +
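 +              /*
 +               * Convert back to the device's granularity unit and round up
 +               * so the device's PA_TACTIVATE ends up greater than the host's.
 +               */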
 +              new_peer_pa_tactivate = pa_tactivate_us /
 +                                    gran_to_us_table[peer_granularity - 1];
 +              new_peer_pa_tactivate++;
 +              ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
 +                                        new_peer_pa_tactivate);
 +      }
 +
 +out:
 +      return ret;
 +}
 +
 +static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 +{
 +      if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
 +              ufshcd_tune_pa_tactivate(hba);
 +              ufshcd_tune_pa_hibern8time(hba);
 +      }
 +
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
 +              /* set 1ms timeout for PA_TACTIVATE */
 +              ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
 +
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
 +              ufshcd_quirk_tune_host_pa_tactivate(hba);
 +
 +      ufshcd_vops_apply_dev_quirks(hba);
 +}
 +
 +static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
 +{
 +      int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
 +
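 +      /* reset the per-layer UIC error history and the abort counter */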
 +      memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
 +
 +      hba->req_abort_count = 0;
 +}
 +
 +static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
 +{
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
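 +              /*
 +               * Link-off is not allowed for this device; fall back to
 +               * Hibern8 for both the runtime and system PM levels.
 +               */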
 +              if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
 +                  UIC_LINK_OFF_STATE) {
 +                      hba->rpm_lvl =
 +                              ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                              UFS_SLEEP_PWR_MODE,
 +                                              UIC_LINK_HIBERN8_STATE);
 +                      dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
 +                              hba->rpm_lvl);
 +              }
 +              if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
 +                  UIC_LINK_OFF_STATE) {
 +                      hba->spm_lvl =
 +                              ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                              UFS_SLEEP_PWR_MODE,
 +                                              UIC_LINK_HIBERN8_STATE);
 +                      dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
 +                              hba->spm_lvl);
 +              }
 +      }
 +}
 +
 +/**
   * ufshcd_probe_hba - probe hba to detect device and initialize
   * @hba: per-adapter instance
   *
  static int ufshcd_probe_hba(struct ufs_hba *hba)
  {
        int ret;
 +      ktime_t start = ktime_get();
  
        ret = ufshcd_link_startup(hba);
        if (ret)
                goto out;
  
 -      ufshcd_init_pwr_info(hba);
 +      /* Debug counters initialization */
 +      ufshcd_clear_dbg_ufs_stats(hba);
 +      /* set the default level for urgent bkops */
 +      hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
 +      hba->is_urgent_bkops_lvl_checked = false;
  
        /* UniPro link is active now */
        ufshcd_set_link_active(hba);
        if (ret)
                goto out;
  
 +      ufs_advertise_fixup_device(hba);
 +      ufshcd_tune_unipro_params(hba);
 +
 +      ufshcd_apply_pm_quirks(hba);
 +      ret = ufshcd_set_vccq_rail_unused(hba,
 +              (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
 +      if (ret)
 +              goto out;
 +
        /* UFS device is also active now */
        ufshcd_set_ufs_dev_active(hba);
        ufshcd_force_reset_auto_bkops(hba);
 -      hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
        hba->wlun_dev_clr_ua = true;
  
        if (ufshcd_get_max_pwr_mode(hba)) {
                        __func__);
        } else {
                ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 -              if (ret)
 +              if (ret) {
                        dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
                                        __func__, ret);
 +                      goto out;
 +              }
        }
  
 +      /* set the state as operational after switching to desired gear */
 +      hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
        /*
         * If we are in error handling context or in power management callbacks
         * context, no need to scan the host
  
                /* clear any previous UFS device information */
                memset(&hba->dev_info, 0, sizeof(hba->dev_info));
 -              if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 -                                     QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
 +              if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 +                              QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
                        hba->dev_info.f_power_on_wp_en = flag;
  
                if (!hba->is_init_prefetch)
                if (ufshcd_scsi_add_wlus(hba))
                        goto out;
  
 +              /* Initialize devfreq after UFS device is detected */
 +              if (ufshcd_is_clkscaling_supported(hba)) {
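 +                      /* save the negotiated power mode for use by clock scaling */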
 +                      memcpy(&hba->clk_scaling.saved_pwr_info.info,
 +                          &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
 +                      hba->clk_scaling.saved_pwr_info.is_valid = true;
 +                      hba->clk_scaling.is_scaled_up = true;
 +                      if (!hba->devfreq) {
 +                              hba->devfreq = devfreq_add_device(hba->dev,
 +                                                      &ufs_devfreq_profile,
 +                                                      "simple_ondemand",
 +                                                      gov_data);
 +                              if (IS_ERR(hba->devfreq)) {
 +                                      ret = PTR_ERR(hba->devfreq);
 +                                      dev_err(hba->dev, "Unable to register with devfreq %d\n",
 +                                              ret);
 +                                      goto out;
 +                              }
 +                      }
 +                      hba->clk_scaling.is_allowed = true;
 +              }
 +
                scsi_scan_host(hba->host);
                pm_runtime_put_sync(hba->dev);
        }
  
 -      if (!hba->is_init_prefetch)
 -              hba->is_init_prefetch = true;
 +      if (!hba->is_init_prefetch)
 +              hba->is_init_prefetch = true;
 +
 +      /*
 +       * Enable auto hibern8 if supported, after full host and
 +       * device initialization.
 +       */
 +      if (ufshcd_is_auto_hibern8_supported(hba))
 +              ufshcd_set_auto_hibern8_timer(hba,
 +                                    hba->hibern8_on_idle.delay_ms);
 +out:
 +      /*
 +       * If we failed to initialize the device or the device is not
 +       * present, turn off the power/clocks etc.
 +       */
 +      if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
 +              pm_runtime_put_sync(hba->dev);
 +              ufshcd_hba_exit(hba);
 +      }
 +
 +      trace_ufshcd_init(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_async_scan - asynchronous execution for probing hba
 + * @data: data pointer to pass to this function
 + * @cookie: cookie data
 + */
 +static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 +{
 +      struct ufs_hba *hba = (struct ufs_hba *)data;
 +
 +      /*
 +       * Don't allow clock gating and hibern8 entry, so that device
 +       * detection is faster.
 +       */
 +      ufshcd_hold_all(hba);
 +      ufshcd_probe_hba(hba);
 +      ufshcd_release_all(hba);
 +}
 +
 +/**
 + * ufshcd_query_ioctl - perform user read queries
 + * @hba: per-adapter instance
 + * @lun: used for lun specific queries
 + * @buffer: user space buffer for reading and submitting query data and params
 + * @return: 0 on success, negative error code otherwise
 + *
 + * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
 + * It reads the opcode, idn and buf_length parameters, and puts the
 + * response in the buffer field while updating the used size in buf_length.
 + */
 +static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
 +{
 +      struct ufs_ioctl_query_data *ioctl_data;
 +      int err = 0;
 +      int length = 0;
 +      void *data_ptr;
 +      bool flag;
 +      u32 att;
 +      u8 index;
 +      u8 *desc = NULL;
 +
 +      ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
 +      if (!ioctl_data) {
 +              dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
 +                              sizeof(struct ufs_ioctl_query_data));
 +              err = -ENOMEM;
 +              goto out;
 +      }
 +
 +      /* extract params from user buffer */
 +      err = copy_from_user(ioctl_data, buffer,
 +                      sizeof(struct ufs_ioctl_query_data));
 +      if (err) {
 +              dev_err(hba->dev,
 +                      "%s: Failed copying buffer from user, err %d\n",
 +                      __func__, err);
 +              goto out_release_mem;
 +      }
 +
 +      /* verify legal parameters & send query */
 +      switch (ioctl_data->opcode) {
 +      case UPIU_QUERY_OPCODE_READ_DESC:
 +              switch (ioctl_data->idn) {
 +              case QUERY_DESC_IDN_DEVICE:
 +              case QUERY_DESC_IDN_CONFIGURAION:
 +              case QUERY_DESC_IDN_INTERCONNECT:
 +              case QUERY_DESC_IDN_GEOMETRY:
 +              case QUERY_DESC_IDN_POWER:
 +                      index = 0;
 +                      break;
 +              case QUERY_DESC_IDN_UNIT:
 +                      if (!ufs_is_valid_unit_desc_lun(lun)) {
 +                              dev_err(hba->dev,
 +                                      "%s: No unit descriptor for lun 0x%x\n",
 +                                      __func__, lun);
 +                              err = -EINVAL;
 +                              goto out_release_mem;
 +                      }
 +                      index = lun;
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              length = min_t(int, QUERY_DESC_MAX_SIZE,
 +                              ioctl_data->buf_size);
 +              desc = kzalloc(length, GFP_KERNEL);
 +              if (!desc) {
 +                      dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
 +                                      __func__, length);
 +                      err = -ENOMEM;
 +                      goto out_release_mem;
 +              }
 +              err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
 +                              ioctl_data->idn, index, 0, desc, &length);
 +              break;
 +      case UPIU_QUERY_OPCODE_READ_ATTR:
 +              switch (ioctl_data->idn) {
 +              case QUERY_ATTR_IDN_BOOT_LU_EN:
 +              case QUERY_ATTR_IDN_POWER_MODE:
 +              case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
 +              case QUERY_ATTR_IDN_OOO_DATA_EN:
 +              case QUERY_ATTR_IDN_BKOPS_STATUS:
 +              case QUERY_ATTR_IDN_PURGE_STATUS:
 +              case QUERY_ATTR_IDN_MAX_DATA_IN:
 +              case QUERY_ATTR_IDN_MAX_DATA_OUT:
 +              case QUERY_ATTR_IDN_REF_CLK_FREQ:
 +              case QUERY_ATTR_IDN_CONF_DESC_LOCK:
 +              case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
 +              case QUERY_ATTR_IDN_EE_CONTROL:
 +              case QUERY_ATTR_IDN_EE_STATUS:
 +              case QUERY_ATTR_IDN_SECONDS_PASSED:
 +                      index = 0;
 +                      break;
 +              case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
 +              case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
 +                      index = lun;
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
 +                                      index, 0, &att);
 +              break;
 +
 +      case UPIU_QUERY_OPCODE_WRITE_ATTR:
 +              err = copy_from_user(&att,
 +                              buffer + sizeof(struct ufs_ioctl_query_data),
 +                              sizeof(u32));
 +              if (err) {
 +                      dev_err(hba->dev,
 +                              "%s: Failed copying buffer from user, err %d\n",
 +                              __func__, err);
 +                      goto out_release_mem;
 +              }
 +
 +              switch (ioctl_data->idn) {
 +              case QUERY_ATTR_IDN_BOOT_LU_EN:
 +                      index = 0;
 +                      if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
 +                              dev_err(hba->dev,
 +                                      "%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
 +                                      __func__, ioctl_data->opcode,
 +                                      (unsigned int)ioctl_data->idn, att);
 +                              err = -EINVAL;
 +                              goto out_release_mem;
 +                      }
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              err = ufshcd_query_attr(hba, ioctl_data->opcode,
 +                                      ioctl_data->idn, index, 0, &att);
 +              break;
 +
 +      case UPIU_QUERY_OPCODE_READ_FLAG:
 +              switch (ioctl_data->idn) {
 +              case QUERY_FLAG_IDN_FDEVICEINIT:
 +              case QUERY_FLAG_IDN_PERMANENT_WPE:
 +              case QUERY_FLAG_IDN_PWR_ON_WPE:
 +              case QUERY_FLAG_IDN_BKOPS_EN:
 +              case QUERY_FLAG_IDN_PURGE_ENABLE:
 +              case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
 +              case QUERY_FLAG_IDN_BUSY_RTC:
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
 +                              ioctl_data->idn, &flag);
 +              break;
 +      default:
 +              goto out_einval;
 +      }
 +
 +      if (err) {
 +              dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
 +                              ioctl_data->idn);
 +              goto out_release_mem;
 +      }
 +
 +      /*
 +       * Copy the response data.
 +       * We might end up reading less data than what is specified in
 +       * "ioctl_data->buf_size", so update "ioctl_data->buf_size" to the
 +       * amount actually read.
 +       */
 +      switch (ioctl_data->opcode) {
 +      case UPIU_QUERY_OPCODE_READ_DESC:
 +              ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
 +              data_ptr = desc;
 +              break;
 +      case UPIU_QUERY_OPCODE_READ_ATTR:
 +              ioctl_data->buf_size = sizeof(u32);
 +              data_ptr = &att;
 +              break;
 +      case UPIU_QUERY_OPCODE_READ_FLAG:
 +              ioctl_data->buf_size = 1;
 +              data_ptr = &flag;
 +              break;
 +      case UPIU_QUERY_OPCODE_WRITE_ATTR:
 +              goto out_release_mem;
 +      default:
 +              goto out_einval;
 +      }
 +
 +      /* copy to user */
 +      err = copy_to_user(buffer, ioctl_data,
 +                      sizeof(struct ufs_ioctl_query_data));
 +      if (err)
 +              dev_err(hba->dev, "%s: Failed copying back to user.\n",
 +                      __func__);
 +      err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
 +                      data_ptr, ioctl_data->buf_size);
 +      if (err)
 +              dev_err(hba->dev, "%s: err %d copying back to user.\n",
 +                              __func__, err);
 +      goto out_release_mem;
 +
 +out_einval:
 +      dev_err(hba->dev,
 +              "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
 +              __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
 +      err = -EINVAL;
 +out_release_mem:
 +      kfree(ioctl_data);
 +      kfree(desc);
 +out:
 +      return err;
 +}
  
 -      /* Resume devfreq after UFS device is detected */
 -      if (ufshcd_is_clkscaling_enabled(hba))
 -              devfreq_resume_device(hba->devfreq);
 +/**
 + * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
 + * @dev: scsi device required for per LUN queries
 + * @cmd: command opcode
 + * @buffer: user space buffer for transferring data
 + *
 + * Supported commands:
 + * UFS_IOCTL_QUERY
 + */
 +static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
 +{
 +      struct ufs_hba *hba = shost_priv(dev->host);
 +      int err = 0;
  
 -out:
 -      /*
 -       * If we failed to initialize the device or the device is not
 -       * present, turn off the power/clocks etc.
 -       */
 -      if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
 +      BUG_ON(!hba);
 +      if (!buffer) {
 +              dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
 +              return -EINVAL;
 +      }
 +
 +      switch (cmd) {
 +      case UFS_IOCTL_QUERY:
 +              pm_runtime_get_sync(hba->dev);
 +              err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
 +                              buffer);
                pm_runtime_put_sync(hba->dev);
 -              ufshcd_hba_exit(hba);
 +              break;
 +      default:
 +              err = -ENOIOCTLCMD;
 +              dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
 +                      cmd);
 +              break;
        }
  
 -      return ret;
 +      return err;
  }
  
 -/**
 - * ufshcd_async_scan - asynchronous execution for probing hba
 - * @data: data pointer to pass to this function
 - * @cookie: cookie data
 - */
 -static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 +static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
  {
 -      struct ufs_hba *hba = (struct ufs_hba *)data;
 +      unsigned long flags;
 +      struct Scsi_Host *host;
 +      struct ufs_hba *hba;
 +      int index;
 +      bool found = false;
  
 -      ufshcd_probe_hba(hba);
 +      if (!scmd || !scmd->device || !scmd->device->host)
 +              return BLK_EH_NOT_HANDLED;
 +
 +      host = scmd->device->host;
 +      hba = shost_priv(host);
 +      if (!hba)
 +              return BLK_EH_NOT_HANDLED;
 +
 +      spin_lock_irqsave(host->host_lock, flags);
 +
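 +      /* check if the timed-out command is still outstanding on this host */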
 +      for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
 +              if (hba->lrb[index].cmd == scmd) {
 +                      found = true;
 +                      break;
 +              }
 +      }
 +
 +      spin_unlock_irqrestore(host->host_lock, flags);
 +
 +      /*
 +       * Bypass SCSI error handling and reset the block layer timer if this
 +       * SCSI command was not actually dispatched to the UFS driver; otherwise
 +       * let the SCSI layer handle the error as usual.
 +       */
 +      return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
  }
  
  static struct scsi_host_template ufshcd_driver_template = {
        .eh_abort_handler       = ufshcd_abort,
        .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
        .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
 +      .eh_timed_out           = ufshcd_eh_timed_out,
 +      .ioctl                  = ufshcd_ioctl,
 +#ifdef CONFIG_COMPAT
 +      .compat_ioctl           = ufshcd_ioctl,
 +#endif
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
@@@ -7971,24 -4441,13 +7972,24 @@@ static int ufshcd_config_vreg_load(stru
  static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
  {
 -      return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
 +      if (!vreg)
 +              return 0;
 +      else if (vreg->unused)
 +              return 0;
 +      else
 +              return ufshcd_config_vreg_load(hba->dev, vreg,
 +                                             UFS_VREG_LPM_LOAD_UA);
  }
  
  static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
  {
 -      return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 +      if (!vreg)
 +              return 0;
 +      else if (vreg->unused)
 +              return 0;
 +      else
 +              return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
  }
  
  static int ufshcd_config_vreg(struct device *dev,
        name = vreg->name;
  
        if (regulator_count_voltages(reg) > 0) {
 +              uA_load = on ? vreg->max_uA : 0;
 +              ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
 +              if (ret)
 +                      goto out;
 +
                if (vreg->min_uV && vreg->max_uV) {
                        min_uV = on ? vreg->min_uV : 0;
                        ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
                                goto out;
                        }
                }
 -
 -              uA_load = on ? vreg->max_uA : 0;
 -              ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
 -              if (ret)
 -                      goto out;
        }
  out:
        return ret;
@@@ -8029,9 -4488,7 +8030,9 @@@ static int ufshcd_enable_vreg(struct de
  {
        int ret = 0;
  
 -      if (!vreg || vreg->enabled)
 +      if (!vreg)
 +              goto out;
 +      else if (vreg->enabled || vreg->unused)
                goto out;
  
        ret = ufshcd_config_vreg(dev, vreg, true);
@@@ -8051,9 -4508,7 +8052,9 @@@ static int ufshcd_disable_vreg(struct d
  {
        int ret = 0;
  
 -      if (!vreg || !vreg->enabled)
 +      if (!vreg)
 +              goto out;
 +      else if (!vreg->enabled || vreg->unused)
                goto out;
  
        ret = regulator_disable(vreg->reg);
  static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
  {
        struct ufs_vreg_info *info = &hba->vreg_info;
 +      int ret = 0;
  
 -      if (info)
 -              return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 +      if (info->vdd_hba) {
 +              ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
  
 -      return 0;
 +              if (!ret)
 +                      ufshcd_vops_update_sec_cfg(hba, on);
 +      }
 +
 +      return ret;
  }
  
  static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
@@@ -8164,73 -4614,22 +8165,73 @@@ static int ufshcd_init_hba_vreg(struct 
        return 0;
  }
  
 -static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 -                                      bool skip_ref_clk)
 +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
 +{
 +      int ret = 0;
 +      struct ufs_vreg_info *info = &hba->vreg_info;
 +
 +      if (!info)
 +              goto out;
 +      else if (!info->vccq)
 +              goto out;
 +
 +      if (unused) {
 +              /* shut off the rail here */
 +              ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
 +              /*
 +               * Mark this rail as no longer used, so it doesn't get enabled
 +               * later by mistake
 +               */
 +              if (!ret)
 +                      info->vccq->unused = true;
 +      } else {
 +              /*
 +               * The rail should already be enabled, so just make sure
 +               * that the unused flag is cleared.
 +               */
 +              info->vccq->unused = false;
 +      }
 +out:
 +      return ret;
 +}
 +
 +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 +                             bool skip_ref_clk, bool is_gating_context)
  {
        int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
        unsigned long flags;
 +      ktime_t start = ktime_get();
 +      bool clk_state_changed = false;
  
        if (!head || list_empty(head))
                goto out;
  
 +      /* call vendor specific bus vote before enabling the clocks */
 +      if (on) {
 +              ret = ufshcd_vops_set_bus_vote(hba, on);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      /*
 +       * vendor specific setup_clocks ops may depend on clocks managed by
 +       * this standard driver hence call the vendor specific setup_clocks
 +       * before disabling the clocks managed here.
 +       */
 +      if (!on) {
 +              ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
 +              if (ret)
 +                      return ret;
 +      }
 +
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
                                continue;
  
 +                      clk_state_changed = on ^ clki->enabled;
                        if (on && !clki->enabled) {
                                ret = clk_prepare_enable(clki->clk);
                                if (ret) {
                }
        }
  
 -      ret = ufshcd_vops_setup_clocks(hba, on);
 +      /*
 +       * vendor specific setup_clocks ops may depend on clocks managed by
 +       * this standard driver hence call the vendor specific setup_clocks
 +       * after enabling the clocks managed here.
 +       */
 +      if (on) {
 +              ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
 +              if (ret)
 +                      goto out;
 +      }
 +
 +      /*
 +       * call vendor specific bus vote to remove the vote after
 +       * disabling the clocks.
 +       */
 +      if (!on)
 +              ret = ufshcd_vops_set_bus_vote(hba, on);
 +
  out:
        if (ret) {
 +              if (on)
 +                      /* Can't do much if this fails */
 +                      (void) ufshcd_vops_set_bus_vote(hba, false);
                list_for_each_entry(clki, head, list) {
                        if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
                                clk_disable_unprepare(clki->clk);
                }
 -      } else if (on) {
 +      } else if (!ret && on) {
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.state = CLKS_ON;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
                spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              /* restore the secure configuration as clocks are enabled */
 +              ufshcd_vops_update_sec_cfg(hba, true);
        }
 +
 +      if (clk_state_changed)
 +              trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
 +                      (on ? "on" : "off"),
 +                      ktime_to_us(ktime_sub(ktime_get(), start)), ret);
        return ret;
  }
  
 -static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 +static int ufshcd_enable_clocks(struct ufs_hba *hba)
 +{
 +      return  ufshcd_setup_clocks(hba, true, false, false);
 +}
 +
 +static int ufshcd_disable_clocks(struct ufs_hba *hba,
 +                               bool is_gating_context)
 +{
 +      return  ufshcd_setup_clocks(hba, false, false, is_gating_context);
 +}
 +
 +static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
 +                                            bool is_gating_context)
  {
 -      return  __ufshcd_setup_clocks(hba, on, false);
 +      return  ufshcd_setup_clocks(hba, false, true, is_gating_context);
  }
  
  static int ufshcd_init_clocks(struct ufs_hba *hba)
@@@ -8351,7 -4709,7 +8352,7 @@@ static int ufshcd_variant_hba_init(stru
  {
        int err = 0;
  
 -      if (!hba->vops)
 +      if (!hba->var || !hba->var->vops)
                goto out;
  
        err = ufshcd_vops_init(hba);
@@@ -8375,9 -4733,11 +8376,9 @@@ out
  
  static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
  {
 -      if (!hba->vops)
 +      if (!hba->var || !hba->var->vops)
                return;
  
 -      ufshcd_vops_setup_clocks(hba, false);
 -
        ufshcd_vops_setup_regulators(hba, false);
  
        ufshcd_vops_exit(hba);
@@@ -8406,7 -4766,7 +8407,7 @@@ static int ufshcd_hba_init(struct ufs_h
        if (err)
                goto out_disable_hba_vreg;
  
 -      err = ufshcd_setup_clocks(hba, true);
 +      err = ufshcd_enable_clocks(hba);
        if (err)
                goto out_disable_hba_vreg;
  
  out_disable_vreg:
        ufshcd_setup_vreg(hba, false);
  out_disable_clks:
 -      ufshcd_setup_clocks(hba, false);
 +      ufshcd_disable_clocks(hba, false);
  out_disable_hba_vreg:
        ufshcd_setup_hba_vreg(hba, false);
  out:
@@@ -8440,13 -4800,7 +8441,13 @@@ static void ufshcd_hba_exit(struct ufs_
        if (hba->is_powered) {
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
 -              ufshcd_setup_clocks(hba, false);
 +              if (ufshcd_is_clkscaling_supported(hba)) {
 +                      if (hba->devfreq)
 +                              ufshcd_suspend_clkscaling(hba);
 +                      if (hba->clk_scaling.workq)
 +                              destroy_workqueue(hba->clk_scaling.workq);
 +              }
 +              ufshcd_disable_clocks(hba, false);
                ufshcd_setup_hba_vreg(hba, false);
                hba->is_powered = false;
        }
@@@ -8459,19 -4813,19 +8460,19 @@@ ufshcd_send_request_sense(struct ufs_hb
                                0,
                                0,
                                0,
 -                              SCSI_SENSE_BUFFERSIZE,
 +                              UFSHCD_REQ_SENSE_SIZE,
                                0};
        char *buffer;
        int ret;
  
 -      buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
 +      buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                goto out;
        }
  
        ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
 -                              SCSI_SENSE_BUFFERSIZE, NULL,
 +                              UFSHCD_REQ_SENSE_SIZE, NULL,
                                msecs_to_jiffies(1000), 3, NULL, REQ_PM);
        if (ret)
                pr_err("%s: failed with err %d\n", __func__, ret);
@@@ -8579,20 -4933,10 +8580,20 @@@ static int ufshcd_link_state_transition
                   (!check_for_bkops || (check_for_bkops &&
                    !hba->auto_bkops_enabled))) {
                /*
 +               * Make sure the link is in low power mode; we currently do this
 +               * by putting the link in Hibern8. Another way to put the link in
 +               * low power mode is to send a DME end point reset to the device
 +               * and then a DME reset command to the local UniPro, but putting
 +               * the link in Hibern8 is much faster.
 +               */
 +              ret = ufshcd_uic_hibern8_enter(hba);
 +              if (ret)
 +                      goto out;
 +              /*
                 * Change controller state to "reset state" which
                 * should also put the link in off/reset state
                 */
 -              ufshcd_hba_stop(hba);
 +              ufshcd_hba_stop(hba, true);
                /*
                 * TODO: Check if we need any delay to make sure that
                 * controller is reset
  static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
  {
        /*
 +       * It seems some UFS devices may keep drawing more than the sleep
 +       * current (at least for 500 us) from the UFS rails (especially from
 +       * the VCCQ rail). To avoid this situation, add a 2 ms delay before
 +       * putting these UFS rails into LPM mode.
 +       */
 +      if (!ufshcd_is_link_active(hba))
 +              usleep_range(2000, 2100);
 +
 +      /*
         * If UFS device is either in UFS_Sleep turn off VCC rail to save some
         * power.
         *
@@@ -8647,6 -4982,7 +8648,6 @@@ static int ufshcd_vreg_set_hpm(struct u
            !hba->dev_info.is_lu_power_on_wp) {
                ret = ufshcd_setup_vreg(hba, true);
        } else if (!ufshcd_is_ufs_dev_active(hba)) {
 -              ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
                if (!ret && !ufshcd_is_link_active(hba)) {
                        ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
                        if (ret)
                        if (ret)
                                goto vccq_lpm;
                }
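 +              /* turn VCC back on only after the VCCQ rails are set to HPM */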
 +              ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
        }
        goto out;
  
  
  static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
  {
 -      if (ufshcd_is_link_off(hba))
 +      if (ufshcd_is_link_off(hba) ||
 +          (ufshcd_is_link_hibern8(hba)
 +           && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
                ufshcd_setup_hba_vreg(hba, false);
  }
  
  static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
  {
 -      if (ufshcd_is_link_off(hba))
 +      if (ufshcd_is_link_off(hba) ||
 +          (ufshcd_is_link_hibern8(hba)
 +           && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
                ufshcd_setup_hba_vreg(hba, true);
  }
  
@@@ -8721,17 -5052,8 +8722,17 @@@ static int ufshcd_suspend(struct ufs_hb
         * If we can't transition into any of the low power modes
         * just gate the clocks.
         */
 -      ufshcd_hold(hba, false);
 +      WARN_ON(hba->hibern8_on_idle.is_enabled &&
 +              hba->hibern8_on_idle.active_reqs);
 +      ufshcd_hold_all(hba);
        hba->clk_gating.is_suspended = true;
 +      hba->hibern8_on_idle.is_suspended = true;
 +
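 +      /* stop any pending clock scaling work before suspending */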
 +      if (hba->clk_scaling.is_allowed) {
 +              cancel_work_sync(&hba->clk_scaling.suspend_work);
 +              cancel_work_sync(&hba->clk_scaling.resume_work);
 +              ufshcd_suspend_clkscaling(hba);
 +      }
  
        if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
                        req_link_state == UIC_LINK_ACTIVE_STATE) {
  
        if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
            (req_link_state == hba->uic_link_state))
 -              goto out;
 +              goto enable_gating;
  
        /* UFS device & link must be active before we enter in this function */
        if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
                ret = -EINVAL;
 -              goto out;
 +              goto enable_gating;
        }
  
        if (ufshcd_is_runtime_pm(pm_op)) {
        if (ret)
                goto set_dev_active;
  
 +      if (ufshcd_is_link_hibern8(hba) &&
 +          ufshcd_is_hibern8_on_idle_allowed(hba))
 +              hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 +
        ufshcd_vreg_set_lpm(hba);
  
  disable_clks:
        /*
 -       * The clock scaling needs access to controller registers. Hence, Wait
 -       * for pending clock scaling work to be done before clocks are
 -       * turned off.
 -       */
 -      if (ufshcd_is_clkscaling_enabled(hba)) {
 -              devfreq_suspend_device(hba->devfreq);
 -              hba->clk_scaling.window_start_t = 0;
 -      }
 -      /*
         * Call vendor specific suspend callback. As these callbacks may access
         * vendor specific host controller register space call them before the
         * host clocks are ON.
        if (ret)
                goto set_link_active;
  
 -      ret = ufshcd_vops_setup_clocks(hba, false);
 -      if (ret)
 -              goto vops_resume;
 -
        if (!ufshcd_is_link_active(hba))
 -              ufshcd_setup_clocks(hba, false);
 +              ret = ufshcd_disable_clocks(hba, false);
        else
                /* If link is active, device ref_clk can't be switched off */
 -              __ufshcd_setup_clocks(hba, false, true);
 +              ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
 +      if (ret)
 +              goto set_link_active;
  
 -      hba->clk_gating.state = CLKS_OFF;
 +      if (ufshcd_is_clkgating_allowed(hba)) {
 +              hba->clk_gating.state = CLKS_OFF;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                                      hba->clk_gating.state);
 +      }
        /*
         * Disable the host irq as host controller as there won't be any
         * host controller transaction expected till resume.
        ufshcd_hba_vreg_set_lpm(hba);
        goto out;
  
 -vops_resume:
 -      ufshcd_vops_resume(hba, pm_op);
  set_link_active:
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_resume_clkscaling(hba);
        ufshcd_vreg_set_hpm(hba);
 -      if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
 +      if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
                ufshcd_set_link_active(hba);
 -      else if (ufshcd_is_link_off(hba))
 +      } else if (ufshcd_is_link_off(hba)) {
 +              ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
                ufshcd_host_reset_and_restore(hba);
 +      }
  set_dev_active:
        if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
                ufshcd_disable_auto_bkops(hba);
  enable_gating:
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_resume_clkscaling(hba);
 +      hba->hibern8_on_idle.is_suspended = false;
        hba->clk_gating.is_suspended = false;
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
  out:
        hba->pm_op_in_progress = 0;
 +
 +      if (ret)
 +              ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
 +
        return ret;
  }
  
@@@ -8864,12 -5180,14 +8865,12 @@@ static int ufshcd_resume(struct ufs_hb
  
        ufshcd_hba_vreg_set_hpm(hba);
        /* Make sure clocks are enabled before accessing controller */
 -      ret = ufshcd_setup_clocks(hba, true);
 +      ret = ufshcd_enable_clocks(hba);
        if (ret)
                goto out;
  
        /* enable the host irq as host controller would be active soon */
 -      ret = ufshcd_enable_irq(hba);
 -      if (ret)
 -              goto disable_irq_and_vops_clks;
 +      ufshcd_enable_irq(hba);
  
        ret = ufshcd_vreg_set_hpm(hba);
        if (ret)
  
        if (ufshcd_is_link_hibern8(hba)) {
                ret = ufshcd_uic_hibern8_exit(hba);
 -              if (!ret)
 +              if (!ret) {
                        ufshcd_set_link_active(hba);
 -              else
 +                      if (ufshcd_is_hibern8_on_idle_allowed(hba))
 +                              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              } else {
                        goto vendor_suspend;
 +              }
        } else if (ufshcd_is_link_off(hba)) {
 -              ret = ufshcd_host_reset_and_restore(hba);
                /*
 -               * ufshcd_host_reset_and_restore() should have already
 +               * A full initialization of the host and the device is required
 +               * since the link was put into the off state during suspend.
 +               */
 +              ret = ufshcd_reset_and_restore(hba);
 +              /*
 +               * ufshcd_reset_and_restore() should have already
                 * set the link state as active
                 */
                if (ret || !ufshcd_is_link_active(hba))
                        goto vendor_suspend;
 +              /* mark link state as hibern8 exited */
 +              if (ufshcd_is_hibern8_on_idle_allowed(hba))
 +                      hba->hibern8_on_idle.state = HIBERN8_EXITED;
        }
  
        if (!ufshcd_is_ufs_dev_active(hba)) {
                ufshcd_urgent_bkops(hba);
  
        hba->clk_gating.is_suspended = false;
 +      hba->hibern8_on_idle.is_suspended = false;
  
 -      if (ufshcd_is_clkscaling_enabled(hba))
 -              devfreq_resume_device(hba->devfreq);
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_resume_clkscaling(hba);
  
        /* Schedule clock gating in case of no access to UFS device yet */
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        goto out;
  
  set_old_link_state:
        ufshcd_link_state_transition(hba, old_link_state, 0);
 +      if (ufshcd_is_link_hibern8(hba) &&
 +          ufshcd_is_hibern8_on_idle_allowed(hba))
 +              hba->hibern8_on_idle.state = HIBERN8_ENTERED;
  vendor_suspend:
        ufshcd_vops_suspend(hba, pm_op);
  disable_vreg:
        ufshcd_vreg_set_lpm(hba);
  disable_irq_and_vops_clks:
        ufshcd_disable_irq(hba);
 -      ufshcd_setup_clocks(hba, false);
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_suspend_clkscaling(hba);
 +      ufshcd_disable_clocks(hba, false);
 +      if (ufshcd_is_clkgating_allowed(hba))
 +              hba->clk_gating.state = CLKS_OFF;
  out:
        hba->pm_op_in_progress = 0;
 +
 +      if (ret)
 +              ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
 +
        return ret;
  }
  
  int ufshcd_system_suspend(struct ufs_hba *hba)
  {
        int ret = 0;
 +      ktime_t start = ktime_get();
  
        if (!hba || !hba->is_powered)
                return 0;
  
 -      if (pm_runtime_suspended(hba->dev)) {
 -              if (hba->rpm_lvl == hba->spm_lvl)
 -                      /*
 -                       * There is possibility that device may still be in
 -                       * active state during the runtime suspend.
 -                       */
 -                      if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
 -                          hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
 -                              goto out;
 +      if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
 +           hba->curr_dev_pwr_mode) &&
 +          (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
 +           hba->uic_link_state))
 +              goto out;
  
 +      if (pm_runtime_suspended(hba->dev)) {
                /*
                 * UFS device and/or UFS link low power states during runtime
                 * suspend seems to be different than what is expected during
  
        ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
  out:
 +      trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
        if (!ret)
                hba->is_sys_suspended = true;
        return ret;
@@@ -9017,9 -5312,6 +9018,9 @@@ EXPORT_SYMBOL(ufshcd_system_suspend)
  
  int ufshcd_system_resume(struct ufs_hba *hba)
  {
 +      int ret = 0;
 +      ktime_t start = ktime_get();
 +
        if (!hba)
                return -EINVAL;
  
                 * Let the runtime resume take care of resuming
                 * if runtime suspended.
                 */
 -              return 0;
 -
 -      return ufshcd_resume(hba, UFS_SYSTEM_PM);
 +              goto out;
 +      else
 +              ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
 +out:
 +      trace_ufshcd_system_resume(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
 +      return ret;
  }
  EXPORT_SYMBOL(ufshcd_system_resume);
  
   */
  int ufshcd_runtime_suspend(struct ufs_hba *hba)
  {
 +      int ret = 0;
 +      ktime_t start = ktime_get();
 +
        if (!hba)
                return -EINVAL;
  
        if (!hba->is_powered)
 -              return 0;
 +              goto out;
 +      else
 +              ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
 +out:
 +      trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode,
 +              hba->uic_link_state);
 +      return ret;
  
 -      return ufshcd_suspend(hba, UFS_RUNTIME_PM);
  }
  EXPORT_SYMBOL(ufshcd_runtime_suspend);
  
   */
  int ufshcd_runtime_resume(struct ufs_hba *hba)
  {
 +      int ret = 0;
 +      ktime_t start = ktime_get();
 +
        if (!hba)
                return -EINVAL;
  
        if (!hba->is_powered)
 -              return 0;
 -
 -      return ufshcd_resume(hba, UFS_RUNTIME_PM);
 +              goto out;
 +      else
 +              ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
 +out:
 +      trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode,
 +              hba->uic_link_state);
 +      return ret;
  }
  EXPORT_SYMBOL(ufshcd_runtime_resume);
  
@@@ -9117,157 -5385,6 +9118,157 @@@ int ufshcd_runtime_idle(struct ufs_hba 
  }
  EXPORT_SYMBOL(ufshcd_runtime_idle);
  
 +static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
 +                                         struct device_attribute *attr,
 +                                         const char *buf, size_t count,
 +                                         bool rpm)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      if (value >= UFS_PM_LVL_MAX)
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (rpm)
 +              hba->rpm_lvl = value;
 +      else
 +              hba->spm_lvl = value;
 +      ufshcd_apply_pm_quirks(hba);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
 +}
 +
 +static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      int curr_len;
 +      u8 lvl;
 +
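 +      /* print the current level first, then all available levels */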
 +      curr_len = snprintf(buf, PAGE_SIZE,
 +                          "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                          hba->rpm_lvl,
 +                          ufschd_ufs_dev_pwr_mode_to_string(
 +                              ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
 +                          ufschd_uic_link_state_to_string(
 +                              ufs_pm_lvl_states[hba->rpm_lvl].link_state));
 +
 +      curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                           "\nAll available Runtime PM levels info:\n");
 +      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
 +              curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                                   "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                                  lvl,
 +                                  ufschd_ufs_dev_pwr_mode_to_string(
 +                                      ufs_pm_lvl_states[lvl].dev_state),
 +                                  ufschd_uic_link_state_to_string(
 +                                      ufs_pm_lvl_states[lvl].link_state));
 +
 +      return curr_len;
 +}
 +
 +static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
 +}
 +
 +static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
 +{
 +      hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
 +      hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
 +      sysfs_attr_init(&hba->rpm_lvl_attr.attr);
 +      hba->rpm_lvl_attr.attr.name = "rpm_lvl";
 +      hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
 +}
 +
 +static ssize_t ufshcd_spm_lvl_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      int curr_len;
 +      u8 lvl;
 +
 +      curr_len = snprintf(buf, PAGE_SIZE,
 +                          "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                          hba->spm_lvl,
 +                          ufschd_ufs_dev_pwr_mode_to_string(
 +                              ufs_pm_lvl_states[hba->spm_lvl].dev_state),
 +                          ufschd_uic_link_state_to_string(
 +                              ufs_pm_lvl_states[hba->spm_lvl].link_state));
 +
 +      curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                           "\nAll available System PM levels info:\n");
 +      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
 +              curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                                   "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                                  lvl,
 +                                  ufschd_ufs_dev_pwr_mode_to_string(
 +                                      ufs_pm_lvl_states[lvl].dev_state),
 +                                  ufschd_uic_link_state_to_string(
 +                                      ufs_pm_lvl_states[lvl].link_state));
 +
 +      return curr_len;
 +}
 +
 +static ssize_t ufshcd_spm_lvl_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
 +}
 +
 +static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
 +{
 +      hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
 +      hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
 +      sysfs_attr_init(&hba->spm_lvl_attr.attr);
 +      hba->spm_lvl_attr.attr.name = "spm_lvl";
 +      hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->spm_lvl_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
 +}
 +
 +static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
 +{
 +      ufshcd_add_rpm_lvl_sysfs_nodes(hba);
 +      ufshcd_add_spm_lvl_sysfs_nodes(hba);
 +}
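
The rpm_lvl and spm_lvl attributes registered above are ordinary sysfs files, so they can be exercised from userspace with plain file I/O. The sketch below is illustrative only and is not part of this patch: the sysfs path and the level value written are assumptions, since the actual device path and the set of valid levels depend on how the UFS host controller is enumerated on a given platform.

/* Hypothetical userspace helper; not part of the kernel patch above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; the real parent device node is platform specific. */
	const char *path = "/sys/devices/platform/soc/ufshc/rpm_lvl";
	char buf[1024];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open rpm_lvl");
		return 1;
	}

	/* Dump the current level and the table built by ufshcd_rpm_lvl_show(). */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}

	/* Request runtime PM level 3 (assumed valid); the store op validates it. */
	lseek(fd, 0, SEEK_SET);
	if (write(fd, "3", 1) < 0)
		perror("write rpm_lvl");

	close(fd);
	return 0;
}
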
 +
 +static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
 +{
 +      bool suspend = false;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_scaling.is_allowed) {
 +              hba->clk_scaling.is_allowed = false;
 +              suspend = true;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      /*
 +       * Clock scaling work may have been scheduled earlier, so make
 +       * sure it does not race with shutdown.
 +       */
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
 +              cancel_work_sync(&hba->clk_scaling.suspend_work);
 +              cancel_work_sync(&hba->clk_scaling.resume_work);
 +              if (suspend)
 +                      ufshcd_suspend_clkscaling(hba);
 +      }
 +
 +      /* Unregister so that devfreq_monitor can't race with shutdown */
 +      if (hba->devfreq)
 +              devfreq_remove_device(hba->devfreq);
 +}
 +
  /**
   * ufshcd_shutdown - shutdown routine
   * @hba: per adapter instance
@@@ -9283,25 -5400,12 +9284,25 @@@ int ufshcd_shutdown(struct ufs_hba *hba
        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
                goto out;
  
 -      if (pm_runtime_suspended(hba->dev)) {
 -              ret = ufshcd_runtime_resume(hba);
 -              if (ret)
 -                      goto out;
 -      }
 -
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_hold_all(hba);
 +      ufshcd_mark_shutdown_ongoing(hba);
 +      ufshcd_shutdown_clkscaling(hba);
 +      /*
 +       * (1) Acquire the lock to stop any more requests
 +       * (2) Wait for all issued requests to complete
 +       */
 +      ufshcd_get_write_lock(hba);
 +      ufshcd_scsi_block_requests(hba);
 +      ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 +      if (ret)
 +              dev_err(hba->dev, "%s: waiting for DB clear failed: %d\n",
 +                      __func__, ret);
 +      /* Requests may have errored out above; let the error handler deal with them */
 +      flush_work(&hba->eh_work);
 +      /* Requests issued from contexts other than shutdown will fail from now on */
 +      ufshcd_scsi_unblock_requests(hba);
 +      ufshcd_release_all(hba);
        ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
  out:
        if (ret)
@@@ -9376,17 -5480,13 +9377,17 @@@ void ufshcd_remove(struct ufs_hba *hba
        scsi_remove_host(hba->host);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
 -      ufshcd_hba_stop(hba);
 +      ufshcd_hba_stop(hba, true);
  
        ufshcd_exit_clk_gating(hba);
 -      ufshcd_exit_latency_hist(hba);
 -      if (ufshcd_is_clkscaling_enabled(hba))
 +      ufshcd_exit_hibern8_on_idle(hba);
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
 +              ufshcd_exit_latency_hist(hba);
                devfreq_remove_device(hba->devfreq);
 +      }
        ufshcd_hba_exit(hba);
 +      ufsdbg_remove_debugfs(hba);
  }
  EXPORT_SYMBOL_GPL(ufshcd_remove);
  
@@@ -9452,370 -5552,66 +9453,370 @@@ out_error
  }
  EXPORT_SYMBOL(ufshcd_alloc_host);
  
 -static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 +/**
 + * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 + * @hba: per adapter instance
 + * @scale_up: True if scaling up and false if scaling down
 + *
 + * Returns true if scaling is required, false otherwise.
 + */
 +static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
 +                                             bool scale_up)
  {
 -      int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
  
        if (!head || list_empty(head))
 -              goto out;
 -
 -      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
 -      if (ret)
 -              return ret;
 +              return false;
  
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (scale_up && clki->max_freq) {
                                if (clki->curr_freq == clki->max_freq)
                                        continue;
 -                              ret = clk_set_rate(clki->clk, clki->max_freq);
 -                              if (ret) {
 -                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 -                                              __func__, clki->name,
 -                                              clki->max_freq, ret);
 -                                      break;
 -                              }
 -                              clki->curr_freq = clki->max_freq;
 -
 +                              return true;
                        } else if (!scale_up && clki->min_freq) {
                                if (clki->curr_freq == clki->min_freq)
                                        continue;
 -                              ret = clk_set_rate(clki->clk, clki->min_freq);
 -                              if (ret) {
 -                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 -                                              __func__, clki->name,
 -                                              clki->min_freq, ret);
 -                                      break;
 -                              }
 -                              clki->curr_freq = clki->min_freq;
 +                              return true;
                        }
                }
 -              dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
 -                              clki->name, clk_get_rate(clki->clk));
        }
  
 -      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
 +      return false;
 +}
 +
 +/**
 + * ufshcd_scale_gear - scale up/down UFS gear
 + * @hba: per adapter instance
 + * @scale_up: True for scaling up gear and false for scaling down
 + *
 + * Returns 0 for success,
 + * Returns -EBUSY if scaling can't happen at this time
 + * Returns non-zero for any other errors
 + */
 +static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +      struct ufs_pa_layer_attr new_pwr_info;
 +      u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
 +
 +      BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
 +
 +      if (scale_up) {
 +              memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
 +                     sizeof(struct ufs_pa_layer_attr));
 +              /*
 +               * Some UFS devices may stop responding after switching from
 +               * HS-G1 to HS-G3. These devices are found to work fine if a
 +               * two-step switch is done instead: HS-G1 to HS-G2 followed by
 +               * HS-G2 to HS-G3. If the UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH
 +               * quirk is enabled for such a device, this two-step gear switch
 +               * workaround is applied.
 +               */
 +              if ((hba->dev_quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
 +                  && (hba->pwr_info.gear_tx == UFS_HS_G1)
 +                  && (new_pwr_info.gear_tx == UFS_HS_G3)) {
 +                      /* scale up to G2 first */
 +                      new_pwr_info.gear_tx = UFS_HS_G2;
 +                      new_pwr_info.gear_rx = UFS_HS_G2;
 +                      ret = ufshcd_change_power_mode(hba, &new_pwr_info);
 +                      if (ret)
 +                              goto out;
 +
 +                      /* scale up to G3 now */
 +                      new_pwr_info.gear_tx = UFS_HS_G3;
 +                      new_pwr_info.gear_rx = UFS_HS_G3;
 +                      /* now, fall through to set the HS-G3 */
 +              }
 +              ret = ufshcd_change_power_mode(hba, &new_pwr_info);
 +              if (ret)
 +                      goto out;
 +      } else {
 +              memcpy(&new_pwr_info, &hba->pwr_info,
 +                     sizeof(struct ufs_pa_layer_attr));
 +
 +              if (hba->pwr_info.gear_tx > scale_down_gear
 +                  || hba->pwr_info.gear_rx > scale_down_gear) {
 +                      /* save the current power mode */
 +                      memcpy(&hba->clk_scaling.saved_pwr_info.info,
 +                              &hba->pwr_info,
 +                              sizeof(struct ufs_pa_layer_attr));
 +
 +                      /* scale down gear */
 +                      new_pwr_info.gear_tx = scale_down_gear;
 +                      new_pwr_info.gear_rx = scale_down_gear;
 +                      if (!(hba->dev_quirks & UFS_DEVICE_NO_FASTAUTO)) {
 +                              new_pwr_info.pwr_tx = FASTAUTO_MODE;
 +                              new_pwr_info.pwr_rx = FASTAUTO_MODE;
 +                      }
 +              }
 +              ret = ufshcd_change_power_mode(hba, &new_pwr_info);
 +      }
 +
 +out:
 +      if (ret)
 +              dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
 +                      __func__, ret,
 +                      hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
 +                      new_pwr_info.gear_tx, new_pwr_info.gear_rx,
 +                      scale_up);
 +
 +      return ret;
 +}
 +
 +static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 +{
 +      #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
 +      int ret = 0;
 +      /*
 +       * make sure that there are no outstanding requests when
 +       * clock scaling is in progress
 +       */
 +      ufshcd_scsi_block_requests(hba);
 +      down_write(&hba->lock);
 +      if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 +              ret = -EBUSY;
 +              up_write(&hba->lock);
 +              ufshcd_scsi_unblock_requests(hba);
 +      }
 +
 +      return ret;
 +}
 +
 +static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
 +{
 +      up_write(&hba->lock);
 +      ufshcd_scsi_unblock_requests(hba);
 +}
 +
 +/**
 + * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 + * @hba: per adapter instance
 + * @scale_up: True for scaling up and false for scaling down
 + *
 + * Returns 0 for success,
 + * Returns -EBUSY if scaling can't happen at this time
 + * Returns non-zero for any other errors
 + */
 +static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +
 +      /* let's not get into low power until clock scaling is completed */
 +      hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
 +      ufshcd_hold_all(hba);
 +
 +      ret = ufshcd_clock_scaling_prepare(hba);
 +      if (ret)
 +              goto out;
 +
 +      /* scale down the gear before scaling down clocks */
 +      if (!scale_up) {
 +              ret = ufshcd_scale_gear(hba, false);
 +              if (ret)
 +                      goto clk_scaling_unprepare;
 +      }
 +
 +      /*
 +       * If auto hibern8 is supported then put the link in
 +       * hibern8 manually, this is to avoid auto hibern8
 +       * racing during clock frequency scaling sequence.
 +       */
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              ret = ufshcd_uic_hibern8_enter(hba);
 +              if (ret)
 +                      /* link will be in a bad state, no need to scale up the gear */
 +                      return ret;
 +      }
 +
 +      ret = ufshcd_scale_clks(hba, scale_up);
 +      if (ret)
 +              goto scale_up_gear;
 +
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              ret = ufshcd_uic_hibern8_exit(hba);
 +              if (ret)
 +                      /* link will be in a bad state, no need to scale up the gear */
 +                      return ret;
 +      }
 +
 +      /* scale up the gear after scaling up clocks */
 +      if (scale_up) {
 +              ret = ufshcd_scale_gear(hba, true);
 +              if (ret) {
 +                      ufshcd_scale_clks(hba, false);
 +                      goto clk_scaling_unprepare;
 +              }
 +      }
 +
 +      if (!ret) {
 +              hba->clk_scaling.is_scaled_up = scale_up;
 +              if (scale_up)
 +                      hba->clk_gating.delay_ms =
 +                              hba->clk_gating.delay_ms_perf;
 +              else
 +                      hba->clk_gating.delay_ms =
 +                              hba->clk_gating.delay_ms_pwr_save;
 +      }
 +
 +      goto clk_scaling_unprepare;
  
 +scale_up_gear:
 +      if (!scale_up)
 +              ufshcd_scale_gear(hba, true);
 +clk_scaling_unprepare:
 +      ufshcd_clock_scaling_unprepare(hba);
  out:
 +      hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
 +      ufshcd_release_all(hba);
        return ret;
  }
  
 +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +
 +      devfreq_suspend_device(hba->devfreq);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->clk_scaling.window_start_t = 0;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +
 +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +      bool suspend = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (!hba->clk_scaling.is_suspended) {
 +              suspend = true;
 +              hba->clk_scaling.is_suspended = true;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      if (suspend)
 +              __ufshcd_suspend_clkscaling(hba);
 +}
 +
 +static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +      bool resume = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_scaling.is_suspended) {
 +              resume = true;
 +              hba->clk_scaling.is_suspended = false;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      if (resume)
 +              devfreq_resume_device(hba->devfreq);
 +}
 +
 +static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
 +}
 +
 +static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      u32 value;
 +      int err;
 +
 +      if (kstrtou32(buf, 0, &value))
 +              return -EINVAL;
 +
 +      value = !!value;
 +      if (value == hba->clk_scaling.is_allowed)
 +              goto out;
 +
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_hold(hba, false);
 +
 +      cancel_work_sync(&hba->clk_scaling.suspend_work);
 +      cancel_work_sync(&hba->clk_scaling.resume_work);
 +
 +      hba->clk_scaling.is_allowed = value;
 +
 +      if (value) {
 +              ufshcd_resume_clkscaling(hba);
 +      } else {
 +              ufshcd_suspend_clkscaling(hba);
 +              err = ufshcd_devfreq_scale(hba, true);
 +              if (err)
 +                      dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
 +                                      __func__, err);
 +      }
 +
 +      ufshcd_release(hba, false);
 +      pm_runtime_put_sync(hba->dev);
 +out:
 +      return count;
 +}
 +
 +static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
 +{
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                         clk_scaling.suspend_work);
 +      unsigned long irq_flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, irq_flags);
 +      if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
 +              spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +              return;
 +      }
 +      hba->clk_scaling.is_suspended = true;
 +      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +
 +      __ufshcd_suspend_clkscaling(hba);
 +}
 +
 +static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
 +{
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                         clk_scaling.resume_work);
 +      unsigned long irq_flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, irq_flags);
 +      if (!hba->clk_scaling.is_suspended) {
 +              spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +              return;
 +      }
 +      hba->clk_scaling.is_suspended = false;
 +      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +
 +      devfreq_resume_device(hba->devfreq);
 +}
 +
  static int ufshcd_devfreq_target(struct device *dev,
                                unsigned long *freq, u32 flags)
  {
 -      int err = 0;
 +      int ret = 0;
        struct ufs_hba *hba = dev_get_drvdata(dev);
 -      bool release_clk_hold = false;
        unsigned long irq_flags;
 +      ktime_t start;
 +      bool scale_up, sched_clk_scaling_suspend_work = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return -EINVAL;
  
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      if ((*freq > 0) && (*freq < UINT_MAX)) {
 +              dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
                return -EINVAL;
 +      }
  
        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (ufshcd_eh_in_progress(hba)) {
                return 0;
        }
  
 -      if (ufshcd_is_clkgating_allowed(hba) &&
 -          (hba->clk_gating.state != CLKS_ON)) {
 -              if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
 -                      /* hold the vote until the scaling work is completed */
 -                      hba->clk_gating.active_reqs++;
 -                      release_clk_hold = true;
 -                      hba->clk_gating.state = CLKS_ON;
 -              } else {
 -                      /*
 -                       * Clock gating work seems to be running in parallel
 -                       * hence skip scaling work to avoid deadlock between
 -                       * current scaling work and gating work.
 -                       */
 -                      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 -                      return 0;
 -              }
 +      if (!hba->clk_scaling.active_reqs)
 +              sched_clk_scaling_suspend_work = true;
 +
 +      scale_up = (*freq == UINT_MAX) ? true : false;
 +      if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
 +              spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +              ret = 0;
 +              goto out; /* no state change required */
        }
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
  
 -      if (*freq == UINT_MAX)
 -              err = ufshcd_scale_clks(hba, true);
 -      else if (*freq == 0)
 -              err = ufshcd_scale_clks(hba, false);
 +      start = ktime_get();
 +      ret = ufshcd_devfreq_scale(hba, scale_up);
 +      trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
 +              (scale_up ? "up" : "down"),
 +              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
  
 -      spin_lock_irqsave(hba->host->host_lock, irq_flags);
 -      if (release_clk_hold)
 -              __ufshcd_release(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +out:
 +      if (sched_clk_scaling_suspend_work)
 +              queue_work(hba->clk_scaling.workq,
 +                         &hba->clk_scaling.suspend_work);
  
 -      return err;
 +      return ret;
  }
  
  static int ufshcd_devfreq_get_dev_status(struct device *dev,
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
        unsigned long flags;
  
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;
  
        memset(stat, 0, sizeof(*stat));
@@@ -9886,31 -5689,12 +9887,31 @@@ start_window
        return 0;
  }
  
 -static struct devfreq_dev_profile ufs_devfreq_profile = {
 -      .polling_ms     = 100,
 -      .target         = ufshcd_devfreq_target,
 -      .get_dev_status = ufshcd_devfreq_get_dev_status,
 -};
 +static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
 +{
 +      hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
 +      hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
 +      sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
 +      hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
 +      hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
 +}
 +
 +static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
 +{
 +      struct device *dev = hba->dev;
 +      int ret;
  
 +      ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
 +              &hba->lanes_per_direction);
 +      if (ret) {
 +              dev_dbg(hba->dev,
 +                      "%s: failed to read lanes-per-direction, ret=%d\n",
 +                      __func__, ret);
 +              hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
 +      }
 +}
  /**
   * ufshcd_init - Driver initialization routine
   * @hba: per-adapter instance
@@@ -9934,8 -5718,6 +9935,8 @@@ int ufshcd_init(struct ufs_hba *hba, vo
        hba->mmio_base = mmio_base;
        hba->irq = irq;
  
 +      ufshcd_init_lanes_per_dir(hba);
 +
        err = ufshcd_hba_init(hba);
        if (err)
                goto out_error;
        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);
  
 +      /* print error message if ufs_version is not valid */
 +      if ((hba->ufs_version != UFSHCI_VERSION_10) &&
 +          (hba->ufs_version != UFSHCI_VERSION_11) &&
 +          (hba->ufs_version != UFSHCI_VERSION_20) &&
 +          (hba->ufs_version != UFSHCI_VERSION_21))
 +              dev_err(hba->dev, "invalid UFS version 0x%x\n",
 +                      hba->ufs_version);
 +
        /* Get Interrupt bit mask per version */
        hba->intr_mask = ufshcd_get_intr_mask(hba);
  
 +      /* Enable debug prints */
 +      hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
 +
        err = ufshcd_set_dma_mask(hba);
        if (err) {
                dev_err(hba->dev, "set dma mask failed\n");
        host->max_channel = UFSHCD_MAX_CHANNEL;
        host->unique_id = host->host_no;
        host->max_cmd_len = MAX_CDB_SIZE;
 +      host->set_dbd_for_caching = 1;
  
        hba->max_pwr_info.is_valid = false;
  
        /* Initialize work queues */
        INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 +      INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
  
        /* Initialize UIC command mutex */
        mutex_init(&hba->uic_cmd_mutex);
        /* Initialize mutex for device management commands */
        mutex_init(&hba->dev_cmd.lock);
  
 +      init_rwsem(&hba->lock);
 +
        /* Initialize device management tag acquire wait queue */
        init_waitqueue_head(&hba->dev_cmd.tag_wq);
  
        ufshcd_init_clk_gating(hba);
 +      ufshcd_init_hibern8_on_idle(hba);
 +
 +      /*
 +       * In order to avoid any spurious interrupt immediately after
 +       * registering UFS controller interrupt handler, clear any pending UFS
 +       * interrupt status and disable all the UFS interrupts.
 +       */
 +      ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
 +                    REG_INTERRUPT_STATUS);
 +      ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
 +      /*
 +       * Make sure that UFS interrupts are disabled and any pending interrupt
 +       * status is cleared before registering UFS interrupt handler.
 +       */
 +      mb();
 +
        /* IRQ registration */
        err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
        if (err) {
                goto exit_gating;
        }
  
 +      /* Reset controller to power on reset (POR) state */
 +      ufshcd_vops_full_reset(hba);
 +
 +      /* reset connected UFS device */
 +      err = ufshcd_reset_device(hba);
 +      if (err)
 +              dev_warn(hba->dev, "%s: device reset failed. err %d\n",
 +                       __func__, err);
 +
        /* Host controller enable */
        err = ufshcd_hba_enable(hba);
        if (err) {
                dev_err(hba->dev, "Host controller enable failed\n");
 +              ufshcd_print_host_regs(hba);
 +              ufshcd_print_host_state(hba);
                goto out_remove_scsi_host;
        }
  
 -      if (ufshcd_is_clkscaling_enabled(hba)) {
 -              hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
 -                                                 "simple_ondemand", NULL);
 -              if (IS_ERR(hba->devfreq)) {
 -                      dev_err(hba->dev, "Unable to register with devfreq %ld\n",
 -                                      PTR_ERR(hba->devfreq));
 -                      goto out_remove_scsi_host;
 -              }
 -              /* Suspend devfreq until the UFS device is detected */
 -              devfreq_suspend_device(hba->devfreq);
 -              hba->clk_scaling.window_start_t = 0;
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              char wq_name[sizeof("ufs_clkscaling_00")];
 +
 +              INIT_WORK(&hba->clk_scaling.suspend_work,
 +                        ufshcd_clk_scaling_suspend_work);
 +              INIT_WORK(&hba->clk_scaling.resume_work,
 +                        ufshcd_clk_scaling_resume_work);
 +
 +              snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
 +                       host->host_no);
 +              hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
 +
 +              ufshcd_clkscaling_init_sysfs(hba);
        }
  
 +      /*
 +       * If rpm_lvl and spm_lvl are not already set to valid levels, set
 +       * the default power management level for UFS runtime and system
 +       * suspend. The default power saving mode keeps the UFS link in
 +       * Hibern8 state and the UFS device in sleep.
 +       */
 +      if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
 +              hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                                      UFS_SLEEP_PWR_MODE,
 +                                                      UIC_LINK_HIBERN8_STATE);
 +      if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
 +              hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                                      UFS_SLEEP_PWR_MODE,
 +                                                      UIC_LINK_HIBERN8_STATE);
 +
        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
  
        ufshcd_init_latency_hist(hba);
  
        /*
 -       * The device-initialize-sequence hasn't been invoked yet.
 -       * Set the device to power-off state
 +       * We are assuming that the device was not put into sleep/power-down
 +       * state during the boot stage before the kernel started.
 +       * This assumption helps avoid doing link startup twice during
 +       * ufshcd_probe_hba().
         */
 -      ufshcd_set_ufs_dev_poweroff(hba);
 +      ufshcd_set_ufs_dev_active(hba);
 +
 +      ufshcd_cmd_log_init(hba);
  
        async_schedule(ufshcd_async_scan, hba);
  
 +      ufsdbg_add_debugfs(hba);
 +
 +      ufshcd_add_sysfs_nodes(hba);
 +
        return 0;
  
  out_remove_scsi_host:
  #include <linux/rcupdate.h>
  #include <linux/profile.h>
  #include <linux/notifier.h>
 +#include <linux/mutex.h>
 +#include <linux/delay.h>
 +#include <linux/swap.h>
 +#include <linux/fs.h>
 +#include <linux/cpuset.h>
 +#include <linux/vmpressure.h>
 +#include <linux/zcache.h>
+ #include <linux/circ_buf.h>
+ #include <linux/proc_fs.h>
+ #include <linux/slab.h>
  
  #define CREATE_TRACE_POINTS
 +#include <trace/events/almk.h>
 +
 +#ifdef CONFIG_HIGHMEM
 +#define _ZONE ZONE_HIGHMEM
 +#else
 +#define _ZONE ZONE_NORMAL
 +#endif
 +
 +#define CREATE_TRACE_POINTS
  #include "trace/lowmemorykiller.h"
  
  static uint32_t lowmem_debug_level = 1;
@@@ -77,7 -64,6 +80,7 @@@ static int lowmem_minfree[6] = 
        16 * 1024,      /* 64MB */
  };
  static int lowmem_minfree_size = 4;
 +static int lmk_fast_run = 1;
  
  static unsigned long lowmem_deathpending_timeout;
  
                        pr_info(x);                     \
        } while (0)
  
+ static DECLARE_WAIT_QUEUE_HEAD(event_wait);
+ static DEFINE_SPINLOCK(lmk_event_lock);
+ static struct circ_buf event_buffer;
+ #define MAX_BUFFERED_EVENTS 8
+ #define MAX_TASKNAME 128
+ struct lmk_event {
+       char taskname[MAX_TASKNAME];
+       pid_t pid;
+       uid_t uid;
+       pid_t group_leader_pid;
+       unsigned long min_flt;
+       unsigned long maj_flt;
+       unsigned long rss_in_pages;
+       short oom_score_adj;
+       short min_score_adj;
+       unsigned long long start_time;
+       struct list_head list;
+ };
+ void handle_lmk_event(struct task_struct *selected, int selected_tasksize,
+                     short min_score_adj)
+ {
+       int head;
+       int tail;
+       struct lmk_event *events;
+       struct lmk_event *event;
+       int res;
+       char taskname[MAX_TASKNAME];
+       res = get_cmdline(selected, taskname, MAX_TASKNAME - 1);
+       /* No valid process name means this is definitely not associated with a
+        * userspace activity.
+        */
+       if (res <= 0 || res >= MAX_TASKNAME)
+               return;
+       taskname[res] = '\0';
+       spin_lock(&lmk_event_lock);
+       head = event_buffer.head;
+       tail = READ_ONCE(event_buffer.tail);
+       /* Do not continue to log if no space remains in the buffer. */
+       if (CIRC_SPACE(head, tail, MAX_BUFFERED_EVENTS) < 1) {
+               spin_unlock(&lmk_event_lock);
+               return;
+       }
+       events = (struct lmk_event *) event_buffer.buf;
+       event = &events[head];
+       memcpy(event->taskname, taskname, res + 1);
+       event->pid = selected->pid;
+       event->uid = from_kuid_munged(current_user_ns(), task_uid(selected));
+       if (selected->group_leader)
+               event->group_leader_pid = selected->group_leader->pid;
+       else
+               event->group_leader_pid = -1;
+       event->min_flt = selected->min_flt;
+       event->maj_flt = selected->maj_flt;
+       event->oom_score_adj = selected->signal->oom_score_adj;
+       event->start_time = nsec_to_clock_t(selected->real_start_time);
+       event->rss_in_pages = selected_tasksize;
+       event->min_score_adj = min_score_adj;
+       event_buffer.head = (head + 1) & (MAX_BUFFERED_EVENTS - 1);
+       spin_unlock(&lmk_event_lock);
+       wake_up_interruptible(&event_wait);
+ }
+ static int lmk_event_show(struct seq_file *s, void *unused)
+ {
+       struct lmk_event *events = (struct lmk_event *) event_buffer.buf;
+       int head;
+       int tail;
+       struct lmk_event *event;
+       spin_lock(&lmk_event_lock);
+       head = event_buffer.head;
+       tail = event_buffer.tail;
+       if (head == tail) {
+               spin_unlock(&lmk_event_lock);
+               return -EAGAIN;
+       }
+       event = &events[tail];
+       seq_printf(s, "%lu %lu %lu %lu %lu %lu %hd %hd %llu\n%s\n",
+               (unsigned long) event->pid, (unsigned long) event->uid,
+               (unsigned long) event->group_leader_pid, event->min_flt,
+               event->maj_flt, event->rss_in_pages, event->oom_score_adj,
+               event->min_score_adj, event->start_time, event->taskname);
+       event_buffer.tail = (tail + 1) & (MAX_BUFFERED_EVENTS - 1);
+       spin_unlock(&lmk_event_lock);
+       return 0;
+ }
+ static unsigned int lmk_event_poll(struct file *file, poll_table *wait)
+ {
+       int ret = 0;
+       poll_wait(file, &event_wait, wait);
+       spin_lock(&lmk_event_lock);
+       if (event_buffer.head != event_buffer.tail)
+               ret = POLLIN;
+       spin_unlock(&lmk_event_lock);
+       return ret;
+ }
+ static int lmk_event_open(struct inode *inode, struct file *file)
+ {
+       return single_open(file, lmk_event_show, inode->i_private);
+ }
+ static const struct file_operations event_file_ops = {
+       .open = lmk_event_open,
+       .poll = lmk_event_poll,
+       .read = seq_read
+ };
+ static void lmk_event_init(void)
+ {
+       struct proc_dir_entry *entry;
+       event_buffer.head = 0;
+       event_buffer.tail = 0;
+       event_buffer.buf = kmalloc(
+               sizeof(struct lmk_event) * MAX_BUFFERED_EVENTS, GFP_KERNEL);
+       if (!event_buffer.buf)
+               return;
+       entry = proc_create("lowmemorykiller", 0, NULL, &event_file_ops);
+       if (!entry)
+               pr_err("error creating kernel lmk event file\n");
+ }
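
For reference, the /proc/lowmemorykiller file created in lmk_event_init() above is meant to be consumed by polling for POLLIN and then reading one kill record at a time, in the format emitted by lmk_event_show(). The userspace sketch below is illustrative only and not part of this patch; the buffer size and the lack of reconnect or error handling are arbitrary simplifications.

/* Hypothetical userspace consumer; not part of the kernel patch above. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	char buf[512];
	ssize_t n;

	pfd.fd = open("/proc/lowmemorykiller", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open /proc/lowmemorykiller");
		return 1;
	}
	pfd.events = POLLIN;

	for (;;) {
		/* lmk_event_poll() wakes us once the event ring buffer is non-empty. */
		if (poll(&pfd, 1, -1) < 0)
			break;
		/* Rewind so the seq_file re-runs lmk_event_show() for the next record. */
		lseek(pfd.fd, 0, SEEK_SET);
		n = read(pfd.fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			/* One record: pid uid leader_pid min_flt maj_flt rss adj min_adj start\ntaskname */
			fputs(buf, stdout);
		}
	}

	close(pfd.fd);
	return 0;
}
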
  static unsigned long lowmem_count(struct shrinker *s,
                                  struct shrink_control *sc)
  {
                global_page_state(NR_INACTIVE_FILE);
  }
  
 +static atomic_t shift_adj = ATOMIC_INIT(0);
 +static short adj_max_shift = 353;
 +module_param_named(adj_max_shift, adj_max_shift, short,
 +                   S_IRUGO | S_IWUSR);
 +
 +/* User knob to enable/disable adaptive lmk feature */
 +static int enable_adaptive_lmk;
 +module_param_named(enable_adaptive_lmk, enable_adaptive_lmk, int,
 +                 S_IRUGO | S_IWUSR);
 +
 +/*
 + * This parameter controls the behaviour of LMK when vmpressure is in
 + * the range of 90-94: adaptive LMK triggers based on the number of
 + * file pages relative to vmpressure_file_min. Usually this is a
 + * pseudo minfree value, higher than the highest configured value in
 + * the minfree array.
 + */
 +static int vmpressure_file_min;
 +module_param_named(vmpressure_file_min, vmpressure_file_min, int,
 +                 S_IRUGO | S_IWUSR);
 +
 +enum {
 +      VMPRESSURE_NO_ADJUST = 0,
 +      VMPRESSURE_ADJUST_ENCROACH,
 +      VMPRESSURE_ADJUST_NORMAL,
 +};
 +
 +int adjust_minadj(short *min_score_adj)
 +{
 +      int ret = VMPRESSURE_NO_ADJUST;
 +
 +      if (!enable_adaptive_lmk)
 +              return 0;
 +
 +      if (atomic_read(&shift_adj) &&
 +          (*min_score_adj > adj_max_shift)) {
 +              if (*min_score_adj == OOM_SCORE_ADJ_MAX + 1)
 +                      ret = VMPRESSURE_ADJUST_ENCROACH;
 +              else
 +                      ret = VMPRESSURE_ADJUST_NORMAL;
 +              *min_score_adj = adj_max_shift;
 +      }
 +      atomic_set(&shift_adj, 0);
 +
 +      return ret;
 +}
 +
 +static int lmk_vmpressure_notifier(struct notifier_block *nb,
 +                                 unsigned long action, void *data)
 +{
 +      int other_free = 0, other_file = 0;
 +      unsigned long pressure = action;
 +      int array_size = ARRAY_SIZE(lowmem_adj);
 +
 +      if (!enable_adaptive_lmk)
 +              return 0;
 +
 +      if (pressure >= 95) {
 +              other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
 +                      global_page_state(NR_SHMEM) -
 +                      total_swapcache_pages();
 +              other_free = global_page_state(NR_FREE_PAGES);
 +
 +              atomic_set(&shift_adj, 1);
 +              trace_almk_vmpressure(pressure, other_free, other_file);
 +      } else if (pressure >= 90) {
 +              if (lowmem_adj_size < array_size)
 +                      array_size = lowmem_adj_size;
 +              if (lowmem_minfree_size < array_size)
 +                      array_size = lowmem_minfree_size;
 +
 +              other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
 +                      global_page_state(NR_SHMEM) -
 +                      total_swapcache_pages();
 +
 +              other_free = global_page_state(NR_FREE_PAGES);
 +
 +              if ((other_free < lowmem_minfree[array_size - 1]) &&
 +                  (other_file < vmpressure_file_min)) {
 +                      atomic_set(&shift_adj, 1);
 +                      trace_almk_vmpressure(pressure, other_free, other_file);
 +              }
 +      } else if (atomic_read(&shift_adj)) {
 +              other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
 +                      global_page_state(NR_SHMEM) -
 +                      total_swapcache_pages();
 +              other_free = global_page_state(NR_FREE_PAGES);
 +
 +              /*
 +               * shift_adj may have been set by a previous invocation of the
 +               * notifier that has not been followed by a lowmem_shrink yet.
 +               * Since vmpressure has improved, reset shift_adj to avoid
 +               * false adaptive LMK trigger.
 +               */
 +              trace_almk_vmpressure(pressure, other_free, other_file);
 +              atomic_set(&shift_adj, 0);
 +      }
 +
 +      return 0;
 +}
 +
 +static struct notifier_block lmk_vmpr_nb = {
 +      .notifier_call = lmk_vmpressure_notifier,
 +};
 +
 +static int test_task_flag(struct task_struct *p, int flag)
 +{
 +      struct task_struct *t;
 +
 +      for_each_thread(p, t) {
 +              task_lock(t);
 +              if (test_tsk_thread_flag(t, flag)) {
 +                      task_unlock(t);
 +                      return 1;
 +              }
 +              task_unlock(t);
 +      }
 +
 +      return 0;
 +}
 +
 +static int test_task_state(struct task_struct *p, int state)
 +{
 +      struct task_struct *t;
 +
 +      for_each_thread(p, t) {
 +              task_lock(t);
 +              if (t->state & state) {
 +                      task_unlock(t);
 +                      return 1;
 +              }
 +              task_unlock(t);
 +      }
 +
 +      return 0;
 +}
 +
 +static DEFINE_MUTEX(scan_mutex);
 +
 +int can_use_cma_pages(gfp_t gfp_mask)
 +{
 +      int can_use = 0;
 +      int mtype = gfpflags_to_migratetype(gfp_mask);
 +      int i = 0;
 +      int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
 +
 +      if (is_migrate_cma(mtype)) {
 +              can_use = 1;
 +      } else {
 +              for (i = 0;; i++) {
 +                      int fallbacktype = mtype_fallbacks[i];
 +
 +                      if (is_migrate_cma(fallbacktype)) {
 +                              can_use = 1;
 +                              break;
 +                      }
 +
 +                      if (fallbacktype == MIGRATE_TYPES)
 +                              break;
 +              }
 +      }
 +      return can_use;
 +}
 +
 +void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
 +                                      int *other_free, int *other_file,
 +                                      int use_cma_pages)
 +{
 +      struct zone *zone;
 +      struct zoneref *zoneref;
 +      int zone_idx;
 +
 +      for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
 +              zone_idx = zonelist_zone_idx(zoneref);
 +              if (zone_idx == ZONE_MOVABLE) {
 +                      if (!use_cma_pages && other_free)
 +                              *other_free -=
 +                                  zone_page_state(zone, NR_FREE_CMA_PAGES);
 +                      continue;
 +              }
 +
 +              if (zone_idx > classzone_idx) {
 +                      if (other_free != NULL)
 +                              *other_free -= zone_page_state(zone,
 +                                                             NR_FREE_PAGES);
 +                      if (other_file != NULL)
 +                              *other_file -= zone_page_state(zone,
 +                                                             NR_FILE_PAGES)
 +                                      - zone_page_state(zone, NR_SHMEM)
 +                                      - zone_page_state(zone, NR_SWAPCACHE);
 +              } else if (zone_idx < classzone_idx) {
 +                      if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) &&
 +                          other_free) {
 +                              if (!use_cma_pages) {
 +                                      *other_free -= min(
 +                                        zone->lowmem_reserve[classzone_idx] +
 +                                        zone_page_state(
 +                                          zone, NR_FREE_CMA_PAGES),
 +                                        zone_page_state(
 +                                          zone, NR_FREE_PAGES));
 +                              } else {
 +                                      *other_free -=
 +                                        zone->lowmem_reserve[classzone_idx];
 +                              }
 +                      } else {
 +                              if (other_free)
 +                                      *other_free -=
 +                                        zone_page_state(zone, NR_FREE_PAGES);
 +                      }
 +              }
 +      }
 +}
 +
 +#ifdef CONFIG_HIGHMEM
 +void adjust_gfp_mask(gfp_t *gfp_mask)
 +{
 +      struct zone *preferred_zone;
 +      struct zonelist *zonelist;
 +      enum zone_type high_zoneidx;
 +
 +      if (current_is_kswapd()) {
 +              zonelist = node_zonelist(0, *gfp_mask);
 +              high_zoneidx = gfp_zone(*gfp_mask);
 +              first_zones_zonelist(zonelist, high_zoneidx, NULL,
 +                                   &preferred_zone);
 +
 +              if (high_zoneidx == ZONE_NORMAL) {
 +                      if (zone_watermark_ok_safe(
 +                                      preferred_zone, 0,
 +                                      high_wmark_pages(preferred_zone), 0))
 +                              *gfp_mask |= __GFP_HIGHMEM;
 +              } else if (high_zoneidx == ZONE_HIGHMEM) {
 +                      *gfp_mask |= __GFP_HIGHMEM;
 +              }
 +      }
 +}
 +#else
 +void adjust_gfp_mask(gfp_t *unused)
 +{
 +}
 +#endif
 +
 +void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
 +{
 +      gfp_t gfp_mask;
 +      struct zone *preferred_zone;
 +      struct zonelist *zonelist;
 +      enum zone_type high_zoneidx, classzone_idx;
 +      unsigned long balance_gap;
 +      int use_cma_pages;
 +
 +      gfp_mask = sc->gfp_mask;
 +      adjust_gfp_mask(&gfp_mask);
 +
 +      zonelist = node_zonelist(0, gfp_mask);
 +      high_zoneidx = gfp_zone(gfp_mask);
 +      first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
 +      classzone_idx = zone_idx(preferred_zone);
 +      use_cma_pages = can_use_cma_pages(gfp_mask);
 +
 +      balance_gap = min(low_wmark_pages(preferred_zone),
 +                        (preferred_zone->present_pages +
 +                         KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
 +                         KSWAPD_ZONE_BALANCE_GAP_RATIO);
 +
 +      if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0,
 +                        high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX +
 +                        balance_gap, 0, 0))) {
 +              if (lmk_fast_run)
 +                      tune_lmk_zone_param(zonelist, classzone_idx, other_free,
 +                                     other_file, use_cma_pages);
 +              else
 +                      tune_lmk_zone_param(zonelist, classzone_idx, other_free,
 +                                     NULL, use_cma_pages);
 +
 +              if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
 +                      if (!use_cma_pages) {
 +                              *other_free -= min(
 +                                preferred_zone->lowmem_reserve[_ZONE]
 +                                + zone_page_state(
 +                                  preferred_zone, NR_FREE_CMA_PAGES),
 +                                zone_page_state(
 +                                  preferred_zone, NR_FREE_PAGES));
 +                      } else {
 +                              *other_free -=
 +                                preferred_zone->lowmem_reserve[_ZONE];
 +                      }
 +              } else {
 +                      *other_free -= zone_page_state(preferred_zone,
 +                                                    NR_FREE_PAGES);
 +              }
 +
 +              lowmem_print(4, "lowmem_shrink of kswapd tuning for highmem "
 +                           "ofree %d, %d\n", *other_free, *other_file);
 +      } else {
 +              tune_lmk_zone_param(zonelist, classzone_idx, other_free,
 +                             other_file, use_cma_pages);
 +
 +              if (!use_cma_pages) {
 +                      *other_free -=
 +                        zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
 +              }
 +
 +              lowmem_print(4, "lowmem_shrink tuning for others ofree %d, "
 +                           "%d\n", *other_free, *other_file);
 +      }
 +}
 +
  static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
  {
        struct task_struct *tsk;
        unsigned long rem = 0;
        int tasksize;
        int i;
 +      int ret = 0;
        short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
        int minfree = 0;
        int selected_tasksize = 0;
        short selected_oom_score_adj;
        int array_size = ARRAY_SIZE(lowmem_adj);
 -      int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
 -      int other_file = global_page_state(NR_FILE_PAGES) -
 +      int other_free;
 +      int other_file;
 +
 +      if (!mutex_trylock(&scan_mutex))
 +              return 0;
 +
 +      other_free = global_page_state(NR_FREE_PAGES);
 +
 +      if (global_page_state(NR_SHMEM) + total_swapcache_pages() <
 +              global_page_state(NR_FILE_PAGES) + zcache_pages())
 +              other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
                                                global_page_state(NR_SHMEM) -
                                                global_page_state(NR_UNEVICTABLE) -
                                                total_swapcache_pages();
 +      else
 +              other_file = 0;
 +
 +      tune_lmk_param(&other_free, &other_file, sc);
  
        if (lowmem_adj_size < array_size)
                array_size = lowmem_adj_size;
                }
        }
  
 +      ret = adjust_minadj(&min_score_adj);
 +
        lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
                        sc->nr_to_scan, sc->gfp_mask, other_free,
                        other_file, min_score_adj);
  
        if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
 +              trace_almk_shrink(0, ret, other_free, other_file, 0);
                lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
                             sc->nr_to_scan, sc->gfp_mask);
 +              mutex_unlock(&scan_mutex);
                return 0;
        }
  
                if (tsk->flags & PF_KTHREAD)
                        continue;
  
 +              /* If the task no longer has any memory, ignore it */
 +              if (test_task_flag(tsk, TIF_MM_RELEASED))
 +                      continue;
 +
 +              if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
 +                      if (test_task_flag(tsk, TIF_MEMDIE)) {
 +                              rcu_read_unlock();
 +                              mutex_unlock(&scan_mutex);
 +                              return 0;
 +                      }
 +              }
 +
                p = find_lock_task_mm(tsk);
                if (!p)
                        continue;
  
 -              if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
 -                  time_before_eq(jiffies, lowmem_deathpending_timeout)) {
 -                      task_unlock(p);
 -                      rcu_read_unlock();
 -                      return 0;
 -              }
                oom_score_adj = p->signal->oom_score_adj;
                if (oom_score_adj < min_score_adj) {
                        task_unlock(p);
                selected = p;
                selected_tasksize = tasksize;
                selected_oom_score_adj = oom_score_adj;
 -              lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
 +              lowmem_print(3, "select '%s' (%d), adj %hd, size %d, to kill\n",
                             p->comm, p->pid, oom_score_adj, tasksize);
        }
        if (selected) {
 -              long cache_size = other_file * (long)(PAGE_SIZE / 1024);
 -              long cache_limit = minfree * (long)(PAGE_SIZE / 1024);
 -              long free = other_free * (long)(PAGE_SIZE / 1024);
 +              long cache_size, cache_limit, free;
 +
 +              if (test_task_flag(selected, TIF_MEMDIE) &&
 +                  (test_task_state(selected, TASK_UNINTERRUPTIBLE))) {
 +                      lowmem_print(2, "'%s' (%d) is already killed\n",
 +                                   selected->comm,
 +                                   selected->pid);
 +                      rcu_read_unlock();
 +                      mutex_unlock(&scan_mutex);
 +                      return 0;
 +              }
  
                task_lock(selected);
                send_sig(SIGKILL, selected, 0);
                if (selected->mm)
                        mark_oom_victim(selected);
                task_unlock(selected);
 +              cache_size = other_file * (long)(PAGE_SIZE / 1024);
 +              cache_limit = minfree * (long)(PAGE_SIZE / 1024);
 +              free = other_free * (long)(PAGE_SIZE / 1024);
                trace_lowmemory_kill(selected, cache_size, cache_limit, free);
                lowmem_print(1, "Killing '%s' (%d) (tgid %d), adj %hd,\n" \
                                "   to free %ldkB on behalf of '%s' (%d) because\n" \
                                "   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
 -                              "   Free memory is %ldkB above reserved\n",
 +                              "   Free memory is %ldkB above reserved.\n" \
 +                              "   Free CMA is %ldkB\n" \
 +                              "   Total reserve is %ldkB\n" \
 +                              "   Total free pages is %ldkB\n" \
 +                              "   Total file cache is %ldkB\n" \
 +                              "   Total zcache is %ldkB\n" \
 +                              "   GFP mask is 0x%x\n",
                             selected->comm, selected->pid, selected->tgid,
                             selected_oom_score_adj,
                             selected_tasksize * (long)(PAGE_SIZE / 1024),
                             current->comm, current->pid,
                             cache_size, cache_limit,
                             min_score_adj,
 -                           free);
 +                           free,
 +                           global_page_state(NR_FREE_CMA_PAGES) *
 +                              (long)(PAGE_SIZE / 1024),
 +                           totalreserve_pages * (long)(PAGE_SIZE / 1024),
 +                           global_page_state(NR_FREE_PAGES) *
 +                              (long)(PAGE_SIZE / 1024),
 +                           global_page_state(NR_FILE_PAGES) *
 +                              (long)(PAGE_SIZE / 1024),
 +                           (long)zcache_pages() * (long)(PAGE_SIZE / 1024),
 +                           sc->gfp_mask);
 +
 +              if (lowmem_debug_level >= 2 && selected_oom_score_adj == 0) {
 +                      show_mem(SHOW_MEM_FILTER_NODES);
 +                      dump_tasks(NULL, NULL);
 +              }
 +
                lowmem_deathpending_timeout = jiffies + HZ;
                rem += selected_tasksize;
 +              rcu_read_unlock();
+               get_task_struct(selected);
 +              /* give the system time to free up the memory */
 +              msleep_interruptible(20);
 +              trace_almk_shrink(selected_tasksize, ret,
 +                                other_free, other_file,
 +                                selected_oom_score_adj);
 +      } else {
 +              trace_almk_shrink(1, ret, other_free, other_file, 0);
 +              rcu_read_unlock();
        }
  
        lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
                     sc->nr_to_scan, sc->gfp_mask, rem);
 -      rcu_read_unlock();
 +      mutex_unlock(&scan_mutex);
+       if (selected) {
+               handle_lmk_event(selected, selected_tasksize, min_score_adj);
+               put_task_struct(selected);
+       }
        return rem;
  }
  
  static struct shrinker lowmem_shrinker = {
        .scan_objects = lowmem_scan,
        .count_objects = lowmem_count,
 -      .seeks = DEFAULT_SEEKS * 16
 +      .seeks = DEFAULT_SEEKS * 16,
 +      .flags = SHRINKER_LMK
  };
  
  static int __init lowmem_init(void)
  {
        register_shrinker(&lowmem_shrinker);
 +      vmpressure_notifier_register(&lmk_vmpr_nb);
+       lmk_event_init();
        return 0;
  }
  device_initcall(lowmem_init);
@@@ -697,5 -462,4 +854,5 @@@ module_param_array_named(adj, lowmem_ad
  module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
                         S_IRUGO | S_IWUSR);
  module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
 +module_param_named(lmk_fast_run, lmk_fast_run, int, S_IRUGO | S_IWUSR);
  
diff --combined kernel/cpu.c
@@@ -92,11 -92,6 +92,11 @@@ static struct 
  #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
  #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
  
 +void cpu_hotplug_mutex_held(void)
 +{
 +      lockdep_assert_held(&cpu_hotplug.lock);
 +}
 +EXPORT_SYMBOL(cpu_hotplug_mutex_held);
  
  void get_online_cpus(void)
  {
@@@ -373,9 -368,6 +373,9 @@@ static int _cpu_down(unsigned int cpu, 
        if (!cpu_online(cpu))
                return -EINVAL;
  
 +      if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1)
 +              return -EBUSY;
 +
        cpu_hotplug_begin();
  
        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
@@@ -536,8 -528,8 +536,8 @@@ static int _cpu_up(unsigned int cpu, in
        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
 -              pr_warn("%s: attempt to bring up CPU %u failed\n",
 -                      __func__, cpu);
 +              pr_warn_ratelimited("%s: attempt to bring up CPU %u failed\n",
 +                                  __func__, cpu);
                goto out_notify;
        }
  
@@@ -561,41 -553,9 +561,41 @@@ out
        return ret;
  }
  
 +static int switch_to_rt_policy(void)
 +{
 +      struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 +      unsigned int policy = current->policy;
 +      int err;
 +
 +      /* Nobody should be attempting hotplug from these policy contexts. */
 +      if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
 +                                      policy == SCHED_DEADLINE)
 +              return -EPERM;
 +
 +      if (policy == SCHED_FIFO || policy == SCHED_RR)
 +              return 1;
 +
 +      /* Only SCHED_NORMAL left. */
 +      err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
 +      return err;
 +
 +}
 +
 +static int switch_to_fair_policy(void)
 +{
 +      struct sched_param param = { .sched_priority = 0 };
 +
 +      return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
 +}
 +
  int cpu_up(unsigned int cpu)
  {
        int err = 0;
 +      int switch_err = 0;
 +
 +      switch_err = switch_to_rt_policy();
 +      if (switch_err < 0)
 +              return switch_err;
  
        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
  
  out:
        cpu_maps_update_done();
 +
 +      if (!switch_err) {
 +              switch_err = switch_to_fair_policy();
 +              if (switch_err)
 +                      pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
 +                              switch_err, current->comm, current->pid);
 +      }
 +
        return err;
  }
  EXPORT_SYMBOL_GPL(cpu_up);
@@@ -853,10 -805,6 +853,10 @@@ static DECLARE_BITMAP(cpu_active_bits, 
  const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
  EXPORT_SYMBOL(cpu_active_mask);
  
 +static DECLARE_BITMAP(cpu_isolated_bits, CONFIG_NR_CPUS) __read_mostly;
 +const struct cpumask *const cpu_isolated_mask = to_cpumask(cpu_isolated_bits);
 +EXPORT_SYMBOL(cpu_isolated_mask);
 +
  void set_cpu_possible(unsigned int cpu, bool possible)
  {
        if (possible)
@@@ -891,14 -839,6 +891,14 @@@ void set_cpu_active(unsigned int cpu, b
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
  }
  
 +void set_cpu_isolated(unsigned int cpu, bool isolated)
 +{
 +      if (isolated)
 +              cpumask_set_cpu(cpu, to_cpumask(cpu_isolated_bits));
 +      else
 +              cpumask_clear_cpu(cpu, to_cpumask(cpu_isolated_bits));
 +}
 +
  void init_cpu_present(const struct cpumask *src)
  {
        cpumask_copy(to_cpumask(cpu_present_bits), src);
@@@ -914,11 -854,6 +914,11 @@@ void init_cpu_online(const struct cpuma
        cpumask_copy(to_cpumask(cpu_online_bits), src);
  }
  
 +void init_cpu_isolated(const struct cpumask *src)
 +{
 +      cpumask_copy(to_cpumask(cpu_isolated_bits), src);
 +}
 +
  enum cpu_mitigations cpu_mitigations = CPU_MITIGATIONS_AUTO;
  
  static int __init mitigations_parse_cmdline(char *arg)
                cpu_mitigations = CPU_MITIGATIONS_OFF;
        else if (!strcmp(arg, "auto"))
                cpu_mitigations = CPU_MITIGATIONS_AUTO;
+       else
+               pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
+                       arg);
  
        return 0;
  }
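The kernel/cpu.c section above merges the upstream ratelimited warning and mitigations= handling with the msm-4.4 hotplug additions. As a hedged sketch of what the cpu_up() policy juggling amounts to (do_cpu_up_locked() is a hypothetical stand-in for the existing body of cpu_up(), not a function in this tree):

	/* Illustrative only: switch_to_rt_policy() returns <0 for callers whose
	 * policy is rejected outright, 1 if the caller is already SCHED_FIFO/RR
	 * (nothing to restore), and 0 if it was boosted from SCHED_NORMAL.
	 */
	static int cpu_up_sketch(unsigned int cpu)
	{
		int err, switch_err;

		switch_err = switch_to_rt_policy();
		if (switch_err < 0)
			return switch_err;	/* SCHED_BATCH/IDLE/DEADLINE caller */

		err = do_cpu_up_locked(cpu);	/* hypothetical: the original hotplug work */

		if (!switch_err)		/* boosted above, so drop back to CFS */
			switch_err = switch_to_fair_policy();

		return err;
	}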
diff --combined kernel/trace/trace.c
@@@ -41,7 -41,6 +41,7 @@@
  #include <linux/nmi.h>
  #include <linux/fs.h>
  #include <linux/sched/rt.h>
 +#include <linux/coresight-stm.h>
  
  #include "trace.h"
  #include "trace_output.h"
@@@ -574,11 -573,8 +574,11 @@@ int __trace_puts(unsigned long ip, cons
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
 -      } else
 +              stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, size + 2);
 +      } else {
                entry->buf[size] = '\0';
 +              stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, size + 1);
 +      }
  
        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
@@@ -619,7 -615,6 +619,7 @@@ int __trace_bputs(unsigned long ip, con
        entry = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->str                      = str;
 +      stm_log(OST_ENTITY_TRACE_PRINTK, entry->str, strlen(entry->str)+1);
  
        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
@@@ -1645,7 -1640,7 +1645,7 @@@ static void __trace_find_cmdline(int pi
  
        map = savedcmd->map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
 -              strcpy(comm, get_saved_cmdlines(map));
 +              strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN - 1);
        else
                strcpy(comm, "<...>");
  }
@@@ -2295,7 -2290,6 +2295,7 @@@ __trace_array_vprintk(struct ring_buffe
  
        memcpy(&entry->buf, tbuffer, len + 1);
        if (!call_filter_check_discard(call, entry, buffer, event)) {
 +              stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, len + 1);
                __buffer_unlock_commit(buffer, event);
                ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
        }
@@@ -5362,11 -5356,8 +5362,11 @@@ tracing_mark_write(struct file *filp, c
        if (entry->buf[cnt - 1] != '\n') {
                entry->buf[cnt] = '\n';
                entry->buf[cnt + 1] = '\0';
 -      } else
 +              stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 2);
 +      } else {
                entry->buf[cnt] = '\0';
 +              stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 1);
 +      }
  
        __buffer_unlock_commit(buffer, event);
  
@@@ -7353,12 -7344,8 +7353,8 @@@ void ftrace_dump(enum ftrace_dump_mode 
  
                cnt++;
  
-               /* reset all but tr, trace, and overruns */
-               memset(&iter.seq, 0,
-                      sizeof(struct trace_iterator) -
-                      offsetof(struct trace_iterator, seq));
+               trace_iterator_reset(&iter);
                iter.iter_flags |= TRACE_FILE_LAT_FMT;
-               iter.pos = -1;
  
                if (trace_find_next_entry_inc(&iter) != NULL) {
                        int ret;
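The ftrace_dump() hunk above folds the open-coded memset()/pos handling into a trace_iterator_reset() helper that the same upstream series adds to kernel/trace/trace.h. For readability, the helper presumably boils down to the following (reconstructed from the removed lines, not quoted from this merge):

	static inline void trace_iterator_reset(struct trace_iterator *iter)
	{
		const size_t offset = offsetof(struct trace_iterator, seq);

		/* Clear everything from 'seq' onwards, keeping tr/trace/overruns,
		 * and mark the read position as unset - exactly what the removed
		 * memset() + "iter.pos = -1" pair did by hand.
		 */
		memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
		iter->pos = -1;
	}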
diff --combined net/bluetooth/hci_conn.c
@@@ -141,7 -141,7 +141,7 @@@ static void le_scan_cleanup(struct work
        struct hci_dev *hdev = conn->hdev;
        struct hci_conn *c = NULL;
  
 -      BT_DBG("%s hcon %p", hdev->name, conn);
 +      BT_DBG("%s hcon %pK", hdev->name, conn);
  
        hci_dev_lock(hdev);
  
  
  static void hci_connect_le_scan_remove(struct hci_conn *conn)
  {
 -      BT_DBG("%s hcon %p", conn->hdev->name, conn);
 +      BT_DBG("%s hcon %pK", conn->hdev->name, conn);
  
        /* We can't call hci_conn_del/hci_conn_cleanup here since that
         * could deadlock with another hci_conn_del() call that's holding
@@@ -187,7 -187,7 +187,7 @@@ static void hci_acl_create_connection(s
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;
  
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        conn->state = BT_CONNECT;
        conn->out = true;
  
  int hci_disconnect(struct hci_conn *conn, __u8 reason)
  {
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        /* When we are master of an established connection and it enters
         * the disconnect timeout, then go ahead and try to read the
@@@ -251,7 -251,7 +251,7 @@@ static void hci_add_sco(struct hci_con
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;
  
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        conn->state = BT_CONNECT;
        conn->out = true;
@@@ -270,7 -270,7 +270,7 @@@ bool hci_setup_sync(struct hci_conn *co
        struct hci_cp_setup_sync_conn cp;
        const struct sco_param *param;
  
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        conn->state = BT_CONNECT;
        conn->out = true;
@@@ -356,7 -356,7 +356,7 @@@ void hci_le_start_enc(struct hci_conn *
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_start_enc cp;
  
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        memset(&cp, 0, sizeof(cp));
  
@@@ -376,7 -376,7 +376,7 @@@ void hci_sco_setup(struct hci_conn *con
        if (!sco)
                return;
  
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        if (!status) {
                if (lmp_esco_capable(conn->hdev))
@@@ -395,7 -395,7 +395,7 @@@ static void hci_conn_timeout(struct wor
                                             disc_work.work);
        int refcnt = atomic_read(&conn->refcnt);
  
 -      BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
 +      BT_DBG("hcon %pK state %s", conn, state_to_string(conn->state));
  
        WARN_ON(refcnt < 0);
  
@@@ -426,7 -426,7 +426,7 @@@ static void hci_conn_idle(struct work_s
                                             idle_work.work);
        struct hci_dev *hdev = conn->hdev;
  
 -      BT_DBG("hcon %p mode %d", conn, conn->mode);
 +      BT_DBG("hcon %pK mode %d", conn, conn->mode);
  
        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;
@@@ -566,7 -566,7 +566,7 @@@ int hci_conn_del(struct hci_conn *conn
  {
        struct hci_dev *hdev = conn->hdev;
  
 -      BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
 +      BT_DBG("%s hcon %pK handle %d", hdev->name, conn, conn->handle);
  
        cancel_delayed_work_sync(&conn->disc_work);
        cancel_delayed_work_sync(&conn->auto_accept_work);
@@@ -1160,7 -1160,7 +1160,7 @@@ struct hci_conn *hci_connect_sco(struc
  /* Check link security requirement */
  int hci_conn_check_link_mode(struct hci_conn *conn)
  {
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        /* In Secure Connections Only mode, it is required that Secure
         * Connections is used and the link is encrypted with AES-CCM
  /* Authenticate remote device */
  static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
  {
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        if (conn->pending_sec_level > sec_level)
                sec_level = conn->pending_sec_level;
  /* Encrypt the link */
  static void hci_conn_encrypt(struct hci_conn *conn)
  {
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
                struct hci_cp_set_conn_encrypt cp;
  int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
                      bool initiator)
  {
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        if (conn->type == LE_LINK)
                return smp_conn_security(conn, sec_level);
@@@ -1293,8 -1293,16 +1293,16 @@@ auth
                return 0;
  
  encrypt:
-       if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+       if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
+               /* Ensure that the encryption key size has been read,
+                * otherwise stall the upper layer responses.
+                */
+               if (!conn->enc_key_size)
+                       return 0;
+               /* Nothing else needed, all requirements are met */
                return 1;
+       }
  
        hci_conn_encrypt(conn);
        return 0;
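Condensed restatement of the branch added above (illustrative annotation, not additional code in the merge): with encryption flagged but the key size not yet read back from the controller, hci_conn_security() now reports 0 ("not satisfied yet") so the upper layers keep waiting, instead of unconditionally returning 1.

	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return conn->enc_key_size ? 1 : 0;	/* 0 stalls L2CAP until the size is known */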
@@@ -1304,7 -1312,7 +1312,7 @@@ EXPORT_SYMBOL(hci_conn_security)
  /* Check secure link requirement */
  int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
  {
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        /* Accept if non-secure or higher security level is required */
        if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
@@@ -1323,7 -1331,7 +1331,7 @@@ EXPORT_SYMBOL(hci_conn_check_secure)
  /* Switch role */
  int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
  {
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        if (role == conn->role)
                return 1;
@@@ -1344,7 -1352,7 +1352,7 @@@ void hci_conn_enter_active_mode(struct 
  {
        struct hci_dev *hdev = conn->hdev;
  
 -      BT_DBG("hcon %p mode %d", conn, conn->mode);
 +      BT_DBG("hcon %pK mode %d", conn, conn->mode);
  
        if (conn->mode != HCI_CM_SNIFF)
                goto timer;
@@@ -1524,7 -1532,7 +1532,7 @@@ struct hci_chan *hci_chan_create(struc
        struct hci_dev *hdev = conn->hdev;
        struct hci_chan *chan;
  
 -      BT_DBG("%s hcon %p", hdev->name, conn);
 +      BT_DBG("%s hcon %pK", hdev->name, conn);
  
        if (test_bit(HCI_CONN_DROP, &conn->flags)) {
                BT_DBG("Refusing to create new hci_chan");
@@@ -1549,7 -1557,7 +1557,7 @@@ void hci_chan_del(struct hci_chan *chan
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;
  
 -      BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
 +      BT_DBG("%s hcon %pK chan %pK", hdev->name, conn, chan);
  
        list_del_rcu(&chan->list);
  
@@@ -1568,7 -1576,7 +1576,7 @@@ void hci_chan_list_flush(struct hci_con
  {
        struct hci_chan *chan, *n;
  
 -      BT_DBG("hcon %p", conn);
 +      BT_DBG("hcon %pK", conn);
  
        list_for_each_entry_safe(chan, n, &conn->chan_list, list)
                hci_chan_del(chan);
diff --combined net/bluetooth/l2cap_core.c
@@@ -249,7 -249,7 +249,7 @@@ static u16 l2cap_alloc_cid(struct l2cap
  
  static void l2cap_state_change(struct l2cap_chan *chan, int state)
  {
 -      BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
 +      BT_DBG("chan %pK %s -> %s", chan, state_to_string(chan->state),
               state_to_string(state));
  
        chan->state = state;
@@@ -400,7 -400,7 +400,7 @@@ static void l2cap_chan_timeout(struct w
        struct l2cap_conn *conn = chan->conn;
        int reason;
  
 -      BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
 +      BT_DBG("chan %pK state %s", chan, state_to_string(chan->state));
  
        mutex_lock(&conn->chan_lock);
        l2cap_chan_lock(chan);
@@@ -449,7 -449,7 +449,7 @@@ struct l2cap_chan *l2cap_chan_create(vo
        /* This flag is cleared in l2cap_chan_ready() */
        set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        return chan;
  }
@@@ -459,7 -459,7 +459,7 @@@ static void l2cap_chan_destroy(struct k
  {
        struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        write_lock(&chan_list_lock);
        list_del(&chan->global_l);
  
  void l2cap_chan_hold(struct l2cap_chan *c)
  {
 -      BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
 +      BT_DBG("chan %pK orig refcnt %d", c, atomic_read(&c->kref.refcount));
  
        kref_get(&c->kref);
  }
  
  void l2cap_chan_put(struct l2cap_chan *c)
  {
 -      BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
 +      BT_DBG("chan %pK orig refcnt %d", c, atomic_read(&c->kref.refcount));
  
        kref_put(&c->kref, l2cap_chan_destroy);
  }
@@@ -516,7 -516,7 +516,7 @@@ static void l2cap_le_flowctl_init(struc
  
  void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
  {
 -      BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
 +      BT_DBG("conn %pK, psm 0x%2.2x, dcid 0x%4.4x", conn,
               __le16_to_cpu(chan->psm), chan->dcid);
  
        conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
@@@ -579,7 -579,7 +579,7 @@@ void l2cap_chan_del(struct l2cap_chan *
  
        __clear_chan_timer(chan);
  
 -      BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
 +      BT_DBG("chan %pK, conn %pK, err %d, state %s", chan, conn, err,
               state_to_string(chan->state));
  
        chan->ops->teardown(chan, err);
        if (chan->hs_hchan) {
                struct hci_chan *hs_hchan = chan->hs_hchan;
  
 -              BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
 +              BT_DBG("chan %pK disconnect hs_hchan %pK", chan, hs_hchan);
                amp_disconnect_logical_link(hs_hchan);
        }
  
@@@ -711,7 -711,7 +711,7 @@@ void l2cap_chan_close(struct l2cap_cha
  {
        struct l2cap_conn *conn = chan->conn;
  
 -      BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
 +      BT_DBG("chan %pK state %s", chan, state_to_string(chan->state));
  
        switch (chan->state) {
        case BT_LISTEN:
@@@ -874,7 -874,7 +874,7 @@@ static void l2cap_do_send(struct l2cap_
        struct hci_conn *hcon = chan->conn->hcon;
        u16 flags;
  
 -      BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
 +      BT_DBG("chan %pK, skb %pK len %d priority %u", chan, skb, skb->len,
               skb->priority);
  
        if (chan->hs_hcon && !__chan_is_moving(chan)) {
@@@ -1061,7 -1061,7 +1061,7 @@@ static void l2cap_send_sframe(struct l2
        struct sk_buff *skb;
        u32 control_field;
  
 -      BT_DBG("chan %p, control %p", chan, control);
 +      BT_DBG("chan %pK, control %pK", chan, control);
  
        if (!control->sframe)
                return;
@@@ -1100,7 -1100,7 +1100,7 @@@ static void l2cap_send_rr_or_rnr(struc
  {
        struct l2cap_ctrl control;
  
 -      BT_DBG("chan %p, poll %d", chan, poll);
 +      BT_DBG("chan %pK, poll %d", chan, poll);
  
        memset(&control, 0, sizeof(control));
        control.sframe = 1;
@@@ -1189,7 -1189,7 +1189,7 @@@ static void l2cap_move_setup(struct l2c
  {
        struct sk_buff *skb;
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        if (chan->mode != L2CAP_MODE_ERTM)
                return;
  static void l2cap_move_done(struct l2cap_chan *chan)
  {
        u8 move_role = chan->move_role;
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        chan->move_state = L2CAP_MOVE_STABLE;
        chan->move_role = L2CAP_MOVE_ROLE_NONE;
@@@ -1302,7 -1302,7 +1302,7 @@@ static void l2cap_le_start(struct l2cap
  static void l2cap_start_connection(struct l2cap_chan *chan)
  {
        if (__amp_capable(chan)) {
 -              BT_DBG("chan %p AMP capable: discover AMPs", chan);
 +              BT_DBG("chan %pK AMP capable: discover AMPs", chan);
                a2mp_discover_amp(chan);
        } else if (chan->conn->hcon->type == LE_LINK) {
                l2cap_le_start(chan);
@@@ -1329,6 -1329,21 +1329,21 @@@ static void l2cap_request_info(struct l
                       sizeof(req), &req);
  }
  
+ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
+ {
+       /* The minimum encryption key size needs to be enforced by the
+        * host stack before establishing any L2CAP connections. The
+        * specification in theory allows a minimum of 1, but to align
+        * BR/EDR and LE transports, a minimum of 7 is chosen.
+        *
+        * This check might also be called for unencrypted connections
+        * that have no key size requirements. Ensure that the link is
+        * actually encrypted before enforcing a key size.
+        */
+       return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
+               hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
+ }
+ 
  static void l2cap_do_start(struct l2cap_chan *chan)
  {
        struct l2cap_conn *conn = chan->conn;
        if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
                return;
  
-       if (l2cap_chan_check_security(chan, true) &&
-           __l2cap_no_conn_pending(chan))
+       if (!l2cap_chan_check_security(chan, true) ||
+           !__l2cap_no_conn_pending(chan))
+               return;
+       if (l2cap_check_enc_key_size(conn->hcon))
                l2cap_start_connection(chan);
+       else
+               __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
  }
  
  static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
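l2cap_check_enc_key_size() above compares against a constant introduced by the companion hci_core.h change in the same fix; per the 7-octet minimum spelled out in its comment, the definition is effectively:

	/* include/net/bluetooth/hci_core.h (companion change, shown here only for context) */
	#define HCI_MIN_ENC_KEY_SIZE	7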
@@@ -1399,7 -1419,7 +1419,7 @@@ static void l2cap_conn_start(struct l2c
  {
        struct l2cap_chan *chan, *tmp;
  
 -      BT_DBG("conn %p", conn);
 +      BT_DBG("conn %pK", conn);
  
        mutex_lock(&conn->chan_lock);
  
                                continue;
                        }
  
-                       l2cap_start_connection(chan);
+                       if (l2cap_check_enc_key_size(conn->hcon))
+                               l2cap_start_connection(chan);
+                       else
+                               l2cap_chan_close(chan, ECONNREFUSED);
  
                } else if (chan->state == BT_CONNECT2) {
                        struct l2cap_conn_rsp rsp;
@@@ -1477,7 -1500,7 +1500,7 @@@ static void l2cap_le_conn_ready(struct 
        struct hci_conn *hcon = conn->hcon;
        struct hci_dev *hdev = hcon->hdev;
  
 -      BT_DBG("%s conn %p", hdev->name, conn);
 +      BT_DBG("%s conn %pK", hdev->name, conn);
  
        /* For outgoing pairing which doesn't necessarily have an
         * associated socket (e.g. mgmt_pair_device).
@@@ -1510,7 -1533,7 +1533,7 @@@ static void l2cap_conn_ready(struct l2c
        struct l2cap_chan *chan;
        struct hci_conn *hcon = conn->hcon;
  
 -      BT_DBG("conn %p", conn);
 +      BT_DBG("conn %pK", conn);
  
        if (hcon->type == ACL_LINK)
                l2cap_request_info(conn);
@@@ -1551,7 -1574,7 +1574,7 @@@ static void l2cap_conn_unreliable(struc
  {
        struct l2cap_chan *chan;
  
 -      BT_DBG("conn %p", conn);
 +      BT_DBG("conn %pK", conn);
  
        mutex_lock(&conn->chan_lock);
  
@@@ -1661,7 -1684,7 +1684,7 @@@ static void l2cap_conn_del(struct hci_c
        if (!conn)
                return;
  
 -      BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
 +      BT_DBG("hcon %pK conn %pK, err %d", hcon, conn, err);
  
        kfree_skb(conn->rx_skb);
  
@@@ -1789,7 -1812,7 +1812,7 @@@ static void l2cap_monitor_timeout(struc
        struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
                                               monitor_timer.work);
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        l2cap_chan_lock(chan);
  
@@@ -1810,7 -1833,7 +1833,7 @@@ static void l2cap_retrans_timeout(struc
        struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
                                               retrans_timer.work);
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        l2cap_chan_lock(chan);
  
@@@ -1831,7 -1854,7 +1854,7 @@@ static void l2cap_streaming_send(struc
        struct sk_buff *skb;
        struct l2cap_ctrl *control;
  
 -      BT_DBG("chan %p, skbs %p", chan, skbs);
 +      BT_DBG("chan %pK, skbs %pK", chan, skbs);
  
        if (__chan_is_moving(chan))
                return;
@@@ -1870,7 -1893,7 +1893,7 @@@ static int l2cap_ertm_send(struct l2cap
        struct l2cap_ctrl *control;
        int sent = 0;
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        if (chan->state != BT_CONNECTED)
                return -ENOTCONN;
@@@ -1941,7 -1964,7 +1964,7 @@@ static void l2cap_ertm_resend(struct l2
        struct sk_buff *tx_skb;
        u16 seq;
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
                return;
  static void l2cap_retransmit(struct l2cap_chan *chan,
                             struct l2cap_ctrl *control)
  {
 -      BT_DBG("chan %p, control %p", chan, control);
 +      BT_DBG("chan %pK, control %pK", chan, control);
  
        l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
        l2cap_ertm_resend(chan);
@@@ -2029,7 -2052,7 +2052,7 @@@ static void l2cap_retransmit_all(struc
  {
        struct sk_buff *skb;
  
 -      BT_DBG("chan %p, control %p", chan, control);
 +      BT_DBG("chan %pK, control %pK", chan, control);
  
        if (control->poll)
                set_bit(CONN_SEND_FBIT, &chan->conn_state);
@@@ -2065,7 -2088,7 +2088,7 @@@ static void l2cap_send_ack(struct l2cap
                                         chan->last_acked_seq);
        int threshold;
  
 -      BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
 +      BT_DBG("chan %pK last_acked_seq %d buffer_seq %d",
               chan, chan->last_acked_seq, chan->buffer_seq);
  
        memset(&control, 0, sizeof(control));
@@@ -2160,7 -2183,7 +2183,7 @@@ static struct sk_buff *l2cap_create_con
        int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
        struct l2cap_hdr *lh;
  
 -      BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
 +      BT_DBG("chan %pK psm 0x%2.2x len %zu", chan,
               __le16_to_cpu(chan->psm), len);
  
        count = min_t(unsigned int, (conn->mtu - hlen), len);
@@@ -2192,7 -2215,7 +2215,7 @@@ static struct sk_buff *l2cap_create_bas
        int err, count;
        struct l2cap_hdr *lh;
  
 -      BT_DBG("chan %p len %zu", chan, len);
 +      BT_DBG("chan %pK len %zu", chan, len);
  
        count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
  
@@@ -2223,7 -2246,7 +2246,7 @@@ static struct sk_buff *l2cap_create_ifr
        int err, count, hlen;
        struct l2cap_hdr *lh;
  
 -      BT_DBG("chan %p len %zu", chan, len);
 +      BT_DBG("chan %pK len %zu", chan, len);
  
        if (!conn)
                return ERR_PTR(-ENOTCONN);
@@@ -2277,7 -2300,7 +2300,7 @@@ static int l2cap_segment_sdu(struct l2c
        size_t pdu_len;
        u8 sar;
  
 -      BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
 +      BT_DBG("chan %pK, msg %pK, len %zu", chan, msg, len);
  
        /* It is critical that ERTM PDUs fit in a single HCI fragment,
         * so fragmented skbs are not used.  The HCI layer's handling
@@@ -2344,7 -2367,7 +2367,7 @@@ static struct sk_buff *l2cap_create_le_
        int err, count, hlen;
        struct l2cap_hdr *lh;
  
 -      BT_DBG("chan %p len %zu", chan, len);
 +      BT_DBG("chan %pK len %zu", chan, len);
  
        if (!conn)
                return ERR_PTR(-ENOTCONN);
@@@ -2386,7 -2409,7 +2409,7 @@@ static int l2cap_segment_le_sdu(struct 
        size_t pdu_len;
        u16 sdu_len;
  
 -      BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
 +      BT_DBG("chan %pK, msg %pK, len %zu", chan, msg, len);
  
        sdu_len = len;
        pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
@@@ -2552,7 -2575,7 +2575,7 @@@ static void l2cap_send_srej(struct l2ca
        struct l2cap_ctrl control;
        u16 seq;
  
 -      BT_DBG("chan %p, txseq %u", chan, txseq);
 +      BT_DBG("chan %pK, txseq %u", chan, txseq);
  
        memset(&control, 0, sizeof(control));
        control.sframe = 1;
@@@ -2574,7 -2597,7 +2597,7 @@@ static void l2cap_send_srej_tail(struc
  {
        struct l2cap_ctrl control;
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
                return;
@@@ -2592,7 -2615,7 +2615,7 @@@ static void l2cap_send_srej_list(struc
        u16 initial_head;
        u16 seq;
  
 -      BT_DBG("chan %p, txseq %u", chan, txseq);
 +      BT_DBG("chan %pK, txseq %u", chan, txseq);
  
        memset(&control, 0, sizeof(control));
        control.sframe = 1;
@@@ -2617,7 -2640,7 +2640,7 @@@ static void l2cap_process_reqseq(struc
        struct sk_buff *acked_skb;
        u16 ackseq;
  
 -      BT_DBG("chan %p, reqseq %u", chan, reqseq);
 +      BT_DBG("chan %pK, reqseq %u", chan, reqseq);
  
        if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
                return;
  
  static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
  {
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        chan->expected_tx_seq = chan->buffer_seq;
        l2cap_seq_list_clear(&chan->srej_list);
@@@ -2658,7 -2681,7 +2681,7 @@@ static void l2cap_tx_state_xmit(struct 
                                struct l2cap_ctrl *control,
                                struct sk_buff_head *skbs, u8 event)
  {
 -      BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
 +      BT_DBG("chan %pK, control %pK, skbs %pK, event %d", chan, control, skbs,
               event);
  
        switch (event) {
@@@ -2730,7 -2753,7 +2753,7 @@@ static void l2cap_tx_state_wait_f(struc
                                  struct l2cap_ctrl *control,
                                  struct sk_buff_head *skbs, u8 event)
  {
 -      BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
 +      BT_DBG("chan %pK, control %pK, skbs %pK, event %d", chan, control, skbs,
               event);
  
        switch (event) {
  static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
                     struct sk_buff_head *skbs, u8 event)
  {
 -      BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
 +      BT_DBG("chan %pK, control %pK, skbs %pK, event %d, state %d",
               chan, control, skbs, event, chan->tx_state);
  
        switch (chan->tx_state) {
  static void l2cap_pass_to_tx(struct l2cap_chan *chan,
                             struct l2cap_ctrl *control)
  {
 -      BT_DBG("chan %p, control %p", chan, control);
 +      BT_DBG("chan %pK, control %pK", chan, control);
        l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
  }
  
  static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
                                  struct l2cap_ctrl *control)
  {
 -      BT_DBG("chan %p, control %p", chan, control);
 +      BT_DBG("chan %pK, control %pK", chan, control);
        l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
  }
  
@@@ -2843,7 -2866,7 +2866,7 @@@ static void l2cap_raw_recv(struct l2cap
        struct sk_buff *nskb;
        struct l2cap_chan *chan;
  
 -      BT_DBG("conn %p", conn);
 +      BT_DBG("conn %pK", conn);
  
        mutex_lock(&conn->chan_lock);
  
@@@ -2874,7 -2897,7 +2897,7 @@@ static struct sk_buff *l2cap_build_cmd(
        struct l2cap_hdr *lh;
        int len, count;
  
 -      BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
 +      BT_DBG("conn %pK, code 0x%2.2x, ident 0x%2.2x, len %u",
               conn, code, ident, dlen);
  
        if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
@@@ -3036,7 -3059,7 +3059,7 @@@ static void l2cap_ack_timeout(struct wo
                                               ack_timer.work);
        u16 frames_to_ack;
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        l2cap_chan_lock(chan);
  
@@@ -3181,7 -3204,7 +3204,7 @@@ static int l2cap_build_conf_req(struct 
        void *endptr = data + data_size;
        u16 size;
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        if (chan->num_conf_req || chan->num_conf_rsp)
                goto done;
@@@ -3311,7 -3334,7 +3334,7 @@@ static int l2cap_parse_conf_req(struct 
        u16 result = L2CAP_CONF_SUCCESS;
        u16 size;
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        while (len >= L2CAP_CONF_OPT_SIZE) {
                len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
@@@ -3531,7 -3554,7 +3554,7 @@@ static int l2cap_parse_conf_rsp(struct 
        struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
        struct l2cap_conf_efs efs;
  
 -      BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
 +      BT_DBG("chan %pK, rsp %pK, len %d, req %pK", chan, rsp, len, data);
  
        while (len >= L2CAP_CONF_OPT_SIZE) {
                len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
@@@ -3644,7 -3667,7 +3667,7 @@@ static int l2cap_build_conf_rsp(struct 
        struct l2cap_conf_rsp *rsp = data;
        void *ptr = rsp->data;
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        rsp->scid   = cpu_to_le16(chan->dcid);
        rsp->result = cpu_to_le16(result);
@@@ -3658,7 -3681,7 +3681,7 @@@ void __l2cap_le_connect_rsp_defer(struc
        struct l2cap_le_conn_rsp rsp;
        struct l2cap_conn *conn = chan->conn;
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        rsp.dcid    = cpu_to_le16(chan->scid);
        rsp.mtu     = cpu_to_le16(chan->imtu);
@@@ -3687,7 -3710,7 +3710,7 @@@ void __l2cap_connect_rsp_defer(struct l
        else
                rsp_code = L2CAP_CONN_RSP;
  
 -      BT_DBG("chan %p rsp_code %u", chan, rsp_code);
 +      BT_DBG("chan %pK rsp_code %u", chan, rsp_code);
  
        l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
  
@@@ -3715,7 -3738,7 +3738,7 @@@ static void l2cap_conf_rfc_get(struct l
                .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
        };
  
 -      BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
 +      BT_DBG("chan %pK, rsp %pK, len %d", chan, rsp, len);
  
        if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
                return;
@@@ -4023,7 -4046,7 +4046,7 @@@ static void l2cap_send_efs_conf_rsp(str
  {
        struct l2cap_conn *conn = chan->conn;
  
 -      BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
 +      BT_DBG("conn %pK chan %pK ident %d flags 0x%4.4x", conn, chan, ident,
               flags);
  
        clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
@@@ -4520,8 -4543,7 +4543,8 @@@ static int l2cap_create_channel_req(str
                        return 0;
                }
  
 -              BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
 +              BT_DBG("mgr %pK bredr_chan %pK hs_hcon %pK",
 +                     mgr, chan, hs_hcon);
  
                mgr->bredr_chan = chan;
                chan->hs_hcon = hs_hcon;
@@@ -4550,7 -4572,7 +4573,7 @@@ static void l2cap_send_move_chan_req(st
        struct l2cap_move_chan_req req;
        u8 ident;
  
 -      BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
 +      BT_DBG("chan %pK, dest_amp_id %d", chan, dest_amp_id);
  
        ident = l2cap_get_ident(chan->conn);
        chan->ident = ident;
@@@ -4568,7 -4590,7 +4591,7 @@@ static void l2cap_send_move_chan_rsp(st
  {
        struct l2cap_move_chan_rsp rsp;
  
 -      BT_DBG("chan %p, result 0x%4.4x", chan, result);
 +      BT_DBG("chan %pK, result 0x%4.4x", chan, result);
  
        rsp.icid = cpu_to_le16(chan->dcid);
        rsp.result = cpu_to_le16(result);
@@@ -4581,7 -4603,7 +4604,7 @@@ static void l2cap_send_move_chan_cfm(st
  {
        struct l2cap_move_chan_cfm cfm;
  
 -      BT_DBG("chan %p, result 0x%4.4x", chan, result);
 +      BT_DBG("chan %pK, result 0x%4.4x", chan, result);
  
        chan->ident = l2cap_get_ident(chan->conn);
  
@@@ -4598,7 -4620,7 +4621,7 @@@ static void l2cap_send_move_chan_cfm_ic
  {
        struct l2cap_move_chan_cfm cfm;
  
 -      BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
 +      BT_DBG("conn %pK, icid 0x%4.4x", conn, icid);
  
        cfm.icid = cpu_to_le16(icid);
        cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
@@@ -4718,7 -4740,7 +4741,7 @@@ static void l2cap_logical_finish_move(s
  void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
                       u8 status)
  {
 -      BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
 +      BT_DBG("chan %pK, hchan %pK, status %d", chan, hchan, status);
  
        if (status) {
                l2cap_logical_fail(chan);
  
  void l2cap_move_start(struct l2cap_chan *chan)
  {
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        if (chan->local_amp_id == AMP_ID_BREDR) {
                if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
  static void l2cap_do_create(struct l2cap_chan *chan, int result,
                            u8 local_amp_id, u8 remote_amp_id)
  {
 -      BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
 +      BT_DBG("chan %pK state %s %u -> %u", chan, state_to_string(chan->state),
               local_amp_id, remote_amp_id);
  
        chan->fcs = L2CAP_FCS_NONE;
@@@ -4866,7 -4888,7 +4889,7 @@@ void __l2cap_physical_cfm(struct l2cap_
        u8 local_amp_id = chan->local_amp_id;
        u8 remote_amp_id = chan->remote_amp_id;
  
 -      BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
 +      BT_DBG("chan %pK, result %d, local_amp_id %d, remote_amp_id %d",
               chan, result, local_amp_id, remote_amp_id);
  
        if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
@@@ -5789,7 -5811,7 +5812,7 @@@ static void l2cap_send_i_or_rr_or_rnr(s
  {
        struct l2cap_ctrl control;
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        memset(&control, 0, sizeof(control));
        control.sframe = 1;
@@@ -5944,7 -5966,7 +5967,7 @@@ static int l2cap_rx_queued_iframes(stru
         * until a gap is encountered.
         */
  
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
                struct sk_buff *skb;
@@@ -5976,7 -5998,7 +5999,7 @@@ static void l2cap_handle_srej(struct l2
  {
        struct sk_buff *skb;
  
 -      BT_DBG("chan %p, control %p", chan, control);
 +      BT_DBG("chan %pK, control %pK", chan, control);
  
        if (control->reqseq == chan->next_tx_seq) {
                BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
@@@ -6034,7 -6056,7 +6057,7 @@@ static void l2cap_handle_rej(struct l2c
  {
        struct sk_buff *skb;
  
 -      BT_DBG("chan %p, control %p", chan, control);
 +      BT_DBG("chan %pK, control %pK", chan, control);
  
        if (control->reqseq == chan->next_tx_seq) {
                BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
  
  static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
  {
 -      BT_DBG("chan %p, txseq %d", chan, txseq);
 +      BT_DBG("chan %pK, txseq %d", chan, txseq);
  
        BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
               chan->expected_tx_seq);
@@@ -6159,7 -6181,7 +6182,7 @@@ static int l2cap_rx_state_recv(struct l
        int err = 0;
        bool skb_in_use = false;
  
 -      BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
 +      BT_DBG("chan %pK, control %pK, skb %pK, event %d", chan, control, skb,
               event);
  
        switch (event) {
                         */
                        skb_queue_tail(&chan->srej_q, skb);
                        skb_in_use = true;
 -                      BT_DBG("Queued %p (queue len %d)", skb,
 +                      BT_DBG("Queued %pK (queue len %d)", skb,
                               skb_queue_len(&chan->srej_q));
  
                        clear_bit(CONN_SREJ_ACT, &chan->conn_state);
        }
  
        if (skb && !skb_in_use) {
 -              BT_DBG("Freeing %p", skb);
 +              BT_DBG("Freeing %pK", skb);
                kfree_skb(skb);
        }
  
@@@ -6294,7 -6316,7 +6317,7 @@@ static int l2cap_rx_state_srej_sent(str
        u16 txseq = control->txseq;
        bool skb_in_use = false;
  
 -      BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
 +      BT_DBG("chan %pK, control %pK, skb %pK, event %d", chan, control, skb,
               event);
  
        switch (event) {
                        l2cap_pass_to_tx(chan, control);
                        skb_queue_tail(&chan->srej_q, skb);
                        skb_in_use = true;
 -                      BT_DBG("Queued %p (queue len %d)", skb,
 +                      BT_DBG("Queued %pK (queue len %d)", skb,
                               skb_queue_len(&chan->srej_q));
  
                        chan->expected_tx_seq = __next_seq(chan, txseq);
                        l2cap_pass_to_tx(chan, control);
                        skb_queue_tail(&chan->srej_q, skb);
                        skb_in_use = true;
 -                      BT_DBG("Queued %p (queue len %d)", skb,
 +                      BT_DBG("Queued %pK (queue len %d)", skb,
                               skb_queue_len(&chan->srej_q));
  
                        err = l2cap_rx_queued_iframes(chan);
                         */
                        skb_queue_tail(&chan->srej_q, skb);
                        skb_in_use = true;
 -                      BT_DBG("Queued %p (queue len %d)", skb,
 +                      BT_DBG("Queued %pK (queue len %d)", skb,
                               skb_queue_len(&chan->srej_q));
  
                        l2cap_pass_to_tx(chan, control);
                         */
                        skb_queue_tail(&chan->srej_q, skb);
                        skb_in_use = true;
 -                      BT_DBG("Queued %p (queue len %d)", skb,
 +                      BT_DBG("Queued %pK (queue len %d)", skb,
                               skb_queue_len(&chan->srej_q));
  
                        l2cap_pass_to_tx(chan, control);
        }
  
        if (skb && !skb_in_use) {
 -              BT_DBG("Freeing %p", skb);
 +              BT_DBG("Freeing %pK", skb);
                kfree_skb(skb);
        }
  
  
  static int l2cap_finish_move(struct l2cap_chan *chan)
  {
 -      BT_DBG("chan %p", chan);
 +      BT_DBG("chan %pK", chan);
  
        chan->rx_state = L2CAP_RX_STATE_RECV;
  
@@@ -6449,7 -6471,7 +6472,7 @@@ static int l2cap_rx_state_wait_p(struc
  {
        int err;
  
 -      BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
 +      BT_DBG("chan %pK, control %pK, skb %pK, event %d", chan, control, skb,
               event);
  
        if (!control->poll)
@@@ -6533,7 -6555,7 +6556,7 @@@ static int l2cap_rx(struct l2cap_chan *
  {
        int err = 0;
  
 -      BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
 +      BT_DBG("chan %pK, control %pK, skb %pK, event %d, state %d", chan,
               control, skb, event, chan->rx_state);
  
        if (__valid_reqseq(chan, control->reqseq)) {
@@@ -6570,7 -6592,7 +6593,7 @@@ static int l2cap_stream_rx(struct l2cap
  {
        int err = 0;
  
 -      BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
 +      BT_DBG("chan %pK, control %pK, skb %pK, state %d", chan, control, skb,
               chan->rx_state);
  
        if (l2cap_classify_txseq(chan, control->txseq) ==
                chan->sdu_len = 0;
  
                if (skb) {
 -                      BT_DBG("Freeing %p", skb);
 +                      BT_DBG("Freeing %pK", skb);
                        kfree_skb(skb);
                }
        }
@@@ -6705,7 -6727,7 +6728,7 @@@ static void l2cap_chan_le_send_credits(
  
        return_credits = le_max_credits - chan->rx_credits;
  
 -      BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
 +      BT_DBG("chan %pK returning %u credits to sender", chan, return_credits);
  
        chan->rx_credits += return_credits;
  
@@@ -6830,7 -6852,7 +6853,7 @@@ static void l2cap_data_channel(struct l
                }
        }
  
 -      BT_DBG("chan %p, len %d", chan, skb->len);
 +      BT_DBG("chan %pK, len %d", chan, skb->len);
  
        /* If we receive data on a fixed channel before the info req/rsp
           * procedure is done, simply assume that the channel is supported
                goto done;
  
        default:
 -              BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
 +              BT_DBG("chan %pK: bad mode 0x%2.2x", chan, chan->mode);
                break;
        }
  
@@@ -6895,7 -6917,7 +6918,7 @@@ static void l2cap_conless_channel(struc
        if (!chan)
                goto free_skb;
  
 -      BT_DBG("chan %p, len %d", chan, skb->len);
 +      BT_DBG("chan %pK, len %d", chan, skb->len);
  
        if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
                goto drop;
@@@ -7008,7 -7030,7 +7031,7 @@@ static struct l2cap_conn *l2cap_conn_ad
        conn->hcon = hci_conn_get(hcon);
        conn->hchan = hchan;
  
 -      BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
 +      BT_DBG("hcon %pK conn %pK hchan %pK", hcon, conn, hchan);
  
        switch (hcon->type) {
        case LE_LINK:
@@@ -7304,7 -7326,7 +7327,7 @@@ static void l2cap_connect_cfm(struct hc
        if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
                return;
  
 -      BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
 +      BT_DBG("hcon %pK bdaddr %pMR status %d", hcon, &hcon->dst, status);
  
        if (status) {
                l2cap_conn_del(hcon, bt_to_errno(status));
@@@ -7359,7 -7381,7 +7382,7 @@@ int l2cap_disconn_ind(struct hci_conn *
  {
        struct l2cap_conn *conn = hcon->l2cap_data;
  
 -      BT_DBG("hcon %p", hcon);
 +      BT_DBG("hcon %pK", hcon);
  
        if (!conn)
                return HCI_ERROR_REMOTE_USER_TERM;
@@@ -7371,7 -7393,7 +7394,7 @@@ static void l2cap_disconn_cfm(struct hc
        if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
                return;
  
 -      BT_DBG("hcon %p reason %d", hcon, reason);
 +      BT_DBG("hcon %pK reason %d", hcon, reason);
  
        l2cap_conn_del(hcon, bt_to_errno(reason));
  }
@@@ -7401,14 -7423,14 +7424,14 @@@ static void l2cap_security_cfm(struct h
        if (!conn)
                return;
  
 -      BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
 +      BT_DBG("conn %pK status 0x%2.2x encrypt %u", conn, status, encrypt);
  
        mutex_lock(&conn->chan_lock);
  
        list_for_each_entry(chan, &conn->chan_l, list) {
                l2cap_chan_lock(chan);
  
 -              BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
 +              BT_DBG("chan %pK scid 0x%4.4x state %s", chan, chan->scid,
                       state_to_string(chan->state));
  
                if (chan->scid == L2CAP_CID_A2MP) {
                }
  
                if (chan->state == BT_CONNECT) {
-                       if (!status)
+                       if (!status && l2cap_check_enc_key_size(hcon))
                                l2cap_start_connection(chan);
                        else
                                __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
                        struct l2cap_conn_rsp rsp;
                        __u16 res, stat;
  
-                       if (!status) {
+                       if (!status && l2cap_check_enc_key_size(hcon)) {
                                if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
                                        res = L2CAP_CR_PEND;
                                        stat = L2CAP_CS_AUTHOR_PEND;
@@@ -7500,7 -7522,7 +7523,7 @@@ void l2cap_recv_acldata(struct hci_con
        if (!conn)
                goto drop;
  
 -      BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
 +      BT_DBG("conn %pK len %d flags 0x%x", conn, skb->len, flags);
  
        switch (flags) {
        case ACL_START:
diff --combined net/mac80211/rx.c
@@@ -122,8 -122,7 +122,8 @@@ static inline bool should_drop_frame(st
        hdr = (void *)(skb->data + rtap_vendor_space);
  
        if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
 -                          RX_FLAG_FAILED_PLCP_CRC))
 +                          RX_FLAG_FAILED_PLCP_CRC |
 +                          RX_FLAG_ONLY_MONITOR))
                return true;
  
        if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@@ -509,7 -508,7 +509,7 @@@ ieee80211_rx_monitor(struct ieee80211_l
                return NULL;
        }
  
 -      if (!local->monitors) {
 +      if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
                if (should_drop_frame(origskb, present_fcs_len,
                                      rtap_vendor_space)) {
                        dev_kfree_skb(origskb);
@@@ -3325,6 -3324,8 +3325,8 @@@ static bool ieee80211_accept_frame(stru
        case NL80211_IFTYPE_STATION:
                if (!bssid && !sdata->u.mgd.use_4addr)
                        return false;
+               if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
+                       return false;
                if (multicast)
                        return true;
                return ether_addr_equal(sdata->vif.addr, hdr->addr1);
@@@ -3485,7 -3486,6 +3487,7 @@@ static bool ieee80211_prepare_and_rx_ha
   * be called with rcu_read_lock protection.
   */
  static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 +                                       struct ieee80211_sta *pubsta,
                                         struct sk_buff *skb,
                                         struct napi_struct *napi)
  {
        __le16 fc;
        struct ieee80211_rx_data rx;
        struct ieee80211_sub_if_data *prev;
 -      struct sta_info *sta, *prev_sta;
        struct rhash_head *tmp;
        int err = 0;
  
                     ieee80211_is_beacon(hdr->frame_control)))
                ieee80211_scan_rx(local, skb);
  
 -      if (ieee80211_is_data(fc)) {
 +      if (pubsta) {
 +              rx.sta = container_of(pubsta, struct sta_info, sta);
 +              rx.sdata = rx.sta->sdata;
 +              if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
 +                      return;
 +              goto out;
 +      } else if (ieee80211_is_data(fc)) {
 +              struct sta_info *sta, *prev_sta;
                const struct bucket_table *tbl;
  
                prev_sta = NULL;
   * This is the receive path handler. It is called by a low level driver when an
   * 802.11 MPDU is received from the hardware.
   */
 -void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
 -                     struct napi_struct *napi)
 +void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
 +                     struct sk_buff *skb, struct napi_struct *napi)
  {
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_rate *rate = NULL;
        ieee80211_tpt_led_trig_rx(local,
                        ((struct ieee80211_hdr *)skb->data)->frame_control,
                        skb->len);
 -      __ieee80211_rx_handle_packet(hw, skb, napi);
 +
 +      __ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
  
        rcu_read_unlock();
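With the reworked ieee80211_rx_napi() signature above, a driver that has already resolved the receiving station can hand it in and skip mac80211's lookup by transmitter address; passing NULL preserves the old behaviour. A hypothetical caller (driver function name made up for illustration):

	static void mydrv_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
				  struct ieee80211_sta *sta, struct napi_struct *napi)
	{
		/* sta may be NULL; mac80211 then falls back to the per-address lookup */
		ieee80211_rx_napi(hw, sta, skb, napi);
	}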
  
diff --combined net/wireless/core.c
@@@ -3,7 -3,6 +3,7 @@@
   *
   * Copyright 2006-2010                Johannes Berg <johannes@sipsolutions.net>
   * Copyright 2013-2014  Intel Mobile Communications GmbH
 + * Copyright 2015     Intel Deutschland GmbH
   */
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@@ -161,7 -160,7 +161,7 @@@ int cfg80211_switch_netns(struct cfg802
        if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK))
                return -EOPNOTSUPP;
  
 -      list_for_each_entry(wdev, &rdev->wdev_list, list) {
 +      list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
                if (!wdev->netdev)
                        continue;
                wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
                /* failed -- clean up to old netns */
                net = wiphy_net(&rdev->wiphy);
  
 -              list_for_each_entry_continue_reverse(wdev, &rdev->wdev_list,
 +              list_for_each_entry_continue_reverse(wdev,
 +                                                   &rdev->wiphy.wdev_list,
                                                     list) {
                        if (!wdev->netdev)
                                continue;
@@@ -235,7 -233,7 +235,7 @@@ void cfg80211_shutdown_all_interfaces(s
  
        ASSERT_RTNL();
  
 -      list_for_each_entry(wdev, &rdev->wdev_list, list) {
 +      list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
                if (wdev->netdev) {
                        dev_close(wdev->netdev);
                        continue;
@@@ -303,8 -301,7 +303,8 @@@ void cfg80211_destroy_ifaces(struct cfg
                kfree(item);
                spin_unlock_irq(&rdev->destroy_list_lock);
  
 -              list_for_each_entry_safe(wdev, tmp, &rdev->wdev_list, list) {
 +              list_for_each_entry_safe(wdev, tmp,
 +                                       &rdev->wiphy.wdev_list, list) {
                        if (nlportid == wdev->owner_nlportid)
                                rdev_del_virtual_intf(rdev, wdev);
                }
@@@ -412,7 -409,7 +412,7 @@@ use_default_name
                }
        }
  
 -      INIT_LIST_HEAD(&rdev->wdev_list);
 +      INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
        INIT_LIST_HEAD(&rdev->beacon_registrations);
        spin_lock_init(&rdev->beacon_registrations_lock);
        spin_lock_init(&rdev->bss_lock);
                                   &rdev->rfkill_ops, rdev);
  
        if (!rdev->rfkill) {
-               kfree(rdev);
+               wiphy_free(&rdev->wiphy);
                return NULL;
        }
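A hedged note on the one-line rfkill failure-path change above: by this point the embedded wiphy has already been through device_initialize() and dev_set_name(), so only the device release path frees the duplicated name together with the rdev. The lines below are the same error path again, annotated for clarity, not new code in the merge:

	if (!rdev->rfkill) {
		wiphy_free(&rdev->wiphy);	/* put_device(): release callback frees rdev and the kobject name */
		return NULL;			/* a bare kfree(rdev) here leaked the name */
	}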
  
@@@ -628,13 -625,6 +628,13 @@@ int wiphy_register(struct wiphy *wiphy
                     !rdev->ops->set_mac_acl)))
                return -EINVAL;
  
 +      /* ensure only valid behaviours are flagged by the driver;
 +       * hence subtract 2, as bit 0 is invalid.
 +       */
 +      if (WARN_ON(wiphy->bss_select_support &&
 +                  (wiphy->bss_select_support & ~(BIT(__NL80211_BSS_SELECT_ATTR_AFTER_LAST) - 2))))
 +              return -EINVAL;
 +
        if (wiphy->addresses)
                memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
  
                nl80211_send_reg_change_event(&request);
        }
  
 +      /* Check that nobody globally advertises any capabilities they do not
 +       * advertise on all possible interface types.
 +       */
 +      if (wiphy->extended_capabilities_len &&
 +          wiphy->num_iftype_ext_capab &&
 +          wiphy->iftype_ext_capab) {
 +              u8 supported_on_all, j;
 +              const struct wiphy_iftype_ext_capab *capab;
 +
 +              capab = wiphy->iftype_ext_capab;
 +              for (j = 0; j < wiphy->extended_capabilities_len; j++) {
 +                      if (capab[0].extended_capabilities_len > j)
 +                              supported_on_all =
 +                                      capab[0].extended_capabilities[j];
 +                      else
 +                              supported_on_all = 0x00;
 +                      for (i = 1; i < wiphy->num_iftype_ext_capab; i++) {
 +                              if (j >= capab[i].extended_capabilities_len) {
 +                                      supported_on_all = 0x00;
 +                                      break;
 +                              }
 +                              supported_on_all &=
 +                                      capab[i].extended_capabilities[j];
 +                      }
 +                      if (WARN_ON(wiphy->extended_capabilities[j] &
 +                                  ~supported_on_all))
 +                              break;
 +              }
 +      }
 +
        rdev->wiphy.registered = true;
        rtnl_unlock();
  
@@@ -831,7 -791,7 +831,7 @@@ void wiphy_unregister(struct wiphy *wip
        nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);
        rdev->wiphy.registered = false;
  
 -      WARN_ON(!list_empty(&rdev->wdev_list));
 +      WARN_ON(!list_empty(&rdev->wiphy.wdev_list));
  
        /*
         * First remove the hardware from everywhere, this makes
@@@ -954,6 -914,7 +954,6 @@@ void __cfg80211_leave(struct cfg80211_r
                sched_scan_req = rtnl_dereference(rdev->sched_scan_req);
                if (sched_scan_req && dev == sched_scan_req->dev)
                        __cfg80211_stop_sched_scan(rdev, false);
 -
  #ifdef CONFIG_CFG80211_WEXT
                kfree(wdev->wext.ie);
                wdev->wext.ie = NULL;
  #endif
                cfg80211_disconnect(rdev, dev,
                                    WLAN_REASON_DEAUTH_LEAVING, true);
 +              cfg80211_mlme_down(rdev, dev);
                break;
        case NL80211_IFTYPE_MESH_POINT:
                __cfg80211_leave_mesh(rdev, dev);
                /* invalid */
                break;
        }
 +      wdev->beacon_interval = 0;
  }
  
  void cfg80211_leave(struct cfg80211_registered_device *rdev,
@@@ -1054,7 -1013,7 +1054,7 @@@ static int cfg80211_netdev_notifier_cal
                spin_lock_init(&wdev->mgmt_registrations_lock);
  
                wdev->identifier = ++rdev->wdev_id;
 -              list_add_rcu(&wdev->list, &rdev->wdev_list);
 +              list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
                rdev->devlist_generation++;
                /* can only change netns with wiphy */
                dev->features |= NETIF_F_NETNS_LOCAL;
                     wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
                     wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
                        dev->priv_flags |= IFF_DONT_BRIDGE;
 +              INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);
                break;
        case NETDEV_GOING_DOWN:
                cfg80211_leave(rdev, wdev);
  #ifdef CONFIG_CFG80211_WEXT
                        kzfree(wdev->wext.keys);
  #endif
 +                      flush_work(&wdev->disconnect_wk);
                }
                /*
                 * synchronise (so that we won't find this netdev