Merge tag 'v4.4.214' into 10
[sagit-ice-cold/kernel_xiaomi_msm8998.git] drivers/scsi/ufs/ufshcd.c
index 19f8206..368db1a 100644
@@ -3,7 +3,7 @@
  *
  * This code is based on drivers/scsi/ufs/ufshcd.c
  * Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * Authors:
  *     Santosh Yaraganavi <santosh.sy@samsung.com>
  */
 
 #include <linux/async.h>
+#include <scsi/ufs/ioctl.h>
 #include <linux/devfreq.h>
+#include <linux/nls.h>
+#include <linux/of.h>
+#include <linux/blkdev.h>
+#include <asm/unaligned.h>
 
 #include "ufshcd.h"
-#include "unipro.h"
+#include "ufshci.h"
+#include "ufs_quirks.h"
+#include "ufs-debugfs.h"
+#include "ufs-qcom.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+static int ufshcd_tag_req_type(struct request *rq)
+{
+       int rq_type = TS_WRITE;
+
+       if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
+               rq_type = TS_NOT_SUPPORTED;
+       else if (rq->cmd_flags & REQ_FLUSH)
+               rq_type = TS_FLUSH;
+       else if (rq_data_dir(rq) == READ)
+               rq_type = (rq->cmd_flags & REQ_URGENT) ?
+                       TS_URGENT_READ : TS_READ;
+       else if (rq->cmd_flags & REQ_URGENT)
+               rq_type = TS_URGENT_WRITE;
+
+       return rq_type;
+}
+
+static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
+{
+       ufsdbg_set_err_state(hba);
+       if (type < UFS_ERR_MAX)
+               hba->ufs_stats.err_stats[type]++;
+}
+
+static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
+{
+       struct request *rq =
+               hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
+       u64 **tag_stats = hba->ufs_stats.tag_stats;
+       int rq_type;
+
+       if (!hba->ufs_stats.enabled)
+               return;
+
+       tag_stats[tag][TS_TAG]++;
+       if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
+               return;
+
+       WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
+       rq_type = ufshcd_tag_req_type(rq);
+       if (rq_type >= 0 && rq_type < TS_NUM_STATS)
+               tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
+}
+
+static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
+               struct scsi_cmnd *cmd)
+{
+       struct request *rq = cmd ? cmd->request : NULL;
+
+       if (rq && rq->cmd_type & REQ_TYPE_FS)
+               hba->ufs_stats.q_depth--;
+}
+
+static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+       int rq_type;
+       struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
+       s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
+               lrbp->issue_time_stamp);
+
+       /* update general request statistics */
+       if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
+               hba->ufs_stats.req_stats[TS_TAG].min = delta;
+       hba->ufs_stats.req_stats[TS_TAG].count++;
+       hba->ufs_stats.req_stats[TS_TAG].sum += delta;
+       if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
+               hba->ufs_stats.req_stats[TS_TAG].max = delta;
+       if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
+               hba->ufs_stats.req_stats[TS_TAG].min = delta;
+
+       rq_type = ufshcd_tag_req_type(rq);
+       if (rq_type == TS_NOT_SUPPORTED)
+               return;
+
+       /* update request type specific statistics */
+       if (hba->ufs_stats.req_stats[rq_type].count == 0)
+               hba->ufs_stats.req_stats[rq_type].min = delta;
+       hba->ufs_stats.req_stats[rq_type].count++;
+       hba->ufs_stats.req_stats[rq_type].sum += delta;
+       if (delta > hba->ufs_stats.req_stats[rq_type].max)
+               hba->ufs_stats.req_stats[rq_type].max = delta;
+       if (delta < hba->ufs_stats.req_stats[rq_type].min)
+               hba->ufs_stats.req_stats[rq_type].min = delta;
+}
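The same count/sum/min/max bookkeeping repeats for each per-type bucket. A minimal sketch of turning those fields into an average latency at read-out time (the struct and helper names below are assumptions; the real read-out lives in the debugfs code):

    /* Hypothetical read-out helper; field names follow the usage above. */
    static s64 ufs_req_stats_avg_us(struct ufs_hba *hba, int rq_type)
    {
            struct ufshcd_req_stat *rs = &hba->ufs_stats.req_stats[rq_type];

            /* div64_s64() keeps the 64-bit division safe on 32-bit kernels */
            return rs->count ? div64_s64(rs->sum, rs->count) : 0;
    }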
+
+static void
+ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
+{
+       if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
+               hba->ufs_stats.query_stats_arr[opcode][idn]++;
+}
+
+#else
+static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
+{
+}
+
+static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
+               struct scsi_cmnd *cmd)
+{
+}
+
+static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
+{
+}
+
+static inline
+void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+}
+
+static inline
+void ufshcd_update_query_stats(struct ufs_hba *hba,
+                              enum query_opcode opcode, u8 idn)
+{
+}
+#endif
+
+#define PWR_INFO_MASK  0xF
+#define PWR_RX_OFFSET  4
+
+#define UFSHCD_REQ_SENSE_SIZE  18
 
 #define UFSHCD_ENABLE_INTRS    (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
 #define NOP_OUT_TIMEOUT    30 /* msecs */
 
 /* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
 /* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
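The new values trade the old budget (10 retries of 30 ms each) for fewer attempts with a much longer per-attempt bound. A sketch of the retry shape these two macros govern, with a hypothetical example_send_query() standing in for the real query dispatch:

    static int example_query_with_retries(struct ufs_hba *hba)
    {
            int err = -EAGAIN;
            int retries;

            for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
                    /* each attempt is itself bounded by QUERY_REQ_TIMEOUT ms */
                    err = example_send_query(hba);  /* hypothetical helper */
                    if (!err || err != -EAGAIN)
                            break;
                    dev_dbg(hba->dev, "query retry %d, err %d\n",
                            retries, err);
            }
            return err;
    }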
 
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT 100 /* msecs */
 
+/* maximum number of retries for a general UIC command  */
+#define UFS_UIC_COMMAND_RETRIES 3
+
 /* maximum number of link-startup retries */
 #define DME_LINKSTARTUP_RETRIES 3
 
+/* Maximum retries for Hibern8 enter */
+#define UIC_HIBERN8_ENTER_RETRIES 3
+
 /* maximum number of reset retries before giving up */
 #define MAX_HOST_RESET_RETRIES 5
 
 /* Interrupt aggregation default timeout, unit: 40us */
 #define INT_AGGR_DEF_TO        0x02
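With the default of 0x02 and the 40us unit, the aggregation timeout works out to 2 x 40us = 80us.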
 
+/* default value of auto suspend is 3 seconds */
+#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */
+
+#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE    10
+#define UFSHCD_CLK_GATING_DELAY_MS_PERF                50
+
+/* IOCTL opcode for command - ufs set device read only */
+#define UFS_IOCTL_BLKROSET      BLKROSET
+
+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION             2
+
 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                           \
        ({                                                              \
                int _ret;                                               \
                _ret;                                                   \
        })
 
+#define ufshcd_hex_dump(prefix_str, buf, len) \
+print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
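print_hex_dump() as configured here emits the buffer at KERN_ERR level, 16 bytes per line in 4-byte groups, each line prefixed with prefix_str plus the offset into the buffer (DUMP_PREFIX_OFFSET); the trailing false suppresses the ASCII side column.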
+
 static u32 ufs_query_desc_max_size[] = {
        QUERY_DESC_DEVICE_MAX_SIZE,
        QUERY_DESC_CONFIGURAION_MAX_SIZE,
@@ -94,6 +250,7 @@ static u32 ufs_query_desc_max_size[] = {
        QUERY_DESC_RFU_MAX_SIZE,
        QUERY_DESC_GEOMETRY_MAZ_SIZE,
        QUERY_DESC_POWER_MAX_SIZE,
+       QUERY_DESC_HEALTH_MAX_SIZE,
        QUERY_DESC_RFU_MAX_SIZE,
 };
 
@@ -119,9 +276,11 @@ enum {
 /* UFSHCD UIC layer error flags */
 enum {
        UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
-       UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
-       UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
-       UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
+       UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
+       UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
+       UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
+       UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
+       UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
 };
 
 /* Interrupt configuration options */
@@ -131,6 +290,8 @@ enum {
        UFSHCD_INT_CLEAR,
 };
 
+#define DEFAULT_UFSHCD_DBG_PRINT_EN    UFSHCD_DBG_PRINT_ALL
+
 #define ufshcd_set_eh_in_progress(h) \
        (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
 #define ufshcd_eh_in_progress(h) \
@@ -172,489 +333,1706 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
        return ufs_pm_lvl_states[lvl].link_state;
 }
 
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static inline enum ufs_pm_level
+ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+                                       enum uic_link_state link_state)
+{
+       enum ufs_pm_level lvl;
+
+       for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
+               if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
+                       (ufs_pm_lvl_states[lvl].link_state == link_state))
+                       return lvl;
+       }
+
+       /* if no match found, return the level 0 */
+       return UFS_PM_LVL_0;
+}
+
+static inline bool ufshcd_is_valid_pm_lvl(int lvl)
+{
+       if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
+               return true;
+       else
+               return false;
+}
+
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 static void ufshcd_hba_exit(struct ufs_hba *hba);
 static int ufshcd_probe_hba(struct ufs_hba *hba);
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
-                                bool skip_ref_clk);
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static int ufshcd_enable_clocks(struct ufs_hba *hba);
+static int ufshcd_disable_clocks(struct ufs_hba *hba,
+                                bool is_gating_context);
+static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
+                                             bool is_gating_context);
+static void ufshcd_hold_all(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
+static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
-static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
-               struct ufs_pa_layer_attr *desired_pwr_mode);
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
-                            struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
+static int ufshcd_devfreq_target(struct device *dev,
+                               unsigned long *freq, u32 flags);
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+               struct devfreq_dev_status *stat);
+
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
+       .upthreshold = 70,
+       .downdifferential = 65,
+       .simple_scaling = 1,
+};
+
+static void *gov_data = &ufshcd_ondemand_data;
+#else
+static void *gov_data;
+#endif
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+       .polling_ms     = 60,
+       .target         = ufshcd_devfreq_target,
+       .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
 
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
 {
-       int ret = 0;
+       return tag >= 0 && tag < hba->nutrs;
+}
 
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
+{
        if (!hba->is_irq_enabled) {
-               ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
-                               hba);
-               if (ret)
-                       dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
-                               __func__, ret);
+               enable_irq(hba->irq);
                hba->is_irq_enabled = true;
        }
-
-       return ret;
 }
 
 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 {
        if (hba->is_irq_enabled) {
-               free_irq(hba->irq, hba);
+               disable_irq(hba->irq);
                hba->is_irq_enabled = false;
        }
 }
 
-/*
- * ufshcd_wait_for_register - wait for register value to change
- * @hba - per-adapter interface
- * @reg - mmio register offset
- * @mask - mask to apply to read register value
- * @val - wait condition
- * @interval_us - polling interval in microsecs
- * @timeout_ms - timeout in millisecs
- *
- * Returns -ETIMEDOUT on error, zero on success
- */
-static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
-               u32 val, unsigned long interval_us, unsigned long timeout_ms)
+void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
 {
-       int err = 0;
-       unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
-
-       /* ignore bits that we don't intend to wait on */
-       val = val & mask;
-
-       while ((ufshcd_readl(hba, reg) & mask) != val) {
-               /* wakeup within 50us of expiry */
-               usleep_range(interval_us, interval_us + 50);
-
-               if (time_after(jiffies, timeout)) {
-                       if ((ufshcd_readl(hba, reg) & mask) != val)
-                               err = -ETIMEDOUT;
-                       break;
-               }
-       }
+       unsigned long flags;
+       bool unblock = false;
 
-       return err;
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->scsi_block_reqs_cnt--;
+       unblock = !hba->scsi_block_reqs_cnt;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       if (unblock)
+               scsi_unblock_requests(hba->host);
 }
+EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
 
-/**
- * ufshcd_get_intr_mask - Get the interrupt bit mask
- * @hba - Pointer to adapter instance
- *
- * Returns interrupt bit mask per version
- */
-static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
+static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
 {
-       if (hba->ufs_version == UFSHCI_VERSION_10)
-               return INTERRUPT_MASK_ALL_VER_10;
-       else
-               return INTERRUPT_MASK_ALL_VER_11;
+       if (!hba->scsi_block_reqs_cnt++)
+               scsi_block_requests(hba->host);
 }
 
-/**
- * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
- * @hba - Pointer to adapter instance
- *
- * Returns UFSHCI version supported by the controller
- */
-static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+void ufshcd_scsi_block_requests(struct ufs_hba *hba)
 {
-       if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
-               return ufshcd_vops_get_ufs_hci_version(hba);
+       unsigned long flags;
 
-       return ufshcd_readl(hba, REG_UFS_VERSION);
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       __ufshcd_scsi_block_requests(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
+EXPORT_SYMBOL(ufshcd_scsi_block_requests);
 
-/**
- * ufshcd_is_device_present - Check if any device connected to
- *                           the host controller
- * @hba: pointer to adapter instance
- *
- * Returns 1 if device present, 0 if no device detected
- */
-static inline int ufshcd_is_device_present(struct ufs_hba *hba)
+static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
 {
-       return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
-                                               DEVICE_PRESENT) ? 1 : 0;
+       int ret = 0;
+
+       if (!hba->pctrl)
+               return 0;
+
+       /* Assert reset if ctrl == true */
+       if (ctrl)
+               ret = pinctrl_select_state(hba->pctrl,
+                       pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
+       else
+               ret = pinctrl_select_state(hba->pctrl,
+                       pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));
+
+       if (ret < 0)
+               dev_err(hba->dev, "%s: %s failed with err %d\n",
+                       __func__, ctrl ? "Assert" : "Deassert", ret);
+
+       return ret;
 }
 
-/**
- * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
- * @lrb: pointer to local command reference block
- *
- * This function is used to get the OCS field from UTRD
- * Returns the OCS field in the UTRD
- */
-static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
 {
-       return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
+       return ufshcd_device_reset_ctrl(hba, true);
 }
 
-/**
- * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
- * @task_req_descp: pointer to utp_task_req_desc structure
- *
- * This function is used to get the OCS field from UTMRD
- * Returns the OCS field in the UTMRD
- */
-static inline int
-ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
+static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
 {
-       return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
+       return ufshcd_device_reset_ctrl(hba, false);
 }
 
-/**
- * ufshcd_get_tm_free_slot - get a free slot for task management request
- * @hba: per adapter instance
- * @free_slot: pointer to variable with available slot value
- *
- * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
- * Returns 0 if free slot is not available, else return 1 with tag value
- * in @free_slot.
- */
-static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
+static int ufshcd_reset_device(struct ufs_hba *hba)
 {
-       int tag;
-       bool ret = false;
+       int ret;
 
-       if (!free_slot)
+       /* reset the connected UFS device */
+       ret = ufshcd_assert_device_reset(hba);
+       if (ret)
                goto out;
+       /*
+        * The reset signal is active low.
+        * The UFS device shall detect a positive or negative RST_n pulse
+        * width of 1us or more.
+        * To be on the safe side, keep the reset low for at least 10us.
+        */
+       usleep_range(10, 15);
 
-       do {
-               tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
-               if (tag >= hba->nutmrs)
-                       goto out;
-       } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
-
-       *free_slot = tag;
-       ret = true;
+       ret = ufshcd_deassert_device_reset(hba);
+       if (ret)
+               goto out;
+       /* same as assert, wait for at least 10us after deassert */
+       usleep_range(10, 15);
 out:
        return ret;
 }
 
-static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(char *val)
 {
-       clear_bit_unlock(slot, &hba->tm_slots_in_use);
+       if (!val || !*val)
+               return;
+
+       if (*val < 0x20 || *val > 0x7e)
+               *val = ' ';
 }
 
-/**
- * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
- * @hba: per adapter instance
- * @pos: position of the bit to be cleared
- */
-static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+#define UFSHCD_MAX_CMD_LOGGING 200
+
+#ifdef CONFIG_TRACEPOINTS
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+                       struct ufshcd_cmd_log_entry *entry, u8 opcode)
 {
-       ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
-}
+       if (trace_ufshcd_command_enabled()) {
+               u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 
-/**
- * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
- * @reg: Register value of host controller status
- *
- * Returns integer, 0 on Success and positive value if failed
- */
-static inline int ufshcd_get_lists_status(u32 reg)
+               trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
+                                    entry->doorbell, entry->transfer_len, intr,
+                                    entry->lba, opcode);
+       }
+}
+#else
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+                       struct ufshcd_cmd_log_entry *entry, u8 opcode)
 {
-       /*
-        * The mask 0xFF is for the following HCS register bits
-        * Bit          Description
-        *  0           Device Present
-        *  1           UTRLRDY
-        *  2           UTMRLRDY
-        *  3           UCRDY
-        *  4           HEI
-        *  5           DEI
-        * 6-7          reserved
-        */
-       return (((reg) & (0xFF)) >> 1) ^ (0x07);
 }
+#endif
 
-/**
- * ufshcd_get_uic_cmd_result - Get the UIC command result
- * @hba: Pointer to adapter instance
- *
- * This function gets the result of UIC command completion
- * Returns 0 on success, non zero value on error
- */
-static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
 {
-       return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
-              MASK_UIC_COMMAND_RESULT;
+       /* Allocate log entries */
+       if (!hba->cmd_log.entries) {
+               hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
+                       sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
+               if (!hba->cmd_log.entries)
+                       return;
+               dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
+                               __func__);
+       }
 }
 
-/**
- * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
- * @hba: Pointer to adapter instance
- *
- * This function gets UIC command argument3
- * Returns 0 on success, non zero value on error
- */
-static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+#ifdef CONFIG_TRACEPOINTS
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+                            unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+                            sector_t lba, int transfer_len, u8 opcode)
 {
-       return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+       struct ufshcd_cmd_log_entry *entry;
+
+       if (!hba->cmd_log.entries)
+               return;
+
+       entry = &hba->cmd_log.entries[hba->cmd_log.pos];
+       entry->lun = lun;
+       entry->str = str;
+       entry->cmd_type = cmd_type;
+       entry->cmd_id = cmd_id;
+       entry->lba = lba;
+       entry->transfer_len = transfer_len;
+       entry->idn = idn;
+       entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+       entry->tag = tag;
+       entry->tstamp = ktime_get();
+       entry->outstanding_reqs = hba->outstanding_reqs;
+       entry->seq_num = hba->cmd_log.seq_num;
+       hba->cmd_log.seq_num++;
+       hba->cmd_log.pos =
+                       (hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+       ufshcd_add_command_trace(hba, entry, opcode);
+}
+#endif
+
+static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+       unsigned int tag, u8 cmd_id, u8 idn)
+{
+       __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
+                        0xff, (sector_t)-1, -1, -1);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+       ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
+}
+
+static void ufshcd_print_cmd_log(struct ufs_hba *hba)
+{
+       int i;
+       int pos;
+       struct ufshcd_cmd_log_entry *p;
+
+       if (!hba->cmd_log.entries)
+               return;
+
+       pos = hba->cmd_log.pos;
+       for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
+               p = &hba->cmd_log.entries[pos];
+               pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+               if (ktime_to_us(p->tstamp)) {
+                       pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
+                               p->cmd_type, p->str, p->seq_num,
+                               p->lun, p->cmd_id, (unsigned long long)p->lba,
+                               p->transfer_len, p->tag, p->doorbell,
+                               p->outstanding_reqs, p->idn,
+                               ktime_to_us(p->tstamp));
+                       usleep_range(1000, 1100);
+               }
+       }
+}
+#else
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+}
+
+#ifdef CONFIG_TRACEPOINTS
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+                            unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+                            sector_t lba, int transfer_len, u8 opcode)
+{
+       struct ufshcd_cmd_log_entry entry;
+
+       entry.str = str;
+       entry.lba = lba;
+       entry.cmd_id = cmd_id;
+       entry.transfer_len = transfer_len;
+       entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+       entry.tag = tag;
+
+       ufshcd_add_command_trace(hba, &entry, opcode);
+}
+#endif
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+}
+
+static void ufshcd_print_cmd_log(struct ufs_hba *hba)
+{
+}
+#endif
+
+#ifdef CONFIG_TRACEPOINTS
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+                                       unsigned int tag, const char *str)
+{
+       struct ufshcd_lrb *lrbp;
+       char *cmd_type = NULL;
+       u8 opcode = 0;
+       u8 cmd_id = 0, idn = 0;
+       sector_t lba = -1;
+       int transfer_len = -1;
+
+       lrbp = &hba->lrb[tag];
+
+       if (lrbp->cmd) { /* data phase exists */
+               opcode = (u8)(*lrbp->cmd->cmnd);
+               if ((opcode == READ_10) || (opcode == WRITE_10)) {
+                       /*
+                        * Currently we only fully trace read(10) and write(10)
+                        * commands
+                        */
+                       if (lrbp->cmd->request && lrbp->cmd->request->bio)
+                               lba =
+                               lrbp->cmd->request->bio->bi_iter.bi_sector;
+                       transfer_len = be32_to_cpu(
+                               lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+               }
+       }
+
+       if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
+               cmd_type = "scsi";
+               cmd_id = (u8)(*lrbp->cmd->cmnd);
+       } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+               if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
+                       cmd_type = "nop";
+                       cmd_id = 0;
+               } else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
+                       cmd_type = "query";
+                       cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
+                       idn = hba->dev_cmd.query.request.upiu_req.idn;
+               }
+       }
+
+       __ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
+                        lrbp->lun, lba, transfer_len, opcode);
+}
+#else
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+                                       unsigned int tag, const char *str)
+{
+}
+#endif
+
+static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
+{
+       struct ufs_clk_info *clki;
+       struct list_head *head = &hba->clk_list_head;
+
+       if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
+               return;
+
+       if (!head || list_empty(head))
+               return;
+
+       list_for_each_entry(clki, head, list) {
+               if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
+                               clki->max_freq)
+                       dev_err(hba->dev, "clk: %s, rate: %u\n",
+                                       clki->name, clki->curr_freq);
+       }
+}
+
+static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
+               struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+{
+       int i;
+
+       if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
+               return;
+
+       for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
+               int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+
+               if (err_hist->reg[p] == 0)
+                       continue;
+               dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
+                       err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+       }
+}
+
+static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
+{
+       if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
+               return;
+
+       /*
+        * hex_dump reads its data without the readl macro. This might
+        * cause inconsistency issues on some platforms, as the printed
+        * values may come from a cache rather than the most recent value.
+        * To verify that you are looking at an un-cached version, check
+        * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
+        * invoked during the platform/pci probe function.
+        */
+       ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+       dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
+               hba->ufs_version, hba->capabilities);
+       dev_err(hba->dev,
+               "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
+               (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
+       dev_err(hba->dev,
+               "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
+               ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+               hba->ufs_stats.hibern8_exit_cnt);
+
+       ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+       ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+       ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+       ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+       ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+
+       ufshcd_print_clk_freqs(hba);
+
+       ufshcd_vops_dbg_register_dump(hba, no_sleep);
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+       __ufshcd_print_host_regs(hba, false);
+}
+
+static
+void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
+{
+       struct ufshcd_lrb *lrbp;
+       int prdt_length;
+       int tag;
+
+       if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
+               return;
+
+       for_each_set_bit(tag, &bitmap, hba->nutrs) {
+               lrbp = &hba->lrb[tag];
+
+               dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
+                               tag, ktime_to_us(lrbp->issue_time_stamp));
+               dev_err(hba->dev,
+                       "UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
+                       tag, (u64)lrbp->utrd_dma_addr);
+               ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
+                               sizeof(struct utp_transfer_req_desc));
+               dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
+                       (u64)lrbp->ucd_req_dma_addr);
+               ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
+                               sizeof(struct utp_upiu_req));
+               dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
+                       (u64)lrbp->ucd_rsp_dma_addr);
+               ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
+                               sizeof(struct utp_upiu_rsp));
+               prdt_length =
+                       le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
+               dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries  phys@0x%llx",
+                       tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
+               if (pr_prdt)
+                       ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
+                               sizeof(struct ufshcd_sg_entry) * prdt_length);
+       }
+}
+
+static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
+{
+       struct utp_task_req_desc *tmrdp;
+       int tag;
+
+       if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
+               return;
+
+       for_each_set_bit(tag, &bitmap, hba->nutmrs) {
+               tmrdp = &hba->utmrdl_base_addr[tag];
+               dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
+               ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
+                               sizeof(struct request_desc_header));
+               dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
+                               tag);
+               ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
+                               sizeof(struct utp_upiu_req));
+               dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
+                               tag);
+               ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
+                               sizeof(struct utp_task_req_desc));
+       }
+}
+
+static void ufshcd_print_fsm_state(struct ufs_hba *hba)
+{
+       int err = 0, tx_fsm_val = 0, rx_fsm_val = 0;
+
+       err = ufshcd_dme_get(hba,
+                       UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
+                       UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+                       &tx_fsm_val);
+       dev_err(hba->dev, "%s: TX_FSM_STATE = %u, err = %d\n", __func__,
+                       tx_fsm_val, err);
+       err = ufshcd_dme_get(hba,
+                       UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE,
+                       UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+                       &rx_fsm_val);
+       dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__,
+                       rx_fsm_val, err);
+}
+
+static void ufshcd_print_host_state(struct ufs_hba *hba)
+{
+       if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
+               return;
+
+       dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+       dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
+               hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
+       dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
+               hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
+       dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+               hba->curr_dev_pwr_mode, hba->uic_link_state);
+       dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
+               hba->pm_op_in_progress, hba->is_sys_suspended);
+       dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
+               hba->auto_bkops_enabled, hba->host->host_self_blocked);
+       dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
+               hba->clk_gating.state, hba->hibern8_on_idle.state);
+       dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
+               hba->eh_flags, hba->req_abort_count);
+       dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
+               hba->capabilities, hba->caps);
+       dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
+               hba->dev_quirks);
 }
 
 /**
- * ufshcd_get_req_rsp - returns the TR response transaction type
- * @ucd_rsp_ptr: pointer to response UPIU
+ * ufshcd_print_pwr_info - print power params as saved in hba
+ * power info
+ * @hba: per-adapter instance
  */
-static inline int
-ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+static void ufshcd_print_pwr_info(struct ufs_hba *hba)
+{
+       char *names[] = {
+               "INVALID MODE",
+               "FAST MODE",
+               "SLOW_MODE",
+               "INVALID MODE",
+               "FASTAUTO_MODE",
+               "SLOWAUTO_MODE",
+               "INVALID MODE",
+       };
+
+       if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
+               return;
+
+       dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+                __func__,
+                hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+                hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+                names[hba->pwr_info.pwr_rx],
+                names[hba->pwr_info.pwr_tx],
+                hba->pwr_info.hs_rate);
+}
+
+/*
+ * ufshcd_wait_for_register - wait for register value to change
+ * @hba - per-adapter interface
+ * @reg - mmio register offset
+ * @mask - mask to apply to read register value
+ * @val - wait condition
+ * @interval_us - polling interval in microsecs
+ * @timeout_ms - timeout in millisecs
+ * @can_sleep - perform sleep or just spin
+ * Returns -ETIMEDOUT on error, zero on success
+ */
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+                               u32 val, unsigned long interval_us,
+                               unsigned long timeout_ms, bool can_sleep)
 {
-       return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
+       int err = 0;
+       unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+       /* ignore bits that we don't intend to wait on */
+       val = val & mask;
+
+       while ((ufshcd_readl(hba, reg) & mask) != val) {
+               if (can_sleep)
+                       usleep_range(interval_us, interval_us + 50);
+               else
+                       udelay(interval_us);
+               if (time_after(jiffies, timeout)) {
+                       if ((ufshcd_readl(hba, reg) & mask) != val)
+                               err = -ETIMEDOUT;
+                       break;
+               }
+       }
+
+       return err;
 }
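An illustrative call, polling until the doorbell bit for a given tag clears (the register, mask and timeout values are examples only, not a fixed contract of this function):

    err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
                                   1U << tag, 0 /* wait for bit to clear */,
                                   1000 /* poll every 1000us */,
                                   1000 /* give up after 1000ms */,
                                   true /* may sleep between polls */);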
 
 /**
- * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
- * @ucd_rsp_ptr: pointer to response UPIU
+ * ufshcd_get_intr_mask - Get the interrupt bit mask
+ * @hba - Pointer to adapter instance
  *
- * This function gets the response status and scsi_status from response UPIU
- * Returns the response result code.
+ * Returns interrupt bit mask per version
  */
-static inline int
-ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 {
-       return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+       u32 intr_mask = 0;
+
+       switch (hba->ufs_version) {
+       case UFSHCI_VERSION_10:
+               intr_mask = INTERRUPT_MASK_ALL_VER_10;
+               break;
+       case UFSHCI_VERSION_11:
+       case UFSHCI_VERSION_20:
+               intr_mask = INTERRUPT_MASK_ALL_VER_11;
+               break;
+       case UFSHCI_VERSION_21:
+       default:
+               intr_mask = INTERRUPT_MASK_ALL_VER_21;
+       }
+
+       if (!ufshcd_is_crypto_supported(hba))
+               intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;
+
+       return intr_mask;
 }
 
-/*
- * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
- *                             from response UPIU
- * @ucd_rsp_ptr: pointer to response UPIU
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ * @hba - Pointer to adapter instance
  *
- * Return the data segment length.
+ * Returns UFSHCI version supported by the controller
  */
-static inline unsigned int
-ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 {
-       return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
-               MASK_RSP_UPIU_DATA_SEG_LEN;
+       if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
+               return ufshcd_vops_get_ufs_hci_version(hba);
+
+       return ufshcd_readl(hba, REG_UFS_VERSION);
 }
 
 /**
- * ufshcd_is_exception_event - Check if the device raised an exception event
- * @ucd_rsp_ptr: pointer to response UPIU
+ * ufshcd_is_device_present - Check if any device is connected to
+ *                           the host controller
+ * @hba: pointer to adapter instance
  *
- * The function checks if the device raised an exception event indicated in
- * the Device Information field of response UPIU.
+ * Returns 1 if device present, 0 if no device detected
+ */
+static inline int ufshcd_is_device_present(struct ufs_hba *hba)
+{
+       return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+                                               DEVICE_PRESENT) ? 1 : 0;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ * @lrb: pointer to local command reference block
  *
- * Returns true if exception is raised, false otherwise.
+ * This function is used to get the OCS field from UTRD
+ * Returns the OCS field in the UTRD
  */
-static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
+static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+{
+       return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
+ * @task_req_descp: pointer to utp_task_req_desc structure
+ *
+ * This function is used to get the OCS field from UTMRD
+ * Returns the OCS field in the UTMRD
+ */
+static inline int
+ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
+{
+       return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tm_free_slot - get a free slot for task management request
+ * @hba: per adapter instance
+ * @free_slot: pointer to variable with available slot value
+ *
+ * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
+ * Returns 0 if free slot is not available, else return 1 with tag value
+ * in @free_slot.
+ */
+static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
+{
+       int tag;
+       bool ret = false;
+
+       if (!free_slot)
+               goto out;
+
+       do {
+               tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
+               if (tag >= hba->nutmrs)
+                       goto out;
+       } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
+
+       *free_slot = tag;
+       ret = true;
+out:
+       return ret;
+}
+
+static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
+{
+       clear_bit_unlock(slot, &hba->tm_slots_in_use);
+}
+
+/**
+ * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+{
+       ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+}
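The inverted mask is intentional: per the UFSHCI specification, a transfer request slot is cleared by writing '0' to the corresponding bit of UTRLCLR while writing '1' to all other bits, hence ~(1 << pos).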
+
+/**
+ * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
+ * @hba: per adapter instance
+ * @tag: position of the bit to be cleared
+ */
+static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
+{
+       __clear_bit(tag, &hba->outstanding_reqs);
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns integer, 0 on Success and positive value if failed
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+       /*
+        * The mask 0xFF is for the following HCS register bits
+        * Bit          Description
+        *  0           Device Present
+        *  1           UTRLRDY
+        *  2           UTMRLRDY
+        *  3           UCRDY
+        * 4-7          reserved
+        */
+       return ((reg & 0xFF) >> 1) ^ 0x07;
+}
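Worked example: with a device attached and all three lists ready, bits 0-3 are set and reg & 0xFF = 0x0F; (0x0F >> 1) gives 0x07, and 0x07 ^ 0x07 = 0, i.e. success. Any list that is not yet ready leaves its bit clear, so the XOR yields a non-zero result.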
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion
+ * Returns 0 on success, non zero value on error
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+       return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
+              MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets UIC command argument3
+ * Returns 0 on success, non zero value on error
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+       return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
+ * ufshcd_get_req_rsp - returns the TR response transaction type
+ * @ucd_rsp_ptr: pointer to response UPIU
+ */
+static inline int
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+       return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
+}
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+       return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ *                             from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+       return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+               MASK_RSP_UPIU_DATA_SEG_LEN;
+}
+
+/**
+ * ufshcd_is_exception_event - Check if the device raised an exception event
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * The function checks if the device raised an exception event indicated in
+ * the Device Information field of response UPIU.
+ *
+ * Returns true if exception is raised, false otherwise.
+ */
+static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+       return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+                       MASK_RSP_EXCEPTION_EVENT ? true : false;
+}
+
+/**
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
+ * @hba: per adapter instance
+ */
+static inline void
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
+{
+       ufshcd_writel(hba, INT_AGGR_ENABLE |
+                     INT_AGGR_COUNTER_AND_TIMER_RESET,
+                     REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+       ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+                     INT_AGGR_COUNTER_THLD_VAL(cnt) |
+                     INT_AGGR_TIMEOUT_VAL(tmout),
+                     REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
+{
+       ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers.
+ *                     When the run-stop registers are set to 1, the host
+ *                     controller can start processing requests.
+ * @hba: per adapter instance
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+       ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+                     REG_UTP_TASK_REQ_LIST_RUN_STOP);
+       ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+                     REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+       u32 val = CONTROLLER_ENABLE;
+
+       if (ufshcd_is_crypto_supported(hba))
+               val |= CRYPTO_GENERAL_ENABLE;
+       ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ * @hba: per adapter instance
+ *
+ * Returns zero if controller is active, 1 otherwise
+ */
+static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
+{
+       return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
+}
+
+static const char *ufschd_uic_link_state_to_string(
+                       enum uic_link_state state)
+{
+       switch (state) {
+       case UIC_LINK_OFF_STATE:        return "OFF";
+       case UIC_LINK_ACTIVE_STATE:     return "ACTIVE";
+       case UIC_LINK_HIBERN8_STATE:    return "HIBERN8";
+       default:                        return "UNKNOWN";
+       }
+}
+
+static const char *ufschd_ufs_dev_pwr_mode_to_string(
+                       enum ufs_dev_pwr_mode state)
+{
+       switch (state) {
+       case UFS_ACTIVE_PWR_MODE:       return "ACTIVE";
+       case UFS_SLEEP_PWR_MODE:        return "SLEEP";
+       case UFS_POWERDOWN_PWR_MODE:    return "POWERDOWN";
+       default:                        return "UNKNOWN";
+       }
+}
+
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+       /* HCI versions 1.0 and 1.1 support UniPro 1.41 */
+       if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+           (hba->ufs_version == UFSHCI_VERSION_11))
+               return UFS_UNIPRO_VER_1_41;
+       else
+               return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+       /*
+        * If both host and device support UniPro ver1.6 or later, PA layer
+        * parameters tuning happens during link startup itself.
+        *
+        * We can manually tune PA layer parameters if either host or device
+        * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
+        * logic simple, we will only do manual tuning if local unipro version
+        * doesn't support ver1.6 or later.
+        */
+       if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
+               return true;
+       else
+               return false;
+}
+
+/**
+ * ufshcd_set_clk_freq - set UFS controller clock frequencies
+ * @hba: per adapter instance
+ * @scale_up: If true, set the max possible frequency, otherwise set the low frequency
+ *
+ * Returns 0 if successful
+ * Returns < 0 for any other errors
+ */
+static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
+{
+       int ret = 0;
+       struct ufs_clk_info *clki;
+       struct list_head *head = &hba->clk_list_head;
+
+       if (!head || list_empty(head))
+               goto out;
+
+       list_for_each_entry(clki, head, list) {
+               if (!IS_ERR_OR_NULL(clki->clk)) {
+                       if (scale_up && clki->max_freq) {
+                               if (clki->curr_freq == clki->max_freq)
+                                       continue;
+
+                               ret = clk_set_rate(clki->clk, clki->max_freq);
+                               if (ret) {
+                                       dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+                                               __func__, clki->name,
+                                               clki->max_freq, ret);
+                                       break;
+                               }
+                               trace_ufshcd_clk_scaling(dev_name(hba->dev),
+                                               "scaled up", clki->name,
+                                               clki->curr_freq,
+                                               clki->max_freq);
+                               clki->curr_freq = clki->max_freq;
+
+                       } else if (!scale_up && clki->min_freq) {
+                               if (clki->curr_freq == clki->min_freq)
+                                       continue;
+
+                               ret = clk_set_rate(clki->clk, clki->min_freq);
+                               if (ret) {
+                                       dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+                                               __func__, clki->name,
+                                               clki->min_freq, ret);
+                                       break;
+                               }
+                               trace_ufshcd_clk_scaling(dev_name(hba->dev),
+                                               "scaled down", clki->name,
+                                               clki->curr_freq,
+                                               clki->min_freq);
+                               clki->curr_freq = clki->min_freq;
+                       }
+               }
+               dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+                               clki->name, clk_get_rate(clki->clk));
+       }
+
+out:
+       return ret;
+}
+
+/**
+ * ufshcd_scale_clks - scale up or scale down UFS controller clocks
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns 0 if successful
+ * Returns < 0 for any other errors
+ */
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+       int ret = 0;
+
+       ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+       if (ret)
+               return ret;
+
+       ret = ufshcd_set_clk_freq(hba, scale_up);
+       if (ret)
+               return ret;
+
+       ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+       if (ret) {
+               ufshcd_set_clk_freq(hba, !scale_up);
+               return ret;
+       }
+
+       return ret;
+}
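Note the bracketing: the vendor hook is notified at both PRE_CHANGE and POST_CHANGE around the frequency switch, and a POST_CHANGE failure rolls the frequencies back by re-running ufshcd_set_clk_freq() with the opposite scale_up value.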
+
+static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
+{
+       hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
+       cancel_work_sync(&hba->clk_gating.gate_work);
+}
+
+static void ufshcd_ungate_work(struct work_struct *work)
+{
+       int ret;
+       unsigned long flags;
+       struct ufs_hba *hba = container_of(work, struct ufs_hba,
+                       clk_gating.ungate_work);
+
+       ufshcd_cancel_gate_work(hba);
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (hba->clk_gating.state == CLKS_ON) {
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               goto unblock_reqs;
+       }
+
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       ufshcd_hba_vreg_set_hpm(hba);
+       ufshcd_enable_clocks(hba);
+
+       /* Exit from hibern8 */
+       if (ufshcd_can_hibern8_during_gating(hba)) {
+               /* Prevent gating in this path */
+               hba->clk_gating.is_suspended = true;
+               if (ufshcd_is_link_hibern8(hba)) {
+                       ret = ufshcd_uic_hibern8_exit(hba);
+                       if (ret)
+                               dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+                                       __func__, ret);
+                       else
+                               ufshcd_set_link_active(hba);
+               }
+               hba->clk_gating.is_suspended = false;
+       }
+unblock_reqs:
+       ufshcd_scsi_unblock_requests(hba);
+}
+
+/**
+ * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
+ * Also, exit from hibern8 mode and set the link as active.
+ * @hba: per adapter instance
+ * @async: This indicates whether caller should ungate clocks asynchronously.
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+       int rc = 0;
+       unsigned long flags;
+
+       if (!ufshcd_is_clkgating_allowed(hba))
+               goto out;
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->clk_gating.active_reqs++;
+
+       if (ufshcd_eh_in_progress(hba)) {
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               return 0;
+       }
+
+start:
+       switch (hba->clk_gating.state) {
+       case CLKS_ON:
+               /*
+                * Wait for the ungate work to complete if in progress.
+                * Though the clocks may be in the ON state, the link could
+                * still be in hibern8 state if hibern8 is allowed
+                * during clock gating.
+                * Make sure we exit hibern8 state as well, in addition to
+                * the clocks being ON.
+                */
+               if (ufshcd_can_hibern8_during_gating(hba) &&
+                   ufshcd_is_link_hibern8(hba)) {
+                       spin_unlock_irqrestore(hba->host->host_lock, flags);
+                       flush_work(&hba->clk_gating.ungate_work);
+                       spin_lock_irqsave(hba->host->host_lock, flags);
+                       goto start;
+               }
+               break;
+       case REQ_CLKS_OFF:
+               /*
+                * If the timer was active but the callback was not running
+                * we have nothing to do, just change state and return.
+                */
+               if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
+                       hba->clk_gating.state = CLKS_ON;
+                       trace_ufshcd_clk_gating(dev_name(hba->dev),
+                               hba->clk_gating.state);
+                       break;
+               }
+               /*
+                * If we are here, it means gating work is either done or
+                * currently running. Hence, fall through to cancel gating
+                * work and to enable clocks.
+                */
+       case CLKS_OFF:
+               __ufshcd_scsi_block_requests(hba);
+               hba->clk_gating.state = REQ_CLKS_ON;
+               trace_ufshcd_clk_gating(dev_name(hba->dev),
+                       hba->clk_gating.state);
+               queue_work(hba->clk_gating.clk_gating_workq,
+                               &hba->clk_gating.ungate_work);
+               /*
+                * fall through to check if we should wait for this
+                * work to be done or not.
+                */
+       case REQ_CLKS_ON:
+               if (async) {
+                       rc = -EAGAIN;
+                       hba->clk_gating.active_reqs--;
+                       break;
+               }
+
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               flush_work(&hba->clk_gating.ungate_work);
+               /* Make sure state is CLKS_ON before returning */
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               goto start;
+       default:
+               dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+                               __func__, hba->clk_gating.state);
+               break;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+       hba->ufs_stats.clk_hold.ts = ktime_get();
+       return rc;
+}
+EXPORT_SYMBOL_GPL(ufshcd_hold);
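+
+/*
+ * Summary of the clock gating state machine as driven by ufshcd_hold(),
+ * ufshcd_release() and the gate/ungate work items (derived from the code
+ * above):
+ *
+ *   CLKS_ON      -> REQ_CLKS_OFF  last release arms the gate hrtimer
+ *   REQ_CLKS_OFF -> CLKS_ON       a hold cancels the pending hrtimer
+ *   REQ_CLKS_OFF -> CLKS_OFF      gate_work runs undisturbed
+ *   CLKS_OFF     -> REQ_CLKS_ON   hold blocks SCSI requests and queues
+ *                                 ungate_work
+ *   REQ_CLKS_ON  -> CLKS_ON       ungate_work completes
+ */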
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+       struct ufs_hba *hba = container_of(work, struct ufs_hba,
+                                               clk_gating.gate_work);
+       unsigned long flags;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       /*
+        * In case you are here to cancel this work, the gating state
+        * would already be marked as REQ_CLKS_ON. In this case, save time
+        * by skipping the gating work and exit after changing the clock
+        * state to CLKS_ON.
+        */
+       if (hba->clk_gating.is_suspended ||
+               (hba->clk_gating.state != REQ_CLKS_OFF)) {
+               hba->clk_gating.state = CLKS_ON;
+               trace_ufshcd_clk_gating(dev_name(hba->dev),
+                       hba->clk_gating.state);
+               goto rel_lock;
+       }
+
+       if (hba->clk_gating.active_reqs
+               || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+               || hba->lrb_in_use || hba->outstanding_tasks
+               || hba->active_uic_cmd || hba->uic_async_done)
+               goto rel_lock;
+
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
+           hba->hibern8_on_idle.is_enabled)
+               /*
+                * Hibern8 enter work (on Idle) needs clocks to be ON hence
+                * make sure that it is flushed before turning off the clocks.
+                */
+               flush_delayed_work(&hba->hibern8_on_idle.enter_work);
+
+       /* put the link into hibern8 mode before turning off clocks */
+       if (ufshcd_can_hibern8_during_gating(hba)) {
+               if (ufshcd_uic_hibern8_enter(hba)) {
+                       hba->clk_gating.state = CLKS_ON;
+                       trace_ufshcd_clk_gating(dev_name(hba->dev),
+                               hba->clk_gating.state);
+                       goto out;
+               }
+               ufshcd_set_link_hibern8(hba);
+       }
+
+       /*
+        * If auto hibern8 is supported then the link will already
+        * be in hibern8 state and the ref clock can be gated.
+        */
+       if ((ufshcd_is_auto_hibern8_supported(hba) ||
+            !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
+               ufshcd_disable_clocks(hba, true);
+       else
+               /* If link is active, device ref_clk can't be switched off */
+               ufshcd_disable_clocks_skip_ref_clk(hba, true);
+
+       /* Put the host controller in low power mode if possible */
+       ufshcd_hba_vreg_set_lpm(hba);
+
+       /*
+        * In case you are here to cancel this work, the gating state
+        * would be marked as REQ_CLKS_ON. In this case keep the state
+        * as REQ_CLKS_ON, which anyway implies that clocks are off
+        * and a request to turn them on is pending. This way we keep
+        * the state machine intact and ultimately avoid running the
+        * cancel work multiple times when new requests arrive before
+        * the current cancel work is done.
+        */
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (hba->clk_gating.state == REQ_CLKS_OFF) {
+               hba->clk_gating.state = CLKS_OFF;
+               trace_ufshcd_clk_gating(dev_name(hba->dev),
+                       hba->clk_gating.state);
+       }
+rel_lock:
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+       return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
+{
+       if (!ufshcd_is_clkgating_allowed(hba))
+               return;
+
+       hba->clk_gating.active_reqs--;
+
+       if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
+               || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+               || hba->lrb_in_use || hba->outstanding_tasks
+               || hba->active_uic_cmd || hba->uic_async_done
+               || ufshcd_eh_in_progress(hba) || no_sched)
+               return;
+
+       hba->clk_gating.state = REQ_CLKS_OFF;
+       trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+       hba->ufs_stats.clk_rel.ts = ktime_get();
+
+       hrtimer_start(&hba->clk_gating.gate_hrtimer,
+                       ms_to_ktime(hba->clk_gating.delay_ms),
+                       HRTIMER_MODE_REL);
+}
+
+void ufshcd_release(struct ufs_hba *hba, bool no_sched)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       __ufshcd_release(hba, no_sched);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_release);
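+
+/*
+ * ufshcd_hold()/ufshcd_release() act as a reference-counted pair around
+ * any access that needs the controller clocks on, e.g. (sketch):
+ *
+ *   ufshcd_hold(hba, false);     sync: waits until state is CLKS_ON
+ *   ... issue UIC/device command ...
+ *   ufshcd_release(hba, false);  last ref may arm the gating hrtimer
+ *
+ * With async == true the caller gets -EAGAIN instead of sleeping while
+ * ungate work is pending, which the queuecommand path below relies on.
+ */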
+
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
 {
-       return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
-                       MASK_RSP_EXCEPTION_EVENT ? true : false;
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
 }
 
-/**
- * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
- * @hba: per adapter instance
- */
-static inline void
-ufshcd_reset_intr_aggr(struct ufs_hba *hba)
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
 {
-       ufshcd_writel(hba, INT_AGGR_ENABLE |
-                     INT_AGGR_COUNTER_AND_TIMER_RESET,
-                     REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       unsigned long flags, value;
+
+       if (kstrtoul(buf, 0, &value))
+               return -EINVAL;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->clk_gating.delay_ms = value;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return count;
 }
 
-/**
- * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
- * @hba: per adapter instance
- * @cnt: Interrupt aggregation counter threshold
- * @tmout: Interrupt aggregation timeout value
- */
-static inline void
-ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
 {
-       ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
-                     INT_AGGR_COUNTER_THLD_VAL(cnt) |
-                     INT_AGGR_TIMEOUT_VAL(tmout),
-                     REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%lu\n",
+                       hba->clk_gating.delay_ms_pwr_save);
 }
 
-/**
- * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
- * @hba: per adapter instance
- */
-static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
+static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
 {
-       ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       unsigned long flags, value;
+
+       if (kstrtoul(buf, 0, &value))
+               return -EINVAL;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+
+       hba->clk_gating.delay_ms_pwr_save = value;
+       if (ufshcd_is_clkscaling_supported(hba) &&
+           !hba->clk_scaling.is_scaled_up)
+               hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
+
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return count;
 }
 
-/**
- * ufshcd_enable_run_stop_reg - Enable run-stop registers,
- *                     When run-stop registers are set to 1, it indicates the
- *                     host controller that it can process the requests
- * @hba: per adapter instance
- */
-static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
 {
-       ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
-                     REG_UTP_TASK_REQ_LIST_RUN_STOP);
-       ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
-                     REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
 }
 
-/**
- * ufshcd_hba_start - Start controller initialization sequence
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_start(struct ufs_hba *hba)
+static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
 {
-       ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       unsigned long flags, value;
+
+       if (kstrtoul(buf, 0, &value))
+               return -EINVAL;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+
+       hba->clk_gating.delay_ms_perf = value;
+       if (ufshcd_is_clkscaling_supported(hba) &&
+           hba->clk_scaling.is_scaled_up)
+               hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
+
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return count;
 }
 
-/**
- * ufshcd_is_hba_active - Get controller state
- * @hba: per adapter instance
- *
- * Returns zero if controller is active, 1 otherwise
- */
-static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
+static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
 {
-       return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
 }
 
-static void ufshcd_ungate_work(struct work_struct *work)
+static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
 {
-       int ret;
+       struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned long flags;
-       struct ufs_hba *hba = container_of(work, struct ufs_hba,
-                       clk_gating.ungate_work);
+       u32 value;
 
-       cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+       if (kstrtou32(buf, 0, &value))
+               return -EINVAL;
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->clk_gating.state == CLKS_ON) {
+       value = !!value;
+       if (value == hba->clk_gating.is_enabled)
+               goto out;
+
+       if (value) {
+               ufshcd_release(hba, false);
+       } else {
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               hba->clk_gating.active_reqs++;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
-               goto unblock_reqs;
        }
 
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-       ufshcd_setup_clocks(hba, true);
+       hba->clk_gating.is_enabled = value;
+out:
+       return count;
+}
 
-       /* Exit from hibern8 */
-       if (ufshcd_can_hibern8_during_gating(hba)) {
-               /* Prevent gating in this path */
-               hba->clk_gating.is_suspended = true;
-               if (ufshcd_is_link_hibern8(hba)) {
-                       ret = ufshcd_uic_hibern8_exit(hba);
-                       if (ret)
-                               dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
-                                       __func__, ret);
-                       else
-                               ufshcd_set_link_active(hba);
-               }
-               hba->clk_gating.is_suspended = false;
+static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
+                                       struct hrtimer *timer)
+{
+       struct ufs_hba *hba = container_of(timer, struct ufs_hba,
+                                          clk_gating.gate_hrtimer);
+
+       queue_work(hba->clk_gating.clk_gating_workq,
+                               &hba->clk_gating.gate_work);
+
+       return HRTIMER_NORESTART;
+}
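+
+/*
+ * The hrtimer callback above runs in interrupt context, so it cannot
+ * gate clocks or touch the link directly; it merely queues gate_work
+ * on the dedicated single-threaded workqueue and lets the work item
+ * do the parts that may sleep.
+ */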
+
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+{
+       struct ufs_clk_gating *gating = &hba->clk_gating;
+       char wq_name[sizeof("ufs_clk_gating_00")];
+
+       hba->clk_gating.state = CLKS_ON;
+
+       if (!ufshcd_is_clkgating_allowed(hba))
+               return;
+
+       /*
+        * Disable hibern8 during clk gating if
+        * auto hibern8 is supported
+        */
+       if (ufshcd_is_auto_hibern8_supported(hba))
+               hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+
+       INIT_WORK(&gating->gate_work, ufshcd_gate_work);
+       INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
+       /*
+        * Clock gating work must be executed only after the auto hibern8
+        * timeout has expired in the hardware, or after the aggressive
+        * hibern8-on-idle software timeout. Jiffy-based low resolution
+        * delayed work cannot reliably guarantee this, hence use a high
+        * resolution timer to make sure the gate work is scheduled
+        * strictly after the hibern8 timeout.
+        *
+        * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
+        */
+       hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
+
+       snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+                       hba->host->host_no);
+       hba->clk_gating.clk_gating_workq =
+               create_singlethread_workqueue(wq_name);
+
+       gating->is_enabled = true;
+
+       gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
+       gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
+
+       /* start with performance mode */
+       gating->delay_ms = gating->delay_ms_perf;
+
+       if (!ufshcd_is_clkscaling_supported(hba))
+               goto scaling_not_supported;
+
+       gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
+       gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
+       sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
+       gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
+       gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
+               dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
+
+       gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
+       gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
+       sysfs_attr_init(&gating->delay_perf_attr.attr);
+       gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
+       gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (device_create_file(hba->dev, &gating->delay_perf_attr))
+               dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
+
+       goto add_clkgate_enable;
+
+scaling_not_supported:
+       hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+       hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+       sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+       hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+       hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+               dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+
+add_clkgate_enable:
+       gating->enable_attr.show = ufshcd_clkgate_enable_show;
+       gating->enable_attr.store = ufshcd_clkgate_enable_store;
+       sysfs_attr_init(&gating->enable_attr.attr);
+       gating->enable_attr.attr.name = "clkgate_enable";
+       gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (device_create_file(hba->dev, &gating->enable_attr))
+               dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
+}
+
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+{
+       if (!ufshcd_is_clkgating_allowed(hba))
+               return;
+       if (ufshcd_is_clkscaling_supported(hba)) {
+               device_remove_file(hba->dev,
+                                  &hba->clk_gating.delay_pwr_save_attr);
+               device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
+       } else {
+               device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
        }
-unblock_reqs:
-       if (ufshcd_is_clkscaling_enabled(hba))
-               devfreq_resume_device(hba->devfreq);
-       scsi_unblock_requests(hba->host);
+       device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+       ufshcd_cancel_gate_work(hba);
+       cancel_work_sync(&hba->clk_gating.ungate_work);
+       destroy_workqueue(hba->clk_gating.clk_gating_workq);
+}
+
+static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
+{
+       ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
+                        AUTO_HIBERN8_IDLE_TIMER_MASK,
+                       AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
+                       REG_AUTO_HIBERN8_IDLE_TIMER);
+       /* Make sure the timer gets applied before further operations */
+       mb();
 }
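+
+/*
+ * The AHIT register written above packs a scale factor and a timer
+ * value into a single word; with AUTO_HIBERN8_TIMER_SCALE_1_MS the
+ * delay is interpreted in milliseconds. Illustrative composition for
+ * a 5 ms idle timeout (exact bit positions come from the masks in
+ * ufshci.h):
+ *
+ *   ufshcd_set_auto_hibern8_timer(hba, 5);
+ *   (writes AUTO_HIBERN8_TIMER_SCALE_1_MS | 5 into the masked fields)
+ */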
 
 /**
- * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
- * Also, exit from hibern8 mode and set the link as active.
+ * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
+ *
  * @hba: per adapter instance
- * @async: This indicates whether caller should ungate clocks asynchronously.
+ * @async: This indicates whether caller wants to exit hibern8 asynchronously.
+ *
+ * Exit from hibern8 mode and set the link as active.
+ *
+ * Return 0 on success, non-zero on failure.
  */
-int ufshcd_hold(struct ufs_hba *hba, bool async)
+static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
 {
        int rc = 0;
        unsigned long flags;
 
-       if (!ufshcd_is_clkgating_allowed(hba))
+       if (!ufshcd_is_hibern8_on_idle_allowed(hba))
                goto out;
+
        spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->clk_gating.active_reqs++;
+       hba->hibern8_on_idle.active_reqs++;
+
+       if (ufshcd_eh_in_progress(hba)) {
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               return 0;
+       }
 
 start:
-       switch (hba->clk_gating.state) {
-       case CLKS_ON:
-               /*
-                * Wait for the ungate work to complete if in progress.
-                * Though the clocks may be in ON state, the link could
-                * still be in hibner8 state if hibern8 is allowed
-                * during clock gating.
-                * Make sure we exit hibern8 state also in addition to
-                * clocks being ON.
-                */
-               if (ufshcd_can_hibern8_during_gating(hba) &&
-                   ufshcd_is_link_hibern8(hba)) {
-                       spin_unlock_irqrestore(hba->host->host_lock, flags);
-                       flush_work(&hba->clk_gating.ungate_work);
-                       spin_lock_irqsave(hba->host->host_lock, flags);
-                       goto start;
-               }
+       switch (hba->hibern8_on_idle.state) {
+       case HIBERN8_EXITED:
                break;
-       case REQ_CLKS_OFF:
-               if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
-                       hba->clk_gating.state = CLKS_ON;
+       case REQ_HIBERN8_ENTER:
+               if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
+                       hba->hibern8_on_idle.state = HIBERN8_EXITED;
+                       trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+                               hba->hibern8_on_idle.state);
                        break;
                }
                /*
-                * If we here, it means gating work is either done or
-                * currently running. Hence, fall through to cancel gating
-                * work and to enable clocks.
+                * If we are here, it means Hibern8 enter work is either done or
+                * currently running. Hence, fall through to cancel hibern8
+                * work and exit hibern8.
                 */
-       case CLKS_OFF:
-               scsi_block_requests(hba->host);
-               hba->clk_gating.state = REQ_CLKS_ON;
-               schedule_work(&hba->clk_gating.ungate_work);
+       case HIBERN8_ENTERED:
+               __ufshcd_scsi_block_requests(hba);
+               hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
+               trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+                       hba->hibern8_on_idle.state);
+               schedule_work(&hba->hibern8_on_idle.exit_work);
                /*
                 * fall through to check if we should wait for this
                 * work to be done or not.
                 */
-       case REQ_CLKS_ON:
+       case REQ_HIBERN8_EXIT:
                if (async) {
                        rc = -EAGAIN;
-                       hba->clk_gating.active_reqs--;
+                       hba->hibern8_on_idle.active_reqs--;
                        break;
+               } else {
+                       spin_unlock_irqrestore(hba->host->host_lock, flags);
+                       flush_work(&hba->hibern8_on_idle.exit_work);
+                       /* Make sure state is HIBERN8_EXITED before returning */
+                       spin_lock_irqsave(hba->host->host_lock, flags);
+                       goto start;
                }
-
-               spin_unlock_irqrestore(hba->host->host_lock, flags);
-               flush_work(&hba->clk_gating.ungate_work);
-               /* Make sure state is CLKS_ON before returning */
-               spin_lock_irqsave(hba->host->host_lock, flags);
-               goto start;
        default:
-               dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
-                               __func__, hba->clk_gating.state);
+               dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
+                               __func__, hba->hibern8_on_idle.state);
                break;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
-       return rc;
+out:
+       return rc;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
+{
+       unsigned long delay_in_jiffies;
+
+       if (!ufshcd_is_hibern8_on_idle_allowed(hba))
+               return;
+
+       hba->hibern8_on_idle.active_reqs--;
+       BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
+
+       if (hba->hibern8_on_idle.active_reqs
+               || hba->hibern8_on_idle.is_suspended
+               || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+               || hba->lrb_in_use || hba->outstanding_tasks
+               || hba->active_uic_cmd || hba->uic_async_done
+               || ufshcd_eh_in_progress(hba) || no_sched)
+               return;
+
+       hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
+       trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+               hba->hibern8_on_idle.state);
+       /*
+        * Scheduling the delayed work after 1 jiffy lets the work run
+        * any time between 0 ms and 1000/HZ ms, which is not desirable
+        * for the hibern8 enter work as it may hurt performance if it
+        * runs almost immediately. Hence make sure the hibern8 enter
+        * work gets scheduled at least 2 jiffies out (i.e. any time
+        * between 1000/HZ ms and 2000/HZ ms).
+        */
+       delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
+       if (delay_in_jiffies == 1)
+               delay_in_jiffies++;
+
+       schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
+                             delay_in_jiffies);
+}
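+
+/*
+ * Worked example of the rounding above: with HZ=100 and delay_ms=10,
+ * msecs_to_jiffies(10) evaluates to 1, which could expire almost
+ * immediately; bumping it to 2 guarantees the enter work runs no
+ * sooner than one full tick, i.e. 10-20 ms after going idle.
+ */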
+
+static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       __ufshcd_hibern8_release(hba, no_sched);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
-EXPORT_SYMBOL_GPL(ufshcd_hold);
 
-static void ufshcd_gate_work(struct work_struct *work)
+static void ufshcd_hibern8_enter_work(struct work_struct *work)
 {
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
-                       clk_gating.gate_work.work);
+                                          hibern8_on_idle.enter_work.work);
        unsigned long flags;
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->clk_gating.is_suspended) {
-               hba->clk_gating.state = CLKS_ON;
+       if (hba->hibern8_on_idle.is_suspended) {
+               hba->hibern8_on_idle.state = HIBERN8_EXITED;
+               trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+                       hba->hibern8_on_idle.state);
                goto rel_lock;
        }
 
-       if (hba->clk_gating.active_reqs
+       if (hba->hibern8_on_idle.active_reqs
                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
                || hba->lrb_in_use || hba->outstanding_tasks
                || hba->active_uic_cmd || hba->uic_async_done)
@@ -662,130 +2040,275 @@ static void ufshcd_gate_work(struct work_struct *work)
 
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       /* put the link into hibern8 mode before turning off clocks */
-       if (ufshcd_can_hibern8_during_gating(hba)) {
-               if (ufshcd_uic_hibern8_enter(hba)) {
-                       hba->clk_gating.state = CLKS_ON;
-                       goto out;
-               }
-               ufshcd_set_link_hibern8(hba);
-       }
-
-       if (ufshcd_is_clkscaling_enabled(hba)) {
-               devfreq_suspend_device(hba->devfreq);
-               hba->clk_scaling.window_start_t = 0;
+       if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
+               /* Enter failed */
+               hba->hibern8_on_idle.state = HIBERN8_EXITED;
+               trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+                       hba->hibern8_on_idle.state);
+               goto out;
        }
-
-       if (!ufshcd_is_link_active(hba))
-               ufshcd_setup_clocks(hba, false);
-       else
-               /* If link is active, device ref_clk can't be switched off */
-               __ufshcd_setup_clocks(hba, false, true);
+       ufshcd_set_link_hibern8(hba);
 
        /*
-        * In case you are here to cancel this work the gating state
-        * would be marked as REQ_CLKS_ON. In this case keep the state
-        * as REQ_CLKS_ON which would anyway imply that clocks are off
-        * and a request to turn them on is pending. By doing this way,
+        * In case you are here to cancel this work, the hibern8_on_idle.state
+        * would be marked as REQ_HIBERN8_EXIT. In this case keep the state
+        * as REQ_HIBERN8_EXIT, which anyway implies that we are in hibern8
+        * and a request to exit from it is pending. This way
         * we keep the state machine intact and ultimately avoid running
         * the cancel work multiple times when there are new requests
         * arriving before the current cancel work is done.
         */
        spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->clk_gating.state == REQ_CLKS_OFF)
-               hba->clk_gating.state = CLKS_OFF;
-
+       if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
+               hba->hibern8_on_idle.state = HIBERN8_ENTERED;
+               trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+                       hba->hibern8_on_idle.state);
+       }
 rel_lock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
        return;
 }
 
-/* host lock must be held before calling this variant */
-static void __ufshcd_release(struct ufs_hba *hba)
+static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
+                                           unsigned long delay_ms)
 {
-       if (!ufshcd_is_clkgating_allowed(hba))
-               return;
-
-       hba->clk_gating.active_reqs--;
-
-       if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
-               || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
-               || hba->lrb_in_use || hba->outstanding_tasks
-               || hba->active_uic_cmd || hba->uic_async_done)
-               return;
-
-       hba->clk_gating.state = REQ_CLKS_OFF;
-       schedule_delayed_work(&hba->clk_gating.gate_work,
-                       msecs_to_jiffies(hba->clk_gating.delay_ms));
+       pm_runtime_get_sync(hba->dev);
+       ufshcd_hold_all(hba);
+       ufshcd_scsi_block_requests(hba);
+       down_write(&hba->lock);
+       /* wait for all the outstanding requests to finish */
+       ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+       ufshcd_set_auto_hibern8_timer(hba, delay_ms);
+       up_write(&hba->lock);
+       ufshcd_scsi_unblock_requests(hba);
+       ufshcd_release_all(hba);
+       pm_runtime_put_sync(hba->dev);
 }
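+
+/*
+ * Reprogramming the auto hibern8 timer requires a quiesced controller,
+ * hence the sequence above: take PM, clock and hibern8 references,
+ * block new SCSI requests, take hba->lock for writing so no issue path
+ * is in flight, drain the doorbell, rewrite the AHIT register and then
+ * unwind everything in reverse order.
+ */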
 
-void ufshcd_release(struct ufs_hba *hba)
+static void ufshcd_hibern8_exit_work(struct work_struct *work)
 {
+       int ret;
        unsigned long flags;
+       struct ufs_hba *hba = container_of(work, struct ufs_hba,
+                                          hibern8_on_idle.exit_work);
+
+       cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       __ufshcd_release(hba);
+       if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
+            || ufshcd_is_link_active(hba)) {
+               hba->hibern8_on_idle.state = HIBERN8_EXITED;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               goto unblock_reqs;
+       }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       /* Exit from hibern8 */
+       if (ufshcd_is_link_hibern8(hba)) {
+               hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
+               ufshcd_hold(hba, false);
+               ret = ufshcd_uic_hibern8_exit(hba);
+               hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
+               ufshcd_release(hba, false);
+               if (!ret) {
+                       spin_lock_irqsave(hba->host->host_lock, flags);
+                       ufshcd_set_link_active(hba);
+                       hba->hibern8_on_idle.state = HIBERN8_EXITED;
+                       trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+                               hba->hibern8_on_idle.state);
+                       spin_unlock_irqrestore(hba->host->host_lock, flags);
+               }
+       }
+unblock_reqs:
+       ufshcd_scsi_unblock_requests(hba);
 }
-EXPORT_SYMBOL_GPL(ufshcd_release);
 
-static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
        struct ufs_hba *hba = dev_get_drvdata(dev);
 
-       return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
+       return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
 }
 
-static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned long flags, value;
+       bool change = true;
 
        if (kstrtoul(buf, 0, &value))
                return -EINVAL;
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->clk_gating.delay_ms = value;
+       if (hba->hibern8_on_idle.delay_ms == value)
+               change = false;
+
+       if (value >= hba->clk_gating.delay_ms_pwr_save ||
+           value >= hba->clk_gating.delay_ms_perf) {
+               dev_err(hba->dev, "hibern8_on_idle_delay (%lu) must be less than clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
+                       value, hba->clk_gating.delay_ms_pwr_save,
+                       hba->clk_gating.delay_ms_perf);
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               return -EINVAL;
+       }
+
+       hba->hibern8_on_idle.delay_ms = value;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       /* Update auto hibern8 timer value if supported */
+       if (change && ufshcd_is_auto_hibern8_supported(hba) &&
+           hba->hibern8_on_idle.is_enabled)
+               __ufshcd_set_auto_hibern8_timer(hba,
+                                               hba->hibern8_on_idle.delay_ms);
+
        return count;
 }
 
-static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
 {
-       if (!ufshcd_is_clkgating_allowed(hba))
-               return;
+       struct ufs_hba *hba = dev_get_drvdata(dev);
 
-       hba->clk_gating.delay_ms = 150;
-       INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
-       INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       hba->hibern8_on_idle.is_enabled);
+}
 
-       hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
-       hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
-       sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
-       hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
-       hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
-       if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
-               dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       unsigned long flags;
+       u32 value;
+
+       if (kstrtou32(buf, 0, &value))
+               return -EINVAL;
+
+       value = !!value;
+       if (value == hba->hibern8_on_idle.is_enabled)
+               goto out;
+
+       /* Update auto hibern8 timer value if supported */
+       if (ufshcd_is_auto_hibern8_supported(hba)) {
+               __ufshcd_set_auto_hibern8_timer(hba,
+                       value ? hba->hibern8_on_idle.delay_ms : value);
+               goto update;
+       }
+
+       if (value) {
+               /*
+                * As clock gating work would wait for the hibern8 enter work
+                * to finish, clocks would remain on during hibern8 enter work.
+                */
+               ufshcd_hold(hba, false);
+               ufshcd_release_all(hba);
+       } else {
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               hba->hibern8_on_idle.active_reqs++;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
+
+update:
+       hba->hibern8_on_idle.is_enabled = value;
+out:
+       return count;
 }
 
-static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
 {
-       if (!ufshcd_is_clkgating_allowed(hba))
+       /* initialize the state variable here */
+       hba->hibern8_on_idle.state = HIBERN8_EXITED;
+
+       if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
+           !ufshcd_is_auto_hibern8_supported(hba))
                return;
-       device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
-       cancel_work_sync(&hba->clk_gating.ungate_work);
-       cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+
+       if (ufshcd_is_auto_hibern8_supported(hba)) {
+               hba->hibern8_on_idle.delay_ms = 1;
+               hba->hibern8_on_idle.state = AUTO_HIBERN8;
+               /*
+                * Disable SW hibern8 enter on idle in case
+                * auto hibern8 is supported
+                */
+               hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
+       } else {
+               hba->hibern8_on_idle.delay_ms = 10;
+               INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
+                                 ufshcd_hibern8_enter_work);
+               INIT_WORK(&hba->hibern8_on_idle.exit_work,
+                         ufshcd_hibern8_exit_work);
+       }
+
+       hba->hibern8_on_idle.is_enabled = true;
+
+       hba->hibern8_on_idle.delay_attr.show =
+                                       ufshcd_hibern8_on_idle_delay_show;
+       hba->hibern8_on_idle.delay_attr.store =
+                                       ufshcd_hibern8_on_idle_delay_store;
+       sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
+       hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
+       hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
+               dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
+
+       hba->hibern8_on_idle.enable_attr.show =
+                                       ufshcd_hibern8_on_idle_enable_show;
+       hba->hibern8_on_idle.enable_attr.store =
+                                       ufshcd_hibern8_on_idle_enable_store;
+       sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
+       hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
+       hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
+               dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
+}
+
+static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
+{
+       if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
+           !ufshcd_is_auto_hibern8_supported(hba))
+               return;
+       device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
+       device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
+}
+
+static void ufshcd_hold_all(struct ufs_hba *hba)
+{
+       ufshcd_hold(hba, false);
+       ufshcd_hibern8_hold(hba, false);
+}
+
+static void ufshcd_release_all(struct ufs_hba *hba)
+{
+       ufshcd_hibern8_release(hba, false);
+       ufshcd_release(hba, false);
 }
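+
+/*
+ * Note the ordering in the helpers above: hold_all takes the clock
+ * reference before the hibern8 reference (clocks must be running to
+ * exit hibern8), while release_all drops them in reverse so the link
+ * may re-enter hibern8 before the clocks are allowed to gate.
+ */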
 
 /* Must be called with host lock acquired */
 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 {
-       if (!ufshcd_is_clkscaling_enabled(hba))
+       bool queue_resume_work = false;
+
+       if (!ufshcd_is_clkscaling_supported(hba))
+               return;
+
+       if (!hba->clk_scaling.active_reqs++)
+               queue_resume_work = true;
+
+       if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
                return;
 
+       if (queue_resume_work)
+               queue_work(hba->clk_scaling.workq,
+                          &hba->clk_scaling.resume_work);
+
+       if (!hba->clk_scaling.window_start_t) {
+               hba->clk_scaling.window_start_t = jiffies;
+               hba->clk_scaling.tot_busy_t = 0;
+               hba->clk_scaling.is_busy_started = false;
+       }
+
        if (!hba->clk_scaling.is_busy_started) {
                hba->clk_scaling.busy_start_t = ktime_get();
                hba->clk_scaling.is_busy_started = true;
@@ -796,7 +2319,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 {
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
 
-       if (!ufshcd_is_clkscaling_enabled(hba))
+       if (!ufshcd_is_clkscaling_supported(hba))
                return;
 
        if (!hba->outstanding_reqs && scaling->is_busy_started) {
@@ -806,17 +2329,27 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
                scaling->is_busy_started = false;
        }
 }
+
 /**
  * ufshcd_send_command - Send SCSI or device management commands
  * @hba: per adapter instance
  * @task_tag: Task tag of the command
  */
 static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
+       int ret = 0;
+
+       hba->lrb[task_tag].issue_time_stamp = ktime_get();
+       hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
        ufshcd_clk_scaling_start_busy(hba);
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+       /* Make sure that doorbell is committed immediately */
+       wmb();
+       ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
+       ufshcd_update_tag_stats(hba, task_tag);
+       return ret;
 }
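+
+/*
+ * Issue-path ordering in ufshcd_send_command(): stamp the issue time,
+ * open the clock scaling busy window, mark the tag outstanding, ring
+ * the doorbell, then wmb() so the doorbell MMIO write is committed
+ * before the trace and statistics writes that follow.
+ */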
 
 /**
@@ -835,7 +2368,7 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
 
                memcpy(lrbp->sense_buffer,
                        lrbp->ucd_rsp_ptr->sr.sense_data,
-                       min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
+                       min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
        }
 }
 
@@ -890,6 +2423,9 @@ static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
        hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
        hba->nutmrs =
        ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+
+       /* disable auto hibern8 */
+       hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
 }
 
 /**
@@ -932,6 +2468,7 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 
        hba->active_uic_cmd = uic_cmd;
 
+       ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
        /* Write Args */
        ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
        ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@ -962,6 +2499,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
        else
                ret = -ETIMEDOUT;
 
+       if (ret)
+               ufsdbg_set_err_state(hba);
+
+       ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
+
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->active_uic_cmd = NULL;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -973,13 +2515,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
  * @hba: per adapter instance
  * @uic_cmd: UIC command
+ * @completion: initialize the completion only if this is set to true
  *
  * Identical to ufshcd_send_uic_cmd() except for the mutex. Must be called
  * with mutex held and host_lock locked.
  * Returns 0 only if success.
  */
 static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+                     bool completion)
 {
        if (!ufshcd_ready_for_uic_cmd(hba)) {
                dev_err(hba->dev,
@@ -987,7 +2531,8 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
                return -EIO;
        }
 
-       init_completion(&uic_cmd->done);
+       if (completion)
+               init_completion(&uic_cmd->done);
 
        ufshcd_dispatch_uic_cmd(hba, uic_cmd);
 
@@ -1007,19 +2552,25 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
        int ret;
        unsigned long flags;
 
-       ufshcd_hold(hba, false);
+       hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
+       ufshcd_hold_all(hba);
        mutex_lock(&hba->uic_cmd_mutex);
        ufshcd_add_delay_before_dme_cmd(hba);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+       ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (!ret)
                ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
 
+       ufshcd_save_tstamp_of_last_dme_cmd(hba);
        mutex_unlock(&hba->uic_cmd_mutex);
+       ufshcd_release_all(hba);
+       hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
+
+       ufsdbg_error_inject_dispatcher(hba,
+               ERR_INJECT_UIC, 0, &ret);
 
-       ufshcd_release(hba);
        return ret;
 }
 
@@ -1055,6 +2606,7 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
                                cpu_to_le32(lower_32_bits(sg->dma_address));
                        prd_table[i].upper_addr =
                                cpu_to_le32(upper_32_bits(sg->dma_address));
+                       prd_table[i].reserved = 0;
                }
        } else {
                lrbp->utr_descriptor_ptr->prd_table_length = 0;
@@ -1105,15 +2657,52 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
 }
 
+static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
+               struct ufshcd_lrb *lrbp)
+{
+       struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+       u8 cc_index = 0;
+       bool enable = false;
+       u64 dun = 0;
+       int ret;
+
+       /*
+        * Call vendor specific code to get crypto info for this request:
+        * the enable flag, crypto config index and DUN.
+        * If bypass is set, don't bother setting the other fields.
+        */
+       ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
+       if (ret) {
+               if (ret != -EAGAIN) {
+                       dev_err(hba->dev,
+                               "%s: failed to setup crypto request (%d)\n",
+                               __func__, ret);
+               }
+
+               return ret;
+       }
+
+       if (!enable)
+               goto out;
+
+       req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
+       req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
+       req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
+out:
+       return 0;
+}
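+
+/*
+ * For inline crypto the 64-bit DUN is split across the two otherwise
+ * reserved header words: dword_1 carries the low 32 bits and dword_3
+ * the high 32 bits, while dword_0 gains the crypto config index plus
+ * UTRD_CRYPTO_ENABLE. E.g. dun = 0x1122334455667788 ends up as
+ * dword_1 = 0x55667788 and dword_3 = 0x11223344.
+ */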
+
 /**
  * ufshcd_prepare_req_desc_hdr() - Fills the requests header
  * descriptor according to request
+ * @hba: per adapter instance
  * @lrbp: pointer to local reference block
  * @upiu_flags: flags required in the header
  * @cmd_dir: requests data direction
  */
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
-               u32 *upiu_flags, enum dma_data_direction cmd_dir)
+static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
+       struct ufshcd_lrb *lrbp, u32 *upiu_flags,
+       enum dma_data_direction cmd_dir)
 {
        struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
        u32 data_direction;
@@ -1137,7 +2726,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
 
        /* Transfer request descriptor header fields */
        req_desc->header.dword_0 = cpu_to_le32(dword_0);
-
+       /* dword_1 is reserved, hence it is set to 0 */
+       req_desc->header.dword_1 = 0;
        /*
         * assigning invalid value for command status. Controller
         * updates OCS on command completion, with the command
@@ -1145,6 +2735,15 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
         */
        req_desc->header.dword_2 =
                cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+       /* dword_3 is reserved, hence it is set to 0 */
+       req_desc->header.dword_3 = 0;
+
+       req_desc->prd_table_length = 0;
+
+       if (ufshcd_is_crypto_supported(hba))
+               return ufshcd_prepare_crypto_utrd(hba, lrbp);
+
+       return 0;
 }
 
 /**
@@ -1157,6 +2756,7 @@ static
 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
 {
        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+       unsigned short cdb_len;
 
        /* command descriptor fields */
        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -1171,8 +2771,12 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
        ucd_req_ptr->sc.exp_data_transfer_len =
                cpu_to_be32(lrbp->cmd->sdb.length);
 
-       memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
-               (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
+       cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
+       memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+       if (cdb_len < MAX_CDB_SIZE)
+               memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
+                      (MAX_CDB_SIZE - cdb_len));
+       memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
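+
+/*
+ * The zero-padding above matters because the UPIU buffers are reused
+ * across requests: without clearing the CDB tail up to MAX_CDB_SIZE
+ * (and the response UPIU), a shorter command would leak stale bytes
+ * from the previous request to the device.
+ */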
 
 /**
@@ -1209,6 +2813,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
        if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
                memcpy(descp, query->descriptor, len);
 
+       memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
 
 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
@@ -1221,6 +2826,11 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
        ucd_req_ptr->header.dword_0 =
                UPIU_HEADER_DWORD(
                        UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+       /* clear rest of the fields of basic header */
+       ucd_req_ptr->header.dword_1 = 0;
+       ucd_req_ptr->header.dword_2 = 0;
+
+       memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
 
 /**
@@ -1236,15 +2846,16 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        switch (lrbp->command_type) {
        case UTP_CMD_TYPE_SCSI:
                if (likely(lrbp->cmd)) {
-                       ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
-                                       lrbp->cmd->sc_data_direction);
+                       ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
+                               &upiu_flags, lrbp->cmd->sc_data_direction);
                        ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
                } else {
                        ret = -EINVAL;
                }
                break;
        case UTP_CMD_TYPE_DEV_MANAGE:
-               ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+               ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
+                       DMA_NONE);
                if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
                        ufshcd_prepare_utp_query_req_upiu(
                                        hba, lrbp, upiu_flags);
@@ -1296,6 +2907,61 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
 }
 
 /**
+ * ufshcd_get_write_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * The lock is predominantly held by the shutdown context, thus
+ * ensuring that no requests from any other context may sneak through.
+ */
+static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
+{
+       down_write(&hba->lock);
+}
+
+/**
+ * ufshcd_get_read_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Returns 1 if the lock was acquired, 0 if the request is allowed to
+ * proceed without taking the lock, and < 0 on contention.
+ *
+ * After shutdown is initiated, only requests directed to the well-known
+ * device LUN are allowed through. The sync between scaling and request
+ * issue is maintained as is, and this restructuring syncs shutdown with
+ * these too.
+ */
+static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
+{
+       int err = 0;
+
+       err = down_read_trylock(&hba->lock);
+       if (err > 0)
+               goto out;
+       /* let requests for well known device lun to go through */
+       if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+               return 0;
+       else if (!ufshcd_is_shutdown_ongoing(hba))
+               return -EAGAIN;
+       else
+               return -EPERM;
+
+out:
+       return err;
+}
+
+/**
+ * ufshcd_put_read_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Returns none
+ */
+static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
+{
+       up_read(&hba->lock);
+}
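+
+/*
+ * Sketch of how hba->lock is shared by the three contexts named above:
+ *
+ *   queuecommand: ufshcd_get_read_lock(hba, lun), a trylock; -EAGAIN
+ *                 maps to SCSI_MLQUEUE_HOST_BUSY and -EPERM fails the
+ *                 command outright (see ufshcd_queuecommand() below)
+ *   scaling/shutdown: ufshcd_get_write_lock(hba), which blocks until
+ *                 all readers drain; shutdown keeps holding it so only
+ *                 the device WLUN bypass above lets requests through
+ */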
+
+/**
  * ufshcd_queuecommand - main entry point for SCSI requests
  * @cmd: command from SCSI Midlayer
  * @done: call back function
@@ -1309,12 +2975,42 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        unsigned long flags;
        int tag;
        int err = 0;
+       bool has_read_lock = false;
 
        hba = shost_priv(host);
 
+       if (!cmd || !cmd->request || !hba)
+               return -EINVAL;
+
        tag = cmd->request->tag;
+       if (!ufshcd_valid_tag(hba, tag)) {
+               dev_err(hba->dev,
+                       "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+                       __func__, tag, cmd, cmd->request);
+               BUG();
+       }
+
+       err = ufshcd_get_read_lock(hba, cmd->device->lun);
+       if (unlikely(err < 0)) {
+               if (err == -EPERM) {
+                       set_host_byte(cmd, DID_ERROR);
+                       cmd->scsi_done(cmd);
+                       return 0;
+               }
+               if (err == -EAGAIN)
+                       return SCSI_MLQUEUE_HOST_BUSY;
+       } else if (err == 1) {
+               has_read_lock = true;
+       }
 
        spin_lock_irqsave(hba->host->host_lock, flags);
+
+       /* if error handling is in progress, return host busy */
+       if (ufshcd_eh_in_progress(hba)) {
+               err = SCSI_MLQUEUE_HOST_BUSY;
+               goto out_unlock;
+       }
+
        switch (hba->ufshcd_state) {
        case UFSHCD_STATE_OPERATIONAL:
                break;
@@ -1334,6 +3030,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+       hba->req_abort_count = 0;
+
        /* acquire the tag to make sure device cmds don't use it */
        if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
                /*
@@ -1346,40 +3044,118 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                goto out;
        }
 
+       hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
        err = ufshcd_hold(hba, true);
        if (err) {
                err = SCSI_MLQUEUE_HOST_BUSY;
                clear_bit_unlock(tag, &hba->lrb_in_use);
                goto out;
        }
+       if (ufshcd_is_clkgating_allowed(hba))
+               WARN_ON(hba->clk_gating.state != CLKS_ON);
+
+       err = ufshcd_hibern8_hold(hba, true);
+       if (err) {
+               clear_bit_unlock(tag, &hba->lrb_in_use);
+               err = SCSI_MLQUEUE_HOST_BUSY;
+               hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
+               ufshcd_release(hba, true);
+               goto out;
+       }
+       if (ufshcd_is_hibern8_on_idle_allowed(hba))
+               WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
+
+       /* Vote PM QoS for the request */
+       ufshcd_vops_pm_qos_req_start(hba, cmd->request);
+
+       /* IO svc time latency histogram */
+       if (hba->latency_hist_enabled &&
+           (cmd->request->cmd_type == REQ_TYPE_FS)) {
+               cmd->request->lat_hist_io_start = ktime_get();
+               cmd->request->lat_hist_enabled = 1;
+       } else {
+               cmd->request->lat_hist_enabled = 0;
+       }
+
        WARN_ON(hba->clk_gating.state != CLKS_ON);
 
        lrbp = &hba->lrb[tag];
 
        WARN_ON(lrbp->cmd);
        lrbp->cmd = cmd;
-       lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+       lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
        lrbp->sense_buffer = cmd->sense_buffer;
        lrbp->task_tag = tag;
        lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
        lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
        lrbp->command_type = UTP_CMD_TYPE_SCSI;
+       lrbp->req_abort_skip = false;
+
+       /* form UPIU before issuing the command */
+       err = ufshcd_compose_upiu(hba, lrbp);
+       if (err) {
+               if (err != -EAGAIN)
+                       dev_err(hba->dev,
+                               "%s: failed to compose upiu %d\n",
+                               __func__, err);
+
+               lrbp->cmd = NULL;
+               clear_bit_unlock(tag, &hba->lrb_in_use);
+               ufshcd_release_all(hba);
+               ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+               goto out;
+       }
+
+       err = ufshcd_map_sg(lrbp);
+       if (err) {
+               lrbp->cmd = NULL;
+               clear_bit_unlock(tag, &hba->lrb_in_use);
+               ufshcd_release_all(hba);
+               ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+               goto out;
+       }
+
+       err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
+       if (err) {
+               if (err != -EAGAIN)
+                       dev_err(hba->dev,
+                               "%s: failed to configure crypto engine %d\n",
+                               __func__, err);
+
+               scsi_dma_unmap(lrbp->cmd);
+               lrbp->cmd = NULL;
+               clear_bit_unlock(tag, &hba->lrb_in_use);
+               ufshcd_release_all(hba);
+               ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+
+               goto out;
+       }
+
+       /* Make sure descriptors are ready before ringing the doorbell */
+       wmb();
+       /* issue command to the controller */
+       spin_lock_irqsave(hba->host->host_lock, flags);
 
-       /* form UPIU before issuing the command */
-       ufshcd_compose_upiu(hba, lrbp);
-       err = ufshcd_map_sg(lrbp);
+       err = ufshcd_send_command(hba, tag);
        if (err) {
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               scsi_dma_unmap(lrbp->cmd);
                lrbp->cmd = NULL;
                clear_bit_unlock(tag, &hba->lrb_in_use);
+               ufshcd_release_all(hba);
+               ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+               ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
+               dev_err(hba->dev, "%s: failed sending command, %d\n",
+                                                       __func__, err);
+               err = DID_ERROR;
                goto out;
        }
 
-       /* issue command to the controller */
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       ufshcd_send_command(hba, tag);
 out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
+       if (has_read_lock)
+               ufshcd_put_read_lock(hba);
        return err;
 }
 
@@ -1416,7 +3192,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
         */
        err = ufshcd_wait_for_register(hba,
                        REG_UTP_TRANSFER_REQ_DOOR_BELL,
-                       mask, ~mask, 1000, 1000);
+                       mask, ~mask, 1000, 1000, true);
 
        return err;
 }
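The extra boolean argument added to ufshcd_wait_for_register() selects between sleeping and busy-wait polling. A simplified sketch of the assumed contract (illustrative; the name and delay values are not the driver's exact helper):

static int example_poll_register(struct ufs_hba *hba, u32 reg, u32 mask,
				 u32 val, unsigned long timeout_ms,
				 bool can_sleep)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	val &= mask;	/* ignore bits outside the polled mask */
	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		if (can_sleep)
			usleep_range(10, 50);
		else
			udelay(10);
	}
	return 0;
}

Under a contract like this, the mask/~mask pair passed above simply waits for the doorbell bit to drop.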
@@ -1443,6 +3219,7 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        int resp;
        int err = 0;
 
+       hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
        resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
 
        switch (resp) {
@@ -1495,11 +3272,22 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 
        if (!time_left) {
                err = -ETIMEDOUT;
+               dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
+                       __func__, lrbp->task_tag);
                if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
-                       /* sucessfully cleared the command, retry if needed */
+                       /* successfully cleared the command, retry if needed */
                        err = -EAGAIN;
+               /*
+                * in case of an error, after clearing the doorbell,
+                * we also need to clear the outstanding_request
+                * field in hba
+                */
+               ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
        }
 
+       if (err)
+               ufsdbg_set_err_state(hba);
+
        return err;
 }
 
@@ -1560,6 +3348,15 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
        unsigned long flags;
 
        /*
+        * May get invoked from shutdown, IOCTL and error recovery contexts.
+        * In the shutdown context it comes in with the lock already acquired;
+        * in the error recovery context it may come in with the lock acquired.
+        */
+
+       if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+               down_read(&hba->lock);
+
+       /*
         * Get free slot, sleep if slots are unavailable.
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by SCSI request timeout.
@@ -1575,15 +3372,23 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 
        hba->dev_cmd.complete = &wait;
 
+       /* Make sure descriptors are ready before ringing the doorbell */
+       wmb();
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ufshcd_send_command(hba, tag);
+       err = ufshcd_send_command(hba, tag);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
-
+       if (err) {
+               dev_err(hba->dev, "%s: failed sending command, %d\n",
+                                                       __func__, err);
+               goto out_put_tag;
+       }
        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
 
 out_put_tag:
        ufshcd_put_dev_cmd_tag(hba, tag);
        wake_up(&hba->dev_cmd.tag_wq);
+       if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+               up_read(&hba->lock);
        return err;
 }
 
@@ -1601,6 +3406,12 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
                struct ufs_query_req **request, struct ufs_query_res **response,
                enum query_opcode opcode, u8 idn, u8 index, u8 selector)
 {
+       int idn_t = (int)idn;
+
+       ufsdbg_error_inject_dispatcher(hba,
+               ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
+       idn = idn_t;
+
        *request = &hba->dev_cmd.query.request;
        *response = &hba->dev_cmd.query.response;
        memset(*request, 0, sizeof(struct ufs_query_req));
@@ -1609,6 +3420,31 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
        (*request)->upiu_req.idn = idn;
        (*request)->upiu_req.index = index;
        (*request)->upiu_req.selector = selector;
+
+       ufshcd_update_query_stats(hba, opcode, idn);
+}
+
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+       enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+{
+       int ret;
+       int retries;
+
+       for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+               ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+               if (ret)
+                       dev_dbg(hba->dev,
+                               "%s: failed with error %d, retries %d\n",
+                               __func__, ret, retries);
+               else
+                       break;
+       }
+
+       if (ret)
+               dev_err(hba->dev,
+                       "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+                       __func__, opcode, idn, ret, retries);
+       return ret;
 }
 
 /**
@@ -1620,16 +3456,17 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
  *
  * Returns 0 for success, non-zero in case of failure
  */
-static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
                        enum flag_idn idn, bool *flag_res)
 {
        struct ufs_query_req *request = NULL;
        struct ufs_query_res *response = NULL;
        int err, index = 0, selector = 0;
+       int timeout = QUERY_REQ_TIMEOUT;
 
        BUG_ON(!hba);
 
-       ufshcd_hold(hba, false);
+       ufshcd_hold_all(hba);
        mutex_lock(&hba->dev_cmd.lock);
        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
                        selector);
@@ -1658,12 +3495,12 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
                goto out_unlock;
        }
 
-       err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+       err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
 
        if (err) {
                dev_err(hba->dev,
                        "%s: Sending flag query for idn %d failed, err = %d\n",
-                       __func__, idn, err);
+                       __func__, request->upiu_req.idn, err);
                goto out_unlock;
        }
 
@@ -1673,9 +3510,10 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 
 out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
-       ufshcd_release(hba);
+       ufshcd_release_all(hba);
        return err;
 }
+EXPORT_SYMBOL(ufshcd_query_flag);
 
 /**
  * ufshcd_query_attr - API function for sending attribute requests
@@ -1688,7 +3526,7 @@ out_unlock:
  *
  * Returns 0 for success, non-zero in case of failure
 */
-static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
                        enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
 {
        struct ufs_query_req *request = NULL;
@@ -1697,7 +3535,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 
        BUG_ON(!hba);
 
-       ufshcd_hold(hba, false);
+       ufshcd_hold_all(hba);
        if (!attr_val) {
                dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
                                __func__, opcode);
@@ -1727,8 +3565,9 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 
        if (err) {
-               dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-                               __func__, opcode, idn, err);
+               dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+                               __func__, opcode,
+                               request->upiu_req.idn, index, err);
                goto out_unlock;
        }
 
@@ -1737,25 +3576,49 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
 out:
-       ufshcd_release(hba);
+       ufshcd_release_all(hba);
        return err;
 }
+EXPORT_SYMBOL(ufshcd_query_attr);
 
 /**
- * ufshcd_query_descriptor - API function for sending descriptor requests
- * hba: per-adapter instance
- * opcode: attribute opcode
- * idn: attribute idn to access
- * index: index field
- * selector: selector field
- * desc_buf: the buffer that contains the descriptor
- * buf_len: length parameter passed to the device
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
  *
- * Returns 0 for success, non-zero in case of failure.
- * The buf_len parameter will contain, on return, the length parameter
- * received on the response.
- */
-static int ufshcd_query_descriptor(struct ufs_hba *hba,
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+       enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+       u32 *attr_val)
+{
+       int ret = 0;
+       u32 retries;
+
+       for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+               ret = ufshcd_query_attr(hba, opcode, idn, index,
+                                               selector, attr_val);
+               if (ret)
+                       dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
+                               __func__, ret, retries);
+               else
+                       break;
+       }
+
+       if (ret)
+               dev_err(hba->dev,
+                       "%s: query attribute, idn %d, failed with error %d after %d retires\n",
+                       __func__, idn, ret, retries);
+       return ret;
+}
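The attribute wrapper follows the same retry pattern; an illustrative call (example_* is hypothetical; QUERY_ATTR_IDN_BOOT_LU_EN is a standard UFS attribute idn):

static int example_read_boot_lun_en(struct ufs_hba *hba, u32 *val)
{
	/* index 0, selector 0 address a device-level attribute */
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				       QUERY_ATTR_IDN_BOOT_LU_EN, 0, 0, val);
}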
+
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
                        enum query_opcode opcode, enum desc_idn idn, u8 index,
                        u8 selector, u8 *desc_buf, int *buf_len)
 {
@@ -1765,7 +3628,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
 
        BUG_ON(!hba);
 
-       ufshcd_hold(hba, false);
+       ufshcd_hold_all(hba);
        if (!desc_buf) {
                dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
                                __func__, opcode);
@@ -1804,8 +3667,9 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 
        if (err) {
-               dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-                               __func__, opcode, idn, err);
+               dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+                               __func__, opcode,
+                               request->upiu_req.idn, index, err);
                goto out_unlock;
        }
 
@@ -1815,9 +3679,41 @@ out_unlock:
        hba->dev_cmd.query.descriptor = NULL;
        mutex_unlock(&hba->dev_cmd.lock);
 out:
-       ufshcd_release(hba);
+       ufshcd_release_all(hba);
+       return err;
+}
+
+/**
+ * ufshcd_query_descriptor - API function for sending descriptor requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @desc_buf: the buffer that contains the descriptor
+ * @buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+int ufshcd_query_descriptor(struct ufs_hba *hba,
+                       enum query_opcode opcode, enum desc_idn idn, u8 index,
+                       u8 selector, u8 *desc_buf, int *buf_len)
+{
+       int err;
+       int retries;
+
+       for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+               err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+                                               selector, desc_buf, buf_len);
+               if (!err || err == -EINVAL)
+                       break;
+       }
+
        return err;
 }
+EXPORT_SYMBOL(ufshcd_query_descriptor);
 
 /**
  * ufshcd_read_desc_param - read the specified descriptor parameter
@@ -1865,15 +3761,38 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
                                      desc_id, desc_index, 0, desc_buf,
                                      &buff_len);
 
-       if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
-           (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
-            ufs_query_desc_max_size[desc_id])
-           || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
-               dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
-                       __func__, desc_id, param_offset, buff_len, ret);
-               if (!ret)
-                       ret = -EINVAL;
+       if (ret) {
+               dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+                       __func__, desc_id, desc_index, param_offset, ret);
+
+               goto out;
+       }
+
+       /* Sanity check */
+       if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+               dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+                       __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+               ret = -EINVAL;
+               goto out;
+       }
 
+       /*
+        * While reading variable size descriptors (like string descriptor),
+        * some UFS devices may report the "LENGTH" (field in "Transaction
+        * Specific fields" of Query Response UPIU) same as what was requested
+        * in Query Request UPIU instead of reporting the actual size of the
+        * variable size descriptor.
+        * It is safe to ignore the "LENGTH" field for variable size
+        * descriptors, as we can always derive the length of the descriptor
+        * from the descriptor header fields. Hence impose the length-match
+        * check only on fixed size descriptors (for which we always request
+        * the correct size as part of the Query Request UPIU).
+        */
+       if ((desc_id != QUERY_DESC_IDN_STRING) &&
+           (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+               dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+                       __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+               ret = -EINVAL;
                goto out;
        }
 
@@ -1901,6 +3820,82 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
        return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
 }
 
+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+       return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ * @hba: pointer to adapter instance
+ * @desc_index: descriptor index
+ * @buf: pointer to buffer where descriptor would be read
+ * @size: size of buf
+ * @ascii: if true, convert from UTF-16 to ASCII characters
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+                               u32 size, bool ascii)
+{
+       int err = 0;
+
+       err = ufshcd_read_desc(hba,
+                               QUERY_DESC_IDN_STRING, desc_index, buf, size);
+
+       if (err) {
+               dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+                       __func__, QUERY_REQ_RETRIES, err);
+               goto out;
+       }
+
+       if (ascii) {
+               int desc_len;
+               int ascii_len;
+               int i;
+               char *buff_ascii;
+
+               desc_len = buf[0];
+               /* strip the header; UTF-16 is two bytes per character, +1 for NUL */
+               ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+               if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+                       dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+                                       __func__);
+                       err = -ENOMEM;
+                       goto out;
+               }
+
+               buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
+               if (!buff_ascii) {
+                       dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
+                                       __func__, ascii_len);
+                       err = -ENOMEM;
+                       goto out_free_buff;
+               }
+
+               /*
+                * The descriptor contains the string in UTF-16 format;
+                * convert it to UTF-8 so it can be displayed.
+                */
+               utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
+                               desc_len - QUERY_DESC_HDR_SIZE,
+                               UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+
+               /* replace non-printable or non-ASCII characters with spaces */
+               for (i = 0; i < ascii_len; i++)
+                       ufshcd_remove_non_printable(&buff_ascii[i]);
+
+               memset(buf + QUERY_DESC_HDR_SIZE, 0,
+                               size - QUERY_DESC_HDR_SIZE);
+               memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+               buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+out_free_buff:
+               kfree(buff_ascii);
+       }
+out:
+       return err;
+}
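ufshcd_remove_non_printable() is defined elsewhere in this driver; a minimal sketch of the behavior the loop above relies on (illustrative, not the exact definition):

static inline void example_remove_non_printable(char *val)
{
	/* clamp anything outside printable ASCII to a space */
	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}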
+
 /**
  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
  * @hba: Pointer to adapter instance
@@ -1921,7 +3916,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
         * Unit descriptors are only available for general purpose LUs (LUN id
         * from 0 to 7) and RPMB Well known LU.
         */
-       if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
+       if (!ufs_is_valid_unit_desc_lun(lun))
                return -EOPNOTSUPP;
 
        return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -2063,12 +4058,19 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
                                cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
 
                hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+               hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
+                               (i * sizeof(struct utp_transfer_req_desc));
                hba->lrb[i].ucd_req_ptr =
                        (struct utp_upiu_req *)(cmd_descp + i);
+               hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
                hba->lrb[i].ucd_rsp_ptr =
                        (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+               hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
+                               response_offset;
                hba->lrb[i].ucd_prdt_ptr =
                        (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+               hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
+                               prdt_offset;
        }
 }
 
@@ -2092,7 +4094,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
 
        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret)
-               dev_err(hba->dev,
+               dev_dbg(hba->dev,
                        "dme-link-startup: error code %d\n", ret);
        return ret;
 }
@@ -2128,6 +4130,13 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
        usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
 }
 
+static inline void ufshcd_save_tstamp_of_last_dme_cmd(
+                       struct ufs_hba *hba)
+{
+       if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
+               hba->last_dme_cmd_tstamp = ktime_get();
+}
+
 /**
  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
  * @hba: per adapter instance
@@ -2148,6 +4157,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
        };
        const char *set = action[!!peer];
        int ret;
+       int retries = UFS_UIC_COMMAND_RETRIES;
+
+       ufsdbg_error_inject_dispatcher(hba,
+               ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
 
        uic_cmd.command = peer ?
                UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
@@ -2155,10 +4168,18 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
        uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
        uic_cmd.argument3 = mib_val;
 
-       ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+       do {
+               /* for peer attributes we retry upon failure */
+               ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+               if (ret)
+                       dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+                               set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+       } while (ret && peer && --retries);
+
        if (ret)
-               dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
-                       set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+               dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+                       set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+                       UFS_UIC_COMMAND_RETRIES - retries);
 
        return ret;
 }
@@ -2183,6 +4204,7 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
        };
        const char *get = action[!!peer];
        int ret;
+       int retries = UFS_UIC_COMMAND_RETRIES;
        struct ufs_pa_layer_attr orig_pwr_info;
        struct ufs_pa_layer_attr temp_pwr_info;
        bool pwr_mode_change = false;
@@ -2211,16 +4233,26 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 
        uic_cmd.command = peer ?
                UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+
+       ufsdbg_error_inject_dispatcher(hba,
+               ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
+
        uic_cmd.argument1 = attr_sel;
 
-       ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
-       if (ret) {
-               dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
-                       get, UIC_GET_ATTR_ID(attr_sel), ret);
-               goto out;
-       }
+       do {
+               /* for peer attributes we retry upon failure */
+               ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+               if (ret)
+                       dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+                               get, UIC_GET_ATTR_ID(attr_sel), ret);
+       } while (ret && peer && --retries);
+
+       if (ret)
+               dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+                       get, UIC_GET_ATTR_ID(attr_sel),
+                       UFS_UIC_COMMAND_RETRIES - retries);
 
-       if (mib_val)
+       if (mib_val && !ret)
                *mib_val = uic_cmd.argument3;
 
        if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
@@ -2253,6 +4285,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
        unsigned long flags;
        u8 status;
        int ret;
+       bool reenable_intr = false;
 
        mutex_lock(&hba->uic_cmd_mutex);
        init_completion(&uic_async_done);
@@ -2260,15 +4293,17 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->uic_async_done = &uic_async_done;
-       ret = __ufshcd_send_uic_cmd(hba, cmd);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-       if (ret) {
-               dev_err(hba->dev,
-                       "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
-                       cmd->command, cmd->argument3, ret);
-               goto out;
+       if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+               ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+               /*
+                * Make sure UIC command completion interrupt is disabled before
+                * issuing UIC command.
+                */
+               wmb();
+               reenable_intr = true;
        }
-       ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+       ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (ret) {
                dev_err(hba->dev,
                        "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -2292,12 +4327,83 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
                        cmd->command, status);
                ret = (status != PWR_OK) ? status : -1;
        }
+       ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
+
 out:
+       if (ret) {
+               ufsdbg_set_err_state(hba);
+               ufshcd_print_host_state(hba);
+               ufshcd_print_pwr_info(hba);
+               ufshcd_print_host_regs(hba);
+               ufshcd_print_cmd_log(hba);
+       }
+
+       ufshcd_save_tstamp_of_last_dme_cmd(hba);
        spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->active_uic_cmd = NULL;
        hba->uic_async_done = NULL;
+       if (reenable_intr)
+               ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        mutex_unlock(&hba->uic_cmd_mutex);
+       return ret;
+}
+
+int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
+{
+       unsigned long flags;
+       int ret = 0;
+       u32 tm_doorbell;
+       u32 tr_doorbell;
+       bool timeout = false, do_last_check = false;
+       ktime_t start;
+
+       ufshcd_hold_all(hba);
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       /*
+        * Wait for all the outstanding tasks/transfer requests.
+        * Verify by checking the doorbell registers are clear.
+        */
+       start = ktime_get();
+       do {
+               if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+
+               tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+               tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+               if (!tm_doorbell && !tr_doorbell) {
+                       timeout = false;
+                       break;
+               } else if (do_last_check) {
+                       break;
+               }
 
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               schedule();
+               if (ktime_to_us(ktime_sub(ktime_get(), start)) >
+                   wait_timeout_us) {
+                       timeout = true;
+                       /*
+                        * We might have been scheduled out for a long time,
+                        * so check whether the doorbells have cleared by
+                        * now.
+                        */
+                       do_last_check = true;
+               }
+               spin_lock_irqsave(hba->host->host_lock, flags);
+       } while (tm_doorbell || tr_doorbell);
+
+       if (timeout) {
+               dev_err(hba->dev,
+                       "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
+                       __func__, tm_doorbell, tr_doorbell);
+               ret = -EBUSY;
+       }
+out:
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       ufshcd_release_all(hba);
        return ret;
 }
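An illustrative call site (the helper name and timeout value here are examples): paths such as clock scaling can use this to quiesce the controller before reconfiguring it.

static int example_quiesce(struct ufs_hba *hba)
{
	/* wait up to 1000 us for both doorbell registers to clear */
	return ufshcd_wait_for_doorbell_clr(hba, 1000);
}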
 
@@ -2311,49 +4417,165 @@ out:
  */
 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
-       struct uic_command uic_cmd = {0};
+       struct uic_command uic_cmd = {0};
+       int ret;
+
+       if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
+               ret = ufshcd_dme_set(hba,
+                               UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
+               if (ret) {
+                       dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
+                                               __func__, ret);
+                       goto out;
+               }
+       }
+
+       uic_cmd.command = UIC_CMD_DME_SET;
+       uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+       uic_cmd.argument3 = mode;
+       hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
+       ufshcd_hold_all(hba);
+       ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+       hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
+       ufshcd_release_all(hba);
+out:
+       return ret;
+}
+
+static int ufshcd_link_recovery(struct ufs_hba *hba)
+{
+       int ret = 0;
+       unsigned long flags;
+
+       /*
+        * Check if there is any race with fatal error handling.
+        * If so, wait for it to complete. Even though fatal error
+        * handling does reset and restore in some cases, don't assume
+        * anything from it; we are just avoiding the race here.
+        */
+       do {
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               if (!(work_pending(&hba->eh_work) ||
+                               hba->ufshcd_state == UFSHCD_STATE_RESET))
+                       break;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+               flush_work(&hba->eh_work);
+       } while (1);
+
+       /*
+        * We don't know whether the previous reset really reset the host
+        * controller. Force a reset here to be sure.
+        */
+       hba->ufshcd_state = UFSHCD_STATE_ERROR;
+       hba->force_host_reset = true;
+       schedule_work(&hba->eh_work);
+
+       /* wait for the reset work to finish */
+       do {
+               if (!(work_pending(&hba->eh_work) ||
+                               hba->ufshcd_state == UFSHCD_STATE_RESET))
+                       break;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+               flush_work(&hba->eh_work);
+               spin_lock_irqsave(hba->host->host_lock, flags);
+       } while (1);
+
+       if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+             ufshcd_is_link_active(hba)))
+               ret = -ENOLINK;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       return ret;
+}
+
+static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
        int ret;
+       struct uic_command uic_cmd = {0};
+       ktime_t start = ktime_get();
 
-       if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
-               ret = ufshcd_dme_set(hba,
-                               UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
-               if (ret) {
-                       dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
-                                               __func__, ret);
-                       goto out;
+       uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+       ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+       trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+                            ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+       /*
+        * Do full reinit if enter failed or if LINERESET was detected during
+        * Hibern8 operation. After LINERESET, link moves to default PWM-G1
+        * mode hence full reinit is required to move link to HS speeds.
+        */
+       if (ret || hba->full_init_linereset) {
+               int err;
+
+               hba->full_init_linereset = false;
+               ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
+               dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
+                       __func__, ret);
+               /*
+                * If link recovery fails, return the error code (-ENOLINK)
+                * returned by ufshcd_link_recovery().
+                * If link recovery succeeds, return -EAGAIN so that the
+                * hibern8 enter is retried.
+                */
+               err = ufshcd_link_recovery(hba);
+               if (err) {
+                       dev_err(hba->dev, "%s: link recovery failed", __func__);
+                       ret = err;
+               } else {
+                       ret = -EAGAIN;
                }
+       } else {
+               dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
+                       ktime_to_us(ktime_get()));
        }
 
-       uic_cmd.command = UIC_CMD_DME_SET;
-       uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
-       uic_cmd.argument3 = mode;
-       ufshcd_hold(hba, false);
-       ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
-       ufshcd_release(hba);
-
-out:
        return ret;
 }
 
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 {
-       struct uic_command uic_cmd = {0};
-
-       uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+       int ret = 0, retries;
 
-       return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+       for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
+               ret = __ufshcd_uic_hibern8_enter(hba);
+               if (!ret)
+                       goto out;
+               else if (ret != -EAGAIN)
+                       /* Unable to recover the link, so no point proceeding */
+                       BUG();
+       }
+out:
+       return ret;
 }
 
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 {
        struct uic_command uic_cmd = {0};
        int ret;
+       ktime_t start = ktime_get();
 
        uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+       trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+                            ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+       /* Do full reinit if exit failed */
        if (ret) {
-               ufshcd_set_link_off(hba);
-               ret = ufshcd_host_reset_and_restore(hba);
+               ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
+               dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
+                       __func__, ret);
+               ret = ufshcd_link_recovery(hba);
+               /* Unable to recover the link, so no point proceeding */
+               if (ret)
+                       BUG();
+       } else {
+               dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
+                       ktime_to_us(ktime_get()));
+               hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+               hba->ufs_stats.hibern8_exit_cnt++;
        }
 
        return ret;
@@ -2386,8 +4608,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
        if (hba->max_pwr_info.is_valid)
                return 0;
 
-       pwr_info->pwr_tx = FASTAUTO_MODE;
-       pwr_info->pwr_rx = FASTAUTO_MODE;
+       pwr_info->pwr_tx = FAST_MODE;
+       pwr_info->pwr_rx = FAST_MODE;
        pwr_info->hs_rate = PA_HS_MODE_B;
 
        /* Get the connected lane count */
@@ -2418,7 +4640,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
                                __func__, pwr_info->gear_rx);
                        return -EINVAL;
                }
-               pwr_info->pwr_rx = SLOWAUTO_MODE;
+               pwr_info->pwr_rx = SLOW_MODE;
        }
 
        ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2431,21 +4653,22 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
                                __func__, pwr_info->gear_tx);
                        return -EINVAL;
                }
-               pwr_info->pwr_tx = SLOWAUTO_MODE;
+               pwr_info->pwr_tx = SLOW_MODE;
        }
 
        hba->max_pwr_info.is_valid = true;
        return 0;
 }
 
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
+int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode)
 {
-       int ret;
+       int ret = 0;
 
        /* if already configured to the requested pwr_mode */
-       if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
-           pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+       if (!hba->restore_needed &&
+               pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+               pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
            pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
            pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
            pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
@@ -2455,6 +4678,10 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
                return 0;
        }
 
+       ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
+       if (ret)
+               return ret;
+
        /*
         * Configure attributes for power mode change with below.
         * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
@@ -2486,10 +4713,25 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
                                                pwr_mode->hs_rate);
 
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+                       DL_FC0ProtectionTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+                       DL_TC0ReplayTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+                       DL_AFC0ReqTimeOutVal_Default);
+
+       ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+                       DL_FC0ProtectionTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+                       DL_TC0ReplayTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+                       DL_AFC0ReqTimeOutVal_Default);
+
        ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
                        | pwr_mode->pwr_tx);
 
        if (ret) {
+               ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
                dev_err(hba->dev,
                        "%s: power mode change failed %d\n", __func__, ret);
        } else {
@@ -2498,6 +4740,7 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
 
                memcpy(&hba->pwr_info, pwr_mode,
                        sizeof(struct ufs_pa_layer_attr));
+               hba->ufs_stats.power_mode_change_cnt++;
        }
 
        return ret;
@@ -2521,6 +4764,8 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
                memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
 
        ret = ufshcd_change_power_mode(hba, &final_params);
+       if (!ret)
+               ufshcd_print_pwr_info(hba);
 
        return ret;
 }
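A hedged usage sketch of the power mode APIs above (example_* is hypothetical; the gear/lane values show one possible configuration, not a recommendation):

static int example_request_hs_g3(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr pwr = {
		.gear_rx = 3, .gear_tx = 3,
		.lane_rx = 2, .lane_tx = 2,
		.pwr_rx = FAST_MODE, .pwr_tx = FAST_MODE,
		.hs_rate = PA_HS_MODE_B,
	};

	/* vendor hooks may adjust the desired mode before it is applied */
	return ufshcd_config_pwr_mode(hba, &pwr);
}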
@@ -2533,17 +4778,12 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
  */
 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
 {
-       int i, retries, err = 0;
+       int i;
+       int err;
        bool flag_res = 1;
 
-       for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
-               /* Set the fDeviceInit flag */
-               err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
-                                       QUERY_FLAG_IDN_FDEVICEINIT, NULL);
-               if (!err || err == -ETIMEDOUT)
-                       break;
-               dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
-       }
+       err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+               QUERY_FLAG_IDN_FDEVICEINIT, NULL);
        if (err) {
                dev_err(hba->dev,
                        "%s setting fDeviceInit flag failed with error %d\n",
@@ -2551,18 +4791,11 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
                goto out;
        }
 
-       /* poll for max. 100 iterations for fDeviceInit flag to clear */
-       for (i = 0; i < 100 && !err && flag_res; i++) {
-               for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
-                       err = ufshcd_query_flag(hba,
-                                       UPIU_QUERY_OPCODE_READ_FLAG,
-                                       QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
-                       if (!err || err == -ETIMEDOUT)
-                               break;
-                       dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
-                                       err);
-               }
-       }
+       /* poll for max. 1000 iterations for fDeviceInit flag to clear */
+       for (i = 0; i < 1000 && !err && flag_res; i++)
+               err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+                       QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+
        if (err)
                dev_err(hba->dev,
                        "%s reading fDeviceInit flag failed with error %d\n",
@@ -2583,7 +4816,7 @@ out:
  * To bring UFS host controller to operational state,
  * 1. Enable required interrupts
  * 2. Configure interrupt aggregation
- * 3. Program UTRL and UTMRL base addres
+ * 3. Program UTRL and UTMRL base address
  * 4. Configure run-stop-registers
  *
  * Returns 0 on success, non-zero value on failure
@@ -2613,8 +4846,13 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
                        REG_UTP_TASK_REQ_LIST_BASE_H);
 
        /*
+        * Make sure base address and interrupt setup are updated before
+        * enabling the run/stop registers below.
+        */
+       wmb();
+
+       /*
         * UCRDY, UTMRLDY and UTRLRDY bits must be 1
-        * DEI, HEI bits must be 0
         */
        reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
        if (!(ufshcd_get_lists_status(reg))) {
@@ -2631,6 +4869,23 @@ out:
 }
 
 /**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ * @can_sleep: perform sleep or just spin
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+{
+       int err;
+
+       ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
+       err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+                                       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+                                       10, 1, can_sleep);
+       if (err)
+               dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
+
+/**
  * ufshcd_hba_enable - initialize the controller
  * @hba: per adapter instance
  *
@@ -2650,18 +4905,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
         * development and testing of this driver. msleep can be changed to
         * mdelay and retry count can be reduced based on the controller.
         */
-       if (!ufshcd_is_hba_active(hba)) {
-
+       if (!ufshcd_is_hba_active(hba))
                /* change controller state to "reset state" */
-               ufshcd_hba_stop(hba);
-
-               /*
-                * This delay is based on the testing done with UFS host
-                * controller FPGA. The delay can be changed based on the
-                * host controller used.
-                */
-               msleep(5);
-       }
+               ufshcd_hba_stop(hba, true);
 
        /* UniPro link is disabled at this point */
        ufshcd_set_link_off(hba);
@@ -2735,6 +4981,11 @@ static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
        return err;
 }
 
+static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
+{
+       return ufshcd_disable_tx_lcc(hba, false);
+}
+
 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
 {
        return ufshcd_disable_tx_lcc(hba, true);
@@ -2750,14 +5001,26 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
 {
        int ret;
        int retries = DME_LINKSTARTUP_RETRIES;
+       bool link_startup_again = false;
+
+       /*
+        * If the UFS device isn't active, we will have to issue link startup
+        * twice to make sure the device state moves to active.
+        */
+       if (!ufshcd_is_ufs_dev_active(hba))
+               link_startup_again = true;
 
+link_startup:
        do {
                ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
 
                ret = ufshcd_dme_link_startup(hba);
+               if (ret)
+                       ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
 
                /* check if device is detected by inter-connect layer */
                if (!ret && !ufshcd_is_device_present(hba)) {
+                       ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
                        dev_err(hba->dev, "%s: Device not present\n", __func__);
                        ret = -ENXIO;
                        goto out;
@@ -2776,12 +5039,28 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
                /* failed to get the link up... retire */
                goto out;
 
+       if (link_startup_again) {
+               link_startup_again = false;
+               retries = DME_LINKSTARTUP_RETRIES;
+               goto link_startup;
+       }
+
+       /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+       ufshcd_init_pwr_info(hba);
+       ufshcd_print_pwr_info(hba);
+
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
                ret = ufshcd_disable_device_tx_lcc(hba);
                if (ret)
                        goto out;
        }
 
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
+               ret = ufshcd_disable_host_tx_lcc(hba);
+               if (ret)
+                       goto out;
+       }
+
        /* Include any host controller configuration via UIC commands */
        ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
        if (ret)
@@ -2789,8 +5068,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
 
        ret = ufshcd_make_hba_operational(hba);
 out:
-       if (ret)
+       if (ret) {
                dev_err(hba->dev, "link startup failed %d\n", ret);
+               ufshcd_print_host_state(hba);
+               ufshcd_print_pwr_info(hba);
+               ufshcd_print_host_regs(hba);
+       }
        return ret;
 }
 
@@ -2809,7 +5092,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
        int err = 0;
        int retries;
 
-       ufshcd_hold(hba, false);
+       ufshcd_hold_all(hba);
        mutex_lock(&hba->dev_cmd.lock);
        for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -2821,7 +5104,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
                dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
        }
        mutex_unlock(&hba->dev_cmd.lock);
-       ufshcd_release(hba);
+       ufshcd_release_all(hba);
 
        if (err)
                dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -2847,10 +5130,10 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
 
        lun_qdepth = hba->nutrs;
        ret = ufshcd_read_unit_desc_param(hba,
-                                         ufshcd_scsi_to_upiu_lun(sdev->lun),
-                                         UNIT_DESC_PARAM_LU_Q_DEPTH,
-                                         &lun_qdepth,
-                                         sizeof(lun_qdepth));
+                         ufshcd_scsi_to_upiu_lun(sdev->lun),
+                         UNIT_DESC_PARAM_LU_Q_DEPTH,
+                         &lun_qdepth,
+                         sizeof(lun_qdepth));
 
        /* Some WLUN doesn't support unit descriptor */
        if (ret == -EOPNOTSUPP)
@@ -2980,6 +5263,9 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
        blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
        blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
 
+       sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
+       sdev->use_rpm_auto = 1;
+
        return 0;
 }
 
@@ -3089,6 +5375,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        int result = 0;
        int scsi_status;
        int ocs;
+       bool print_prdt;
 
        /* overall command status of utrd */
        ocs = ufshcd_get_tr_ocs(lrbp);
@@ -3096,7 +5383,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        switch (ocs) {
        case OCS_SUCCESS:
                result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
-
+               hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
                switch (result) {
                case UPIU_TRANSACTION_RESPONSE:
                        /*
@@ -3112,8 +5399,28 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                        scsi_status = result & MASK_SCSI_STATUS;
                        result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
 
-                       if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
-                               schedule_work(&hba->eeh_work);
+                       /*
+                        * Currently we only support BKOPs exception events,
+                        * so a BKOPs exception event can be ignored during
+                        * the power management callbacks. It is not expected
+                        * to be raised in the runtime suspend callback, as
+                        * that path allows urgent BKOPs.
+                        * During system suspend we forcefully disable BKOPs
+                        * anyway, and if urgent BKOPs is needed it will be
+                        * re-enabled on system resume. A long term solution
+                        * could be to abort the system suspend if the UFS
+                        * device needs urgent BKOPs.
+                        */
+                       if (!hba->pm_op_in_progress &&
+                           ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) {
+                               /*
+                                * Prevent suspend once eeh_work is scheduled
+                                * to avoid deadlock between ufshcd_suspend
+                                * and exception event handler.
+                                */
+                               if (schedule_work(&hba->eeh_work))
+                                       pm_runtime_get_noresume(hba->dev);
+                       }
                        break;
                case UPIU_TRANSACTION_REJECT_UPIU:
                        /* TODO: handle Reject UPIU Response */
@@ -3141,13 +5448,34 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        case OCS_MISMATCH_RESP_UPIU_SIZE:
        case OCS_PEER_COMM_FAILURE:
        case OCS_FATAL_ERROR:
+       case OCS_DEVICE_FATAL_ERROR:
+       case OCS_INVALID_CRYPTO_CONFIG:
+       case OCS_GENERAL_CRYPTO_ERROR:
        default:
                result |= DID_ERROR << 16;
                dev_err(hba->dev,
-               "OCS error from controller = %x\n", ocs);
+                               "OCS error from controller = %x for tag %d\n",
+                               ocs, lrbp->task_tag);
+               /*
+                * This is called in interrupt context, hence avoid sleep
+                * while printing debug registers. Also print only the minimum
+                * debug registers needed to debug OCS failure.
+                */
+               __ufshcd_print_host_regs(hba, true);
+               ufshcd_print_host_state(hba);
                break;
        } /* end of switch */
 
+       if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
+               print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
+                       ocs == OCS_MISMATCH_DATA_BUF_SIZE);
+               ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
+       }
+
+       if ((host_byte(result) == DID_ERROR) ||
+           (host_byte(result) == DID_ABORT))
+               ufsdbg_set_err_state(hba);
+
        return result;
 }
 
@@ -3155,64 +5483,178 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  * ufshcd_uic_cmd_compl - handle completion of uic command
  * @hba: per adapter instance
  * @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If interrupt is valid
+ *  IRQ_NONE    - If invalid interrupt
  */
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 {
+       irqreturn_t retval = IRQ_NONE;
+
        if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
                hba->active_uic_cmd->argument2 |=
                        ufshcd_get_uic_cmd_result(hba);
                hba->active_uic_cmd->argument3 =
                        ufshcd_get_dme_attr_val(hba);
                complete(&hba->active_uic_cmd->done);
+               retval = IRQ_HANDLED;
        }
 
-       if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
-               complete(hba->uic_async_done);
+       if (intr_status & UFSHCD_UIC_PWR_MASK) {
+               if (hba->uic_async_done) {
+                       complete(hba->uic_async_done);
+                       retval = IRQ_HANDLED;
+               } else if (ufshcd_is_auto_hibern8_supported(hba)) {
+                       /*
+                        * If the uic_async_done flag is not set then this
+                        * is an Auto Hibern8 error interrupt.
+                        * Perform a host reset followed by a full
+                        * link recovery.
+                        */
+                       hba->ufshcd_state = UFSHCD_STATE_ERROR;
+                       hba->force_host_reset = true;
+                       dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
+                               __func__, (intr_status & UIC_HIBERNATE_ENTER) ?
+                               "Enter" : "Exit",
+                               intr_status, ufshcd_get_upmcrs(hba));
+                       __ufshcd_print_host_regs(hba, true);
+                       ufshcd_print_host_state(hba);
+                       schedule_work(&hba->eh_work);
+                       retval = IRQ_HANDLED;
+               }
+       }
+       return retval;
 }
 
 /**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * ufshcd_abort_outstanding_transfer_requests - abort all outstanding transfer requests.
  * @hba: per adapter instance
+ * @result: error result to inform scsi layer about
  */
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
 {
+       u8 index;
        struct ufshcd_lrb *lrbp;
        struct scsi_cmnd *cmd;
-       unsigned long completed_reqs;
-       u32 tr_doorbell;
-       int result;
-       int index;
 
-       /* Resetting interrupt aggregation counters first and reading the
-        * DOOR_BELL afterward allows us to handle all the completed requests.
-        * In order to prevent other interrupts starvation the DB is read once
-        * after reset. The down side of this solution is the possibility of
-        * false interrupt if device completes another request after resetting
-        * aggregation and before reading the DB.
-        */
-       if (ufshcd_is_intr_aggr_allowed(hba))
-               ufshcd_reset_intr_aggr(hba);
+       if (!hba->outstanding_reqs)
+               return;
 
-       tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-       completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+       for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+               lrbp = &hba->lrb[index];
+               cmd = lrbp->cmd;
+               if (cmd) {
+                       ufshcd_cond_add_cmd_trace(hba, index, "failed");
+                       ufshcd_update_error_stats(hba,
+                                       UFS_ERR_INT_FATAL_ERRORS);
+                       scsi_dma_unmap(cmd);
+                       cmd->result = result;
+                       /* Clear pending transfer requests */
+                       ufshcd_clear_cmd(hba, index);
+                       ufshcd_outstanding_req_clear(hba, index);
+                       clear_bit_unlock(index, &hba->lrb_in_use);
+                       lrbp->complete_time_stamp = ktime_get();
+                       update_req_stats(hba, lrbp);
+                       /* Mark completed command as NULL in LRB */
+                       lrbp->cmd = NULL;
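+                       /* Release clock gating and hibern8 holds taken for this command */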
+                       ufshcd_release_all(hba);
+                       if (cmd->request) {
+                               /*
+                                * As we are accessing the "request" structure,
+                                * this must be called before calling
+                                * ->scsi_done() callback.
+                                */
+                               ufshcd_vops_pm_qos_req_end(hba, cmd->request,
+                                       true);
+                               ufshcd_vops_crypto_engine_cfg_end(hba,
+                                               lrbp, cmd->request);
+                       }
+                       /* Do not touch lrbp after scsi done */
+                       cmd->scsi_done(cmd);
+               } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+                       if (hba->dev_cmd.complete) {
+                               ufshcd_cond_add_cmd_trace(hba, index,
+                                                       "dev_failed");
+                               ufshcd_outstanding_req_clear(hba, index);
+                               complete(hba->dev_cmd.complete);
+                       }
+               }
+               if (ufshcd_is_clkscaling_supported(hba))
+                       hba->clk_scaling.active_reqs--;
+       }
+}
+
+/**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ * @completed_reqs: requests to complete
+ */
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+                                       unsigned long completed_reqs)
+{
+       struct ufshcd_lrb *lrbp;
+       struct scsi_cmnd *cmd;
+       int result;
+       int index;
+       struct request *req;
 
        for_each_set_bit(index, &completed_reqs, hba->nutrs) {
                lrbp = &hba->lrb[index];
                cmd = lrbp->cmd;
                if (cmd) {
+                       ufshcd_cond_add_cmd_trace(hba, index, "complete");
+                       ufshcd_update_tag_stats_completion(hba, cmd);
                        result = ufshcd_transfer_rsp_status(hba, lrbp);
                        scsi_dma_unmap(cmd);
                        cmd->result = result;
+                       clear_bit_unlock(index, &hba->lrb_in_use);
+                       lrbp->complete_time_stamp = ktime_get();
+                       update_req_stats(hba, lrbp);
                        /* Mark completed command as NULL in LRB */
                        lrbp->cmd = NULL;
-                       clear_bit_unlock(index, &hba->lrb_in_use);
+                       hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
+                       __ufshcd_release(hba, false);
+                       __ufshcd_hibern8_release(hba, false);
+                       if (cmd->request) {
+                               /*
+                                * As we are accessing the "request" structure,
+                                * this must be called before calling
+                                * ->scsi_done() callback.
+                                */
+                               ufshcd_vops_pm_qos_req_end(hba, cmd->request,
+                                       false);
+                               ufshcd_vops_crypto_engine_cfg_end(hba,
+                                       lrbp, cmd->request);
+                       }
+
+                       req = cmd->request;
+                       if (req) {
+                               /* Update IO svc time latency histogram */
+                               if (req->lat_hist_enabled) {
+                                       ktime_t completion;
+                                       u_int64_t delta_us;
+
+                                       completion = ktime_get();
+                                       delta_us = ktime_us_delta(completion,
+                                                 req->lat_hist_io_start);
+                                       blk_update_latency_hist(
+                                               (rq_data_dir(req) == READ) ?
+                                               &hba->io_lat_read :
+                                               &hba->io_lat_write, delta_us);
+                               }
+                       }
                        /* Do not touch lrbp after scsi done */
                        cmd->scsi_done(cmd);
-                       __ufshcd_release(hba);
                } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
-                       if (hba->dev_cmd.complete)
+                       if (hba->dev_cmd.complete) {
+                               ufshcd_cond_add_cmd_trace(hba, index,
+                                               "dcmp");
                                complete(hba->dev_cmd.complete);
+                       }
                }
+               if (ufshcd_is_clkscaling_supported(hba))
+                       hba->clk_scaling.active_reqs--;
        }
 
        /* clear corresponding bits of completed commands */
@@ -3225,6 +5667,40 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 }
 
 /**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If the interrupt is valid
+ *  IRQ_NONE    - If the interrupt is invalid
+ */
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+       unsigned long completed_reqs;
+       u32 tr_doorbell;
+
+       /* Resetting interrupt aggregation counters first and reading the
+        * DOOR_BELL afterward allows us to handle all the completed requests.
+        * In order to prevent starvation of other interrupts the DB is read
+        * once after reset. The downside of this solution is the possibility
+        * of a false interrupt if the device completes another request after
+        * resetting aggregation and before reading the DB.
+        */
+       if (ufshcd_is_intr_aggr_allowed(hba))
+               ufshcd_reset_intr_aggr(hba);
+
+       tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
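+       /*
+        * Bits set in outstanding_reqs but already cleared in the doorbell
+        * register identify the requests that the controller has completed.
+        */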
+       completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+       if (completed_reqs) {
+               __ufshcd_transfer_req_compl(hba, completed_reqs);
+               return IRQ_HANDLED;
+       } else {
+               return IRQ_NONE;
+       }
+}
+
+/**
  * ufshcd_disable_ee - disable exception event
  * @hba: per-adapter instance
  * @mask: exception event to disable
@@ -3244,7 +5720,7 @@ static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
 
        val = hba->ee_ctrl_mask & ~mask;
        val &= 0xFFFF; /* 2 bytes */
-       err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+       err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
        if (!err)
                hba->ee_ctrl_mask &= ~mask;
@@ -3272,7 +5748,7 @@ static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
 
        val = hba->ee_ctrl_mask | mask;
        val &= 0xFFFF; /* 2 bytes */
-       err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+       err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
        if (!err)
                hba->ee_ctrl_mask |= mask;
@@ -3298,7 +5774,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
        if (hba->auto_bkops_enabled)
                goto out;
 
-       err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+       err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
                        QUERY_FLAG_IDN_BKOPS_EN, NULL);
        if (err) {
                dev_err(hba->dev, "%s: failed to enable bkops %d\n",
@@ -3307,6 +5783,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
        }
 
        hba->auto_bkops_enabled = true;
+       trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
 
        /* No need of URGENT_BKOPS exception from the device */
        err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -3347,7 +5824,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
                goto out;
        }
 
-       err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+       err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
                        QUERY_FLAG_IDN_BKOPS_EN, NULL);
        if (err) {
                dev_err(hba->dev, "%s: failed to disable bkops %d\n",
@@ -3357,6 +5834,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
        }
 
        hba->auto_bkops_enabled = false;
+       trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
 out:
        return err;
 }
@@ -3385,7 +5863,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
 
 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
 {
-       return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+       return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
                        QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
 }
 
@@ -3443,49 +5921,192 @@ out:
  */
 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
 {
-       return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+       return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
+}
+
+static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+{
+       return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+                       QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+}
+
+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+       int err;
+       u32 curr_status = 0;
+
+       if (hba->is_urgent_bkops_lvl_checked)
+               goto enable_auto_bkops;
+
+       err = ufshcd_get_bkops_status(hba, &curr_status);
+       if (err) {
+               dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+                               __func__, err);
+               goto out;
+       }
+
+       /*
+        * Some devices raise the urgent bkops exception event even when the
+        * BKOPS status doesn't indicate a performance impact or a critical
+        * state. Handle such devices by determining their urgent bkops
+        * status at runtime.
+        */
+       if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+               dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+                               __func__, curr_status);
+               /* update the current status as the urgent bkops level */
+               hba->urgent_bkops_lvl = curr_status;
+               hba->is_urgent_bkops_lvl_checked = true;
+       }
+
+enable_auto_bkops:
+       err = ufshcd_enable_auto_bkops(hba);
+out:
+       if (err < 0)
+               dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+                               __func__, err);
+}
+
+/**
+ * ufshcd_exception_event_handler - handle exceptions raised by device
+ * @work: pointer to work data
+ *
+ * Read bExceptionEventStatus attribute from the device and handle the
+ * exception event accordingly.
+ */
+static void ufshcd_exception_event_handler(struct work_struct *work)
+{
+       struct ufs_hba *hba;
+       int err;
+       u32 status = 0;
+       hba = container_of(work, struct ufs_hba, eeh_work);
+
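+       /*
+        * Keep the device resumed and block incoming SCSI requests while
+        * the exception event status is read and handled.
+        */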
+       pm_runtime_get_sync(hba->dev);
+       ufshcd_scsi_block_requests(hba);
+       err = ufshcd_get_ee_status(hba, &status);
+       if (err) {
+               dev_err(hba->dev, "%s: failed to get exception status %d\n",
+                               __func__, err);
+               goto out;
+       }
+
+       status &= hba->ee_ctrl_mask;
+
+       if (status & MASK_EE_URGENT_BKOPS)
+               ufshcd_bkops_exception_event_handler(hba);
+
+out:
+       ufshcd_scsi_unblock_requests(hba);
+       /*
+        * pm_runtime_get_noresume is called while scheduling
+        * eeh_work to avoid suspend racing with exception work.
+        * Hence decrement the usage counter using pm_runtime_put_noidle
+        * to allow suspend once the exception event handler has completed.
+        */
+       pm_runtime_put_noidle(hba->dev);
+       pm_runtime_put(hba->dev);
+       return;
 }
 
-static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
 {
-       return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
-                       QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+       ufshcd_transfer_req_compl(hba);
+       ufshcd_tmc_handler(hba);
 }
 
 /**
- * ufshcd_exception_event_handler - handle exceptions raised by device
- * @work: pointer to work data
+ * ufshcd_quirk_dl_nac_errors - check whether error handling is required
+ *                             to recover from DL NAC errors
+ * @hba: per-adapter instance
  *
- * Read bExceptionEventStatus attribute from the device and handle the
- * exception event accordingly.
+ * Returns true if error handling is required, false otherwise
  */
-static void ufshcd_exception_event_handler(struct work_struct *work)
+static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
 {
-       struct ufs_hba *hba;
-       int err;
-       u32 status = 0;
-       hba = container_of(work, struct ufs_hba, eeh_work);
+       unsigned long flags;
+       bool err_handling = true;
 
-       pm_runtime_get_sync(hba->dev);
-       scsi_block_requests(hba->host);
-       err = ufshcd_get_ee_status(hba, &status);
-       if (err) {
-               dev_err(hba->dev, "%s: failed to get exception status %d\n",
-                               __func__, err);
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       /*
+        * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
+        * device fatal errors and/or DL NAC & REPLAY timeout errors.
+        */
+       if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
+               goto out;
+
+       if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
+           ((hba->saved_err & UIC_ERROR) &&
+            (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
+               /*
+                * We have to do error recovery, but at least silence the
+                * error logs.
+                */
+               hba->silence_err_logs = true;
                goto out;
        }
 
-       status &= hba->ee_ctrl_mask;
-       if (status & MASK_EE_URGENT_BKOPS) {
-               err = ufshcd_urgent_bkops(hba);
-               if (err < 0)
-                       dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
-                                       __func__, err);
+       if ((hba->saved_err & UIC_ERROR) &&
+           (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
+               int err;
+               /*
+                * Wait 50ms to see whether any other errors show up.
+                */
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               msleep(50);
+               spin_lock_irqsave(hba->host->host_lock, flags);
+
+               /*
+                * Now check whether we have received any severe errors other
+                * than the DL NAC error.
+                */
+               if ((hba->saved_err & INT_FATAL_ERRORS) ||
+                   ((hba->saved_err & UIC_ERROR) &&
+                   (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
+                       if (((hba->saved_err & INT_FATAL_ERRORS) ==
+                               DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
+                                       ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
+                               hba->silence_err_logs = true;
+                       goto out;
+               }
+
+               /*
+                * As DL NAC is the only error received so far, send out a NOP
+                * command to confirm whether the link is still active.
+                *   - If we don't get any response then do error recovery.
+                *   - If we get a response then clear the DL NAC error bit.
+                */
+
+               /* silence the error logs from NOP command */
+               hba->silence_err_logs = true;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               err = ufshcd_verify_dev_init(hba);
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               hba->silence_err_logs = false;
+
+               if (err) {
+                       hba->silence_err_logs = true;
+                       goto out;
+               }
+
+               /* Link seems to be alive hence ignore the DL NAC errors */
+               if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
+                       hba->saved_err &= ~UIC_ERROR;
+               /* clear NAC error */
+               hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+               if (!hba->saved_uic_err) {
+                       err_handling = false;
+                       goto out;
+               }
+               /*
+                * There seem to be errors other than NAC, so perform error
+                * recovery.
+                */
+               hba->silence_err_logs = true;
        }
 out:
-       scsi_unblock_requests(hba->host);
-       pm_runtime_put_sync(hba->dev);
-       return;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return err_handling;
 }
 
 /**
@@ -3496,51 +6117,149 @@ static void ufshcd_err_handler(struct work_struct *work)
 {
        struct ufs_hba *hba;
        unsigned long flags;
-       u32 err_xfer = 0;
-       u32 err_tm = 0;
+       bool err_xfer = false, err_tm = false;
        int err = 0;
        int tag;
+       bool needs_reset = false;
+       bool clks_enabled = false;
 
        hba = container_of(work, struct ufs_hba, eh_work);
 
-       pm_runtime_get_sync(hba->dev);
-       ufshcd_hold(hba, false);
-
        spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
-               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       ufsdbg_set_err_state(hba);
+
+       if (hba->ufshcd_state == UFSHCD_STATE_RESET)
                goto out;
+
+       /*
+        * Make sure the clocks are ON before we proceed with error
+        * handling. In the majority of cases the error handler runs
+        * with the clocks ON. However, the error handler may have been
+        * scheduled due to an auto hibern8 error interrupt, in which
+        * case the clocks could be gated, or in the process of being
+        * gated, when the error handler runs.
+        */
+       if (unlikely((hba->clk_gating.state != CLKS_ON) &&
+           ufshcd_is_auto_hibern8_supported(hba))) {
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
+               ufshcd_hold(hba, false);
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               clks_enabled = true;
        }
 
        hba->ufshcd_state = UFSHCD_STATE_RESET;
        ufshcd_set_eh_in_progress(hba);
 
        /* Complete requests that have door-bell cleared by h/w */
-       ufshcd_transfer_req_compl(hba);
-       ufshcd_tmc_handler(hba);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       ufshcd_complete_requests(hba);
+
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+               bool ret;
+
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
+               ret = ufshcd_quirk_dl_nac_errors(hba);
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               if (!ret)
+                       goto skip_err_handling;
+       }
+
+       /*
+        * Dump controller state before resetting. Transfer request state
+        * will be dumped as part of request completion.
+        */
+       if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
+               dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
+                       __func__, hba->saved_err, hba->saved_uic_err);
+               if (!hba->silence_err_logs) {
+                       /* release lock as print host regs sleeps */
+                       spin_unlock_irqrestore(hba->host->host_lock, flags);
+                       ufshcd_print_host_regs(hba);
+                       ufshcd_print_host_state(hba);
+                       ufshcd_print_pwr_info(hba);
+                       ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+                       ufshcd_print_cmd_log(hba);
+                       spin_lock_irqsave(hba->host->host_lock, flags);
+               }
+       }
+
+       if ((hba->saved_err & INT_FATAL_ERRORS)
+           || hba->saved_ce_err || hba->force_host_reset ||
+           ((hba->saved_err & UIC_ERROR) &&
+           (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
+                                  UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+                                  UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+               needs_reset = true;
+
+       /*
+        * If a host reset is required then skip forcefully clearing the
+        * pending transfers because they will automatically be cleared
+        * after link startup.
+        */
+       if (needs_reset)
+               goto skip_pending_xfer_clear;
 
+       /* release lock as clear command might sleep */
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
        /* Clear pending transfer requests */
-       for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
-               if (ufshcd_clear_cmd(hba, tag))
-                       err_xfer |= 1 << tag;
+       for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+               if (ufshcd_clear_cmd(hba, tag)) {
+                       err_xfer = true;
+                       goto lock_skip_pending_xfer_clear;
+               }
+       }
 
        /* Clear pending task management requests */
-       for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
-               if (ufshcd_clear_tm_cmd(hba, tag))
-                       err_tm |= 1 << tag;
+       for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
+               if (ufshcd_clear_tm_cmd(hba, tag)) {
+                       err_tm = true;
+                       goto lock_skip_pending_xfer_clear;
+               }
+       }
 
-       /* Complete the requests that are cleared by s/w */
+lock_skip_pending_xfer_clear:
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ufshcd_transfer_req_compl(hba);
-       ufshcd_tmc_handler(hba);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+       /* Complete the requests that are cleared by s/w */
+       ufshcd_complete_requests(hba);
+
+       if (err_xfer || err_tm)
+               needs_reset = true;
+
+skip_pending_xfer_clear:
        /* Fatal errors need reset */
-       if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
-                       ((hba->saved_err & UIC_ERROR) &&
-                        (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
+       if (needs_reset) {
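+               /* Bitmask covering all hba->nutrs transfer request slots */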
+               unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
+
+               if (hba->saved_err & INT_FATAL_ERRORS)
+                       ufshcd_update_error_stats(hba,
+                                                 UFS_ERR_INT_FATAL_ERRORS);
+               if (hba->saved_ce_err)
+                       ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
+
+               if (hba->saved_err & UIC_ERROR)
+                       ufshcd_update_error_stats(hba,
+                                                 UFS_ERR_INT_UIC_ERROR);
+
+               if (err_xfer || err_tm)
+                       ufshcd_update_error_stats(hba,
+                                                 UFS_ERR_CLEAR_PEND_XFER_TM);
+
+               /*
+                * ufshcd_reset_and_restore() does the link reinitialization,
+                * which needs at least one empty doorbell slot to send the
+                * device management commands (NOP and query commands).
+                * If no slot is empty at this moment then forcefully free up
+                * the last slot.
+                */
+               if (hba->outstanding_reqs == max_doorbells)
+                       __ufshcd_transfer_req_compl(hba,
+                                                   (1UL << (hba->nutrs - 1)));
+
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
                err = ufshcd_reset_and_restore(hba);
+               spin_lock_irqsave(hba->host->host_lock, flags);
                if (err) {
                        dev_err(hba->dev, "%s: reset and restore failed\n",
                                        __func__);
@@ -3553,76 +6272,225 @@ static void ufshcd_err_handler(struct work_struct *work)
                scsi_report_bus_reset(hba->host, 0);
                hba->saved_err = 0;
                hba->saved_uic_err = 0;
+               hba->saved_ce_err = 0;
+               hba->force_host_reset = false;
+       }
+
+skip_err_handling:
+       if (!needs_reset) {
+               hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+               if (hba->saved_err || hba->saved_uic_err)
+                       dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
+                           __func__, hba->saved_err, hba->saved_uic_err);
+       }
+
+       hba->silence_err_logs = false;
+
+       if (clks_enabled) {
+               __ufshcd_release(hba, false);
+               hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
        }
+out:
        ufshcd_clear_eh_in_progress(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
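+/* Record a UIC error value and timestamp in the circular error history */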
+static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
+               u32 reg)
+{
+       reg_hist->reg[reg_hist->pos] = reg;
+       reg_hist->tstamp[reg_hist->pos] = ktime_get();
+       reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
+}
+
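+/**
+ * ufshcd_rls_handler - restore the power mode after a line-reset event
+ * @work: pointer to work data
+ *
+ * Waits for the doorbell to clear, reads back the negotiated power mode and
+ * gears, and reprograms the power mode if it no longer matches pwr_info.
+ */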
+static void ufshcd_rls_handler(struct work_struct *work)
+{
+       struct ufs_hba *hba;
+       int ret = 0;
+       u32 mode;
+
+       hba = container_of(work, struct ufs_hba, rls_work);
+       pm_runtime_get_sync(hba->dev);
+       ufshcd_scsi_block_requests(hba);
+       down_write(&hba->lock);
+       ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+       if (ret) {
+               dev_err(hba->dev,
+                       "Timed out (%d) waiting for DB to clear\n",
+                       ret);
+               goto out;
+       }
+
+       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
+       if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
+               hba->restore_needed = true;
+
+       if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
+               hba->restore_needed = true;
+
+       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
+       if (hba->pwr_info.gear_rx != mode)
+               hba->restore_needed = true;
+
+       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
+       if (hba->pwr_info.gear_tx != mode)
+               hba->restore_needed = true;
+
+       if (hba->restore_needed)
+               ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+
+       if (ret)
+               dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+                       __func__, ret);
+       else
+               hba->restore_needed = false;
 
 out:
-       scsi_unblock_requests(hba->host);
-       ufshcd_release(hba);
+       up_write(&hba->lock);
+       ufshcd_scsi_unblock_requests(hba);
        pm_runtime_put_sync(hba->dev);
 }
 
 /**
  * ufshcd_update_uic_error - check and set fatal UIC error flags.
  * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If the interrupt is valid
+ *  IRQ_NONE    - If the interrupt is invalid
  */
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
 {
        u32 reg;
+       irqreturn_t retval = IRQ_NONE;
+
+       /* PHY layer lane error */
+       reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+       if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+           (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
+               /*
+                * To know whether this error is fatal or not, the DB timeout
+                * must be checked, but that error is handled separately.
+                */
+               dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
+                               __func__, reg);
+               ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+
+               /*
+                * Don't ignore LINERESET indication during hibern8
+                * enter operation.
+                */
+               if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
+                       struct uic_command *cmd = hba->active_uic_cmd;
+
+                       if (cmd) {
+                               if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
+                                       dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
+                                               __func__, reg);
+                                       hba->full_init_linereset = true;
+                               }
+                       }
+                       if (!hba->full_init_linereset)
+                               schedule_work(&hba->rls_work);
+               }
+               retval |= IRQ_HANDLED;
+       }
 
        /* PA_INIT_ERROR is fatal and needs UIC reset */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
-       if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
-               hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+       if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+           (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
+               ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+
+               if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
+                       hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+               } else if (hba->dev_quirks &
+                          UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+                       if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+                               hba->uic_error |=
+                                       UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+                       else if (reg &
+                                UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+                               hba->uic_error |=
+                                       UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+               }
+               retval |= IRQ_HANDLED;
+       }
 
        /* UIC NL/TL/DME errors needs software retry */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
-       if (reg)
+       if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+           (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
+               ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
                hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+               retval |= IRQ_HANDLED;
+       }
 
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
-       if (reg)
+       if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+           (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
+               ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
                hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+               retval |= IRQ_HANDLED;
+       }
 
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
-       if (reg)
+       if ((reg & UIC_DME_ERROR) &&
+           (reg & UIC_DME_ERROR_CODE_MASK)) {
+               ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
                hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+               retval |= IRQ_HANDLED;
+       }
 
        dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
                        __func__, hba->uic_error);
+       return retval;
 }
 
 /**
  * ufshcd_check_errors - Check for errors that need s/w attention
  * @hba: per-adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If the interrupt is valid
+ *  IRQ_NONE    - If the interrupt is invalid
  */
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 {
        bool queue_eh_work = false;
+       irqreturn_t retval = IRQ_NONE;
 
-       if (hba->errors & INT_FATAL_ERRORS)
+       if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
                queue_eh_work = true;
 
        if (hba->errors & UIC_ERROR) {
                hba->uic_error = 0;
-               ufshcd_update_uic_error(hba);
+               retval = ufshcd_update_uic_error(hba);
                if (hba->uic_error)
                        queue_eh_work = true;
        }
 
        if (queue_eh_work) {
+               /*
+                * Update the transfer error masks to sticky bits; do this
+                * irrespective of the current ufshcd_state.
+                */
+               hba->saved_err |= hba->errors;
+               hba->saved_uic_err |= hba->uic_error;
+               hba->saved_ce_err |= hba->ce_error;
+
                /* handle fatal errors only when link is functional */
                if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
-                       /* block commands from scsi mid-layer */
-                       scsi_block_requests(hba->host);
-
-                       /* transfer error masks to sticky bits */
-                       hba->saved_err |= hba->errors;
-                       hba->saved_uic_err |= hba->uic_error;
+                       /*
+                        * Set the error handling in progress flag early so
+                        * that we don't issue new requests anymore.
+                        */
+                       ufshcd_set_eh_in_progress(hba);
 
                        hba->ufshcd_state = UFSHCD_STATE_ERROR;
                        schedule_work(&hba->eh_work);
                }
+               retval |= IRQ_HANDLED;
        }
        /*
         * if (!queue_eh_work) -
@@ -3630,40 +6498,63 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
         * itself without s/w intervention or errors that will be
         * handled by the SCSI core layer.
         */
+       return retval;
 }
 
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
+ *
+ * Returns
+ *  IRQ_HANDLED - If the interrupt is valid
+ *  IRQ_NONE    - If the interrupt is invalid
  */
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
        u32 tm_doorbell;
 
        tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
        hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
-       wake_up(&hba->tm_wq);
+       if (hba->tm_condition) {
+               wake_up(&hba->tm_wq);
+               return IRQ_HANDLED;
+       } else {
+               return IRQ_NONE;
+       }
 }
 
 /**
  * ufshcd_sl_intr - Interrupt service routine
  * @hba: per adapter instance
  * @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ *  IRQ_HANDLED - If the interrupt is valid
+ *  IRQ_NONE    - If the interrupt is invalid
  */
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
+       irqreturn_t retval = IRQ_NONE;
+
+       ufsdbg_error_inject_dispatcher(hba,
+               ERR_INJECT_INTR, intr_status, &intr_status);
+
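+       /* Collect any crypto engine errors so they are handled below */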
+       ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
+
        hba->errors = UFSHCD_ERROR_MASK & intr_status;
-       if (hba->errors)
-               ufshcd_check_errors(hba);
+       if (hba->errors || hba->ce_error)
+               retval |= ufshcd_check_errors(hba);
 
        if (intr_status & UFSHCD_UIC_MASK)
-               ufshcd_uic_cmd_compl(hba, intr_status);
+               retval |= ufshcd_uic_cmd_compl(hba, intr_status);
 
        if (intr_status & UTP_TASK_REQ_COMPL)
-               ufshcd_tmc_handler(hba);
+               retval |= ufshcd_tmc_handler(hba);
 
        if (intr_status & UTP_TRANSFER_REQ_COMPL)
-               ufshcd_transfer_req_compl(hba);
+               retval |= ufshcd_transfer_req_compl(hba);
+
+       return retval;
 }
 
 /**
@@ -3671,23 +6562,45 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  * @irq: irq number
  * @__hba: pointer to adapter instance
  *
- * Returns IRQ_HANDLED - If interrupt is valid
- *             IRQ_NONE - If invalid interrupt
+ * Returns
+ *  IRQ_HANDLED - If the interrupt is valid
+ *  IRQ_NONE    - If the interrupt is invalid
  */
 static irqreturn_t ufshcd_intr(int irq, void *__hba)
 {
-       u32 intr_status;
+       u32 intr_status, enabled_intr_status;
        irqreturn_t retval = IRQ_NONE;
        struct ufs_hba *hba = __hba;
+       int retries = hba->nutrs;
 
        spin_lock(hba->host->host_lock);
        intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+       hba->ufs_stats.last_intr_status = intr_status;
+       hba->ufs_stats.last_intr_ts = ktime_get();
+       /*
+        * There can be at most hba->nutrs requests in flight. In the worst
+        * case, if they complete one by one after the interrupt status is
+        * read, handle them by re-checking the interrupt status in a loop
+        * until all of them have been processed before returning.
+        */
+       do {
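+               /*
+                * Only service the interrupt sources that are enabled;
+                * all pending status bits are cleared regardless.
+                */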
+               enabled_intr_status =
+                       intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+               if (intr_status)
+                       ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+               if (enabled_intr_status)
+                       retval |= ufshcd_sl_intr(hba, enabled_intr_status);
 
-       if (intr_status) {
-               ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
-               ufshcd_sl_intr(hba, intr_status);
-               retval = IRQ_HANDLED;
+               intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+       } while (intr_status && --retries);
+
+       if (retval == IRQ_NONE) {
+               dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+                                       __func__, intr_status);
+               ufshcd_hex_dump("host regs: ", hba->mmio_base,
+                                       UFSHCI_REG_SPACE_SIZE);
        }
+
        spin_unlock(hba->host->host_lock);
        return retval;
 }
@@ -3708,7 +6621,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
        /* poll for max. 1 sec to clear door bell register by h/w */
        err = ufshcd_wait_for_register(hba,
                        REG_UTP_TASK_REQ_DOOR_BELL,
-                       mask, 0, 1000, 1000);
+                       mask, 0, 1000, 1000, true);
 out:
        return err;
 }
@@ -3742,7 +6655,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
         * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
         */
        wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
-       ufshcd_hold(hba, false);
+       hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
+       ufshcd_hold_all(hba);
 
        spin_lock_irqsave(host->host_lock, flags);
        task_req_descp = hba->utmrdl_base_addr;
@@ -3771,7 +6685,13 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 
        /* send command to the controller */
        __set_bit(free_slot, &hba->outstanding_tasks);
+
+       /* Make sure descriptors are ready before ringing the task doorbell */
+       wmb();
+
        ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+       /* Make sure that doorbell is committed immediately */
+       wmb();
 
        spin_unlock_irqrestore(host->host_lock, flags);
 
@@ -3793,8 +6713,9 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
        clear_bit(free_slot, &hba->tm_condition);
        ufshcd_put_tm_slot(hba, free_slot);
        wake_up(&hba->tm_tag_wq);
+       hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
 
-       ufshcd_release(hba);
+       ufshcd_release_all(hba);
        return err;
 }
 
@@ -3820,6 +6741,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
        hba = shost_priv(host);
        tag = cmd->request->tag;
 
+       ufshcd_print_cmd_log(hba);
        lrbp = &hba->lrb[tag];
        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
@@ -3839,7 +6761,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
        spin_lock_irqsave(host->host_lock, flags);
        ufshcd_transfer_req_compl(hba);
        spin_unlock_irqrestore(host->host_lock, flags);
+
 out:
+       hba->req_abort_count = 0;
        if (!err) {
                err = SUCCESS;
        } else {
@@ -3849,6 +6773,17 @@ out:
        return err;
 }
 
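+/* Mark the given tags so that further abort attempts on them fail fast */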
+static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
+{
+       struct ufshcd_lrb *lrbp;
+       int tag;
+
+       for_each_set_bit(tag, &bitmap, hba->nutrs) {
+               lrbp = &hba->lrb[tag];
+               lrbp->req_abort_skip = true;
+       }
+}
+
 /**
  * ufshcd_abort - abort a specific command
  * @cmd: SCSI command pointer
@@ -3876,31 +6811,87 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        host = cmd->device->host;
        hba = shost_priv(host);
        tag = cmd->request->tag;
+       if (!ufshcd_valid_tag(hba, tag)) {
+               dev_err(hba->dev,
+                       "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+                       __func__, tag, cmd, cmd->request);
+               BUG();
+       }
 
-       ufshcd_hold(hba, false);
+       lrbp = &hba->lrb[tag];
+
+       ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
+
+       /*
+        * Task abort to the device W-LUN is illegal. When this command
+        * fails due to the spec violation, the next step of SCSI error
+        * handling would be to send a LU reset which, again, is a spec
+        * violation. To avoid these unnecessary/illegal steps, skip to
+        * the last error handling stage: reset and restore.
+        */
+       if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+               return ufshcd_eh_host_reset_handler(cmd);
+
+       ufshcd_hold_all(hba);
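+       /* Snapshot the doorbell to cross-check against outstanding_reqs below */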
+       reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        /* If command is already aborted/completed, return SUCCESS */
-       if (!(test_bit(tag, &hba->outstanding_reqs)))
+       if (!(test_bit(tag, &hba->outstanding_reqs))) {
+               dev_err(hba->dev,
+                       "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+                       __func__, tag, hba->outstanding_reqs, reg);
                goto out;
+       }
 
-       reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        if (!(reg & (1 << tag))) {
                dev_err(hba->dev,
                "%s: cmd was completed, but without a notifying intr, tag = %d",
                __func__, tag);
        }
 
-       lrbp = &hba->lrb[tag];
+       /* Print Transfer Request of aborted task */
+       dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
+
+       /*
+        * Print detailed info about the aborted request.
+        * As more than one request might get aborted at the same time,
+        * print full information only for the first aborted request in order
+        * to reduce repeated printouts. For other aborted requests only print
+        * basic details.
+        */
+       scsi_print_command(cmd);
+       if (!hba->req_abort_count) {
+               ufshcd_print_fsm_state(hba);
+               ufshcd_print_host_regs(hba);
+               ufshcd_print_host_state(hba);
+               ufshcd_print_pwr_info(hba);
+               ufshcd_print_trs(hba, 1 << tag, true);
+       } else {
+               ufshcd_print_trs(hba, 1 << tag, false);
+       }
+       hba->req_abort_count++;
+
+       /* Skip task abort in case previous aborts failed and report failure */
+       if (lrbp->req_abort_skip) {
+               err = -EIO;
+               goto out;
+       }
+
        for (poll_cnt = 100; poll_cnt; poll_cnt--) {
                err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
                                UFS_QUERY_TASK, &resp);
                if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
                        /* cmd pending in the device */
+                       dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
+                               __func__, tag);
                        break;
                } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
                        /*
                         * cmd not pending in the device, check if it is
                         * in transition.
                         */
+                       dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
+                               __func__, tag);
                        reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
                        if (reg & (1 << tag)) {
                                /* sleep for max. 200us to stabilize */
@@ -3908,8 +6899,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                                continue;
                        }
                        /* command completed already */
+                       dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
+                               __func__, tag);
                        goto out;
                } else {
+                       dev_err(hba->dev,
+                               "%s: no response from device. tag = %d, err %d",
+                               __func__, tag, err);
                        if (!err)
                                err = resp; /* service response error */
                        goto out;
@@ -3924,19 +6920,25 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
                        UFS_ABORT_TASK, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
-               if (!err)
+               if (!err) {
                        err = resp; /* service response error */
+                       dev_err(hba->dev, "%s: issued. tag = %d, err %d",
+                               __func__, tag, err);
+               }
                goto out;
        }
 
        err = ufshcd_clear_cmd(hba, tag);
-       if (err)
+       if (err) {
+               dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
+                       __func__, tag, err);
                goto out;
+       }
 
        scsi_dma_unmap(cmd);
 
        spin_lock_irqsave(host->host_lock, flags);
-       __clear_bit(tag, &hba->outstanding_reqs);
+       ufshcd_outstanding_req_clear(hba, tag);
        hba->lrb[tag].cmd = NULL;
        spin_unlock_irqrestore(host->host_lock, flags);
 
@@ -3948,14 +6950,15 @@ out:
                err = SUCCESS;
        } else {
                dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+               ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
                err = FAILED;
        }
 
        /*
-        * This ufshcd_release() corresponds to the original scsi cmd that got
-        * aborted here (as we won't get any IRQ for it).
+        * This ufshcd_release_all() corresponds to the original scsi cmd that
+        * got aborted here (as we won't get any IRQ for it).
         */
-       ufshcd_release(hba);
+       ufshcd_release_all(hba);
        return err;
 }
 
@@ -3976,9 +6979,12 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 
        /* Reset the host controller */
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ufshcd_hba_stop(hba);
+       ufshcd_hba_stop(hba, false);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+       /* scale up clocks to max frequency before full reinitialization */
+       ufshcd_set_clk_freq(hba, true);
+
        err = ufshcd_hba_enable(hba);
        if (err)
                goto out;
@@ -3986,8 +6992,21 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
        /* Establish the link again and restore the device */
        err = ufshcd_probe_hba(hba);
 
-       if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
+       if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
                err = -EIO;
+               goto out;
+       }
+
+       if (!err) {
+               err = ufshcd_vops_crypto_engine_reset(hba);
+               if (err) {
+                       dev_err(hba->dev,
+                               "%s: failed to reset crypto engine %d\n",
+                               __func__, err);
+                       goto out;
+               }
+       }
+
 out:
        if (err)
                dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -4011,10 +7030,26 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
        int retries = MAX_HOST_RESET_RETRIES;
 
        do {
+               err = ufshcd_vops_full_reset(hba);
+               if (err)
+                       dev_warn(hba->dev, "%s: full reset returned %d\n",
+                                __func__, err);
+
+               err = ufshcd_reset_device(hba);
+               if (err)
+                       dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+                                __func__, err);
+
                err = ufshcd_host_reset_and_restore(hba);
        } while (err && --retries);
 
        /*
+        * There is no point in proceeding if we have failed to recover
+        * after multiple retries.
+        */
+       if (err)
+               BUG();
+       /*
         * After reset the door-bell might be cleared, complete
         * outstanding requests in s/w here.
         */
@@ -4034,13 +7069,12 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
  */
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-       int err;
+       int err = SUCCESS;
        unsigned long flags;
        struct ufs_hba *hba;
 
        hba = shost_priv(cmd->device->host);
 
-       ufshcd_hold(hba, false);
        /*
         * Check if there is any race with fatal error handling.
         * If so, wait for it to complete. Even though fatal error
@@ -4053,28 +7087,37 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
                                hba->ufshcd_state == UFSHCD_STATE_RESET))
                        break;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
-               dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+               dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
                flush_work(&hba->eh_work);
        } while (1);
 
-       hba->ufshcd_state = UFSHCD_STATE_RESET;
-       ufshcd_set_eh_in_progress(hba);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       /*
+        * We don't know whether the previous reset actually reset the
+        * host controller, so force a reset here to be sure.
+        */
+       hba->ufshcd_state = UFSHCD_STATE_ERROR;
+       hba->force_host_reset = true;
+       schedule_work(&hba->eh_work);
 
-       err = ufshcd_reset_and_restore(hba);
+       /* wait for the reset work to finish */
+       do {
+               if (!(work_pending(&hba->eh_work) ||
+                               hba->ufshcd_state == UFSHCD_STATE_RESET))
+                       break;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
+               flush_work(&hba->eh_work);
+               spin_lock_irqsave(hba->host->host_lock, flags);
+       } while (1);
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       if (!err) {
-               err = SUCCESS;
-               hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-       } else {
+       if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+             ufshcd_is_link_active(hba))) {
                err = FAILED;
                hba->ufshcd_state = UFSHCD_STATE_ERROR;
        }
-       ufshcd_clear_eh_in_progress(hba);
+
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       ufshcd_release(hba);
        return err;
 }
 
@@ -4186,9 +7229,9 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
        dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
                        __func__, hba->init_prefetch_data.icc_level);
 
-       ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
-                       QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
-                       &hba->init_prefetch_data.icc_level);
+       ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+               QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+               &hba->init_prefetch_data.icc_level);
 
        if (ret)
                dev_err(hba->dev,
@@ -4264,6 +7307,210 @@ out:
 }
 
 /**
+ * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
+ * @hba: per-adapter instance
+ *
+ * The PA_TActivate parameter can be tuned manually if the UniPro version is
+ * less than 1.61. PA_TActivate needs to be greater than or equal to the peer
+ * M-PHY's RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can
+ * help reduce the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
+{
+       int ret = 0;
+       u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
+
+       if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
+               return 0;
+
+       ret = ufshcd_dme_peer_get(hba,
+                                 UIC_ARG_MIB_SEL(
+                                       RX_MIN_ACTIVATETIME_CAPABILITY,
+                                       UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+                                 &peer_rx_min_activatetime);
+       if (ret)
+               goto out;
+
+       /* make sure proper unit conversion is applied */
+       tuned_pa_tactivate =
+               ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
+                / PA_TACTIVATE_TIME_UNIT_US);
+       ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+                            tuned_pa_tactivate);
+
+out:
+       return ret;
+}
+
+/**
+ * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
+ * @hba: per-adapter instance
+ *
+ * The PA_Hibern8Time parameter can be tuned manually if the UniPro version is
+ * less than 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
+ * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
+ * This optimal value can help reduce the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
+{
+       int ret = 0;
+       u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
+       u32 max_hibern8_time, tuned_pa_hibern8time;
+
+       ret = ufshcd_dme_get(hba,
+                            UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
+                                       UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+                                 &local_tx_hibern8_time_cap);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_peer_get(hba,
+                                 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
+                                       UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+                                 &peer_rx_hibern8_time_cap);
+       if (ret)
+               goto out;
+
+       max_hibern8_time = max(local_tx_hibern8_time_cap,
+                              peer_rx_hibern8_time_cap);
+       /* make sure proper unit conversion is applied */
+       tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
+                               / PA_HIBERN8_TIME_UNIT_US);
+       ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+                            tuned_pa_hibern8time);
+out:
+       return ret;
+}
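
Both tuning helpers above perform the same kind of unit conversion: a capability reported in one time base is rescaled into the target attribute's time base. Below is a minimal standalone sketch of the PA_TActivate case, assuming RX_MIN_ACTIVATETIME_UNIT_US = 100 and PA_TACTIVATE_TIME_UNIT_US = 10 purely for illustration; the driver's own macro definitions are authoritative.

	#include <stdio.h>

	/* assumed unit sizes, for illustration only */
	#define RX_MIN_ACTIVATETIME_UNIT_US	100
	#define PA_TACTIVATE_TIME_UNIT_US	10

	int main(void)
	{
		/* example raw capability value reported by the peer M-PHY */
		unsigned int peer_rx_min_activatetime = 5;

		/* same conversion as ufshcd_tune_pa_tactivate():
		 * 5 * 100us / 10us = 50 PA_TACTIVATE units */
		unsigned int tuned_pa_tactivate =
			(peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) /
			PA_TACTIVATE_TIME_UNIT_US;

		printf("PA_TACTIVATE = %u units\n", tuned_pa_tactivate);
		return 0;
	}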
+
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
+ * enabled for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+       int ret = 0;
+       u32 granularity, peer_granularity;
+       u32 pa_tactivate, peer_pa_tactivate;
+       u32 pa_tactivate_us, peer_pa_tactivate_us;
+       u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+                                 &granularity);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+                                 &peer_granularity);
+       if (ret)
+               goto out;
+
+       if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+           (granularity > PA_GRANULARITY_MAX_VAL)) {
+               dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+                       __func__, granularity);
+               return -EINVAL;
+       }
+
+       if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+           (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+               dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+                       __func__, peer_granularity);
+               return -EINVAL;
+       }
+
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+                                 &peer_pa_tactivate);
+       if (ret)
+               goto out;
+
+       pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+       peer_pa_tactivate_us = peer_pa_tactivate *
+                            gran_to_us_table[peer_granularity - 1];
+
+       if (pa_tactivate_us > peer_pa_tactivate_us) {
+               u32 new_peer_pa_tactivate;
+
+               new_peer_pa_tactivate = pa_tactivate_us /
+                                     gran_to_us_table[peer_granularity - 1];
+               new_peer_pa_tactivate++;
+               ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+                                         new_peer_pa_tactivate);
+       }
+
+out:
+       return ret;
+}
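
The quirk above compares both sides in microseconds, using the PA_GRANULARITY index (1..6) to select the time unit, and then bumps the device value to just above the host's. A standalone sketch of that arithmetic; the table is copied from the function, all input values are made-up examples:

	#include <stdio.h>

	int main(void)
	{
		/* PA_GRANULARITY index 1..6 -> time unit in microseconds */
		static const unsigned char gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
		unsigned int granularity = 4;		/* host: 16us units (example) */
		unsigned int peer_granularity = 2;	/* device: 4us units (example) */
		unsigned int pa_tactivate = 2;		/* host raw value (example) */
		unsigned int peer_pa_tactivate = 5;	/* device raw value (example) */

		unsigned int pa_tactivate_us =
			pa_tactivate * gran_to_us_table[granularity - 1];	/* 32us */
		unsigned int peer_pa_tactivate_us =
			peer_pa_tactivate * gran_to_us_table[peer_granularity - 1]; /* 20us */

		if (pa_tactivate_us > peer_pa_tactivate_us) {
			/* raise the device value just above the host's, in device units */
			unsigned int new_peer_pa_tactivate =
				pa_tactivate_us / gran_to_us_table[peer_granularity - 1] + 1;
			printf("new peer PA_TACTIVATE = %u (%uus)\n",
			       new_peer_pa_tactivate,
			       new_peer_pa_tactivate *
			       gran_to_us_table[peer_granularity - 1]);
		}
		return 0;
	}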
+
+static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+{
+       if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
+               ufshcd_tune_pa_tactivate(hba);
+               ufshcd_tune_pa_hibern8time(hba);
+       }
+
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
+               /* set 1ms timeout for PA_TACTIVATE */
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+               ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+       ufshcd_vops_apply_dev_quirks(hba);
+}
+
+static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+{
+       int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
+
+       memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
+       memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
+       memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
+       memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
+       memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
+
+       hba->req_abort_count = 0;
+}
+
+static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
+{
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
+               if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
+                   UIC_LINK_OFF_STATE) {
+                       hba->rpm_lvl =
+                               ufs_get_desired_pm_lvl_for_dev_link_state(
+                                               UFS_SLEEP_PWR_MODE,
+                                               UIC_LINK_HIBERN8_STATE);
+                       dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
+                               hba->rpm_lvl);
+               }
+               if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+                   UIC_LINK_OFF_STATE) {
+                       hba->spm_lvl =
+                               ufs_get_desired_pm_lvl_for_dev_link_state(
+                                               UFS_SLEEP_PWR_MODE,
+                                               UIC_LINK_HIBERN8_STATE);
+                       dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
+                               hba->spm_lvl);
+               }
+       }
+}
+
+/**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
  *
@@ -4272,12 +7519,17 @@ out:
 static int ufshcd_probe_hba(struct ufs_hba *hba)
 {
        int ret;
+       ktime_t start = ktime_get();
 
        ret = ufshcd_link_startup(hba);
        if (ret)
                goto out;
 
-       ufshcd_init_pwr_info(hba);
+       /* Debug counters initialization */
+       ufshcd_clear_dbg_ufs_stats(hba);
+       /* set the default level for urgent bkops */
+       hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
+       hba->is_urgent_bkops_lvl_checked = false;
 
        /* UniPro link is active now */
        ufshcd_set_link_active(hba);
@@ -4290,10 +7542,18 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
        if (ret)
                goto out;
 
+       ufs_advertise_fixup_device(hba);
+       ufshcd_tune_unipro_params(hba);
+
+       ufshcd_apply_pm_quirks(hba);
+       ret = ufshcd_set_vccq_rail_unused(hba,
+               (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
+       if (ret)
+               goto out;
+
        /* UFS device is also active now */
        ufshcd_set_ufs_dev_active(hba);
        ufshcd_force_reset_auto_bkops(hba);
-       hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
        hba->wlun_dev_clr_ua = true;
 
        if (ufshcd_get_max_pwr_mode(hba)) {
@@ -4302,11 +7562,15 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
                        __func__);
        } else {
                ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
-               if (ret)
+               if (ret) {
                        dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
                                        __func__, ret);
+                       goto out;
+               }
        }
 
+       /* set the state as operational after switching to desired gear */
+       hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
        /*
         * If we are in error handling context or in power management callbacks
         * context, no need to scan the host
@@ -4316,8 +7580,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 
                /* clear any previous UFS device information */
                memset(&hba->dev_info, 0, sizeof(hba->dev_info));
-               if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
-                                      QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+               if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+                               QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
                        hba->dev_info.f_power_on_wp_en = flag;
 
                if (!hba->is_init_prefetch)
@@ -4328,6 +7592,27 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
                if (ret)
                        goto out;
 
+               /* Initialize devfreq after UFS device is detected */
+               if (ufshcd_is_clkscaling_supported(hba)) {
+                       memcpy(&hba->clk_scaling.saved_pwr_info.info,
+                           &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
+                       hba->clk_scaling.saved_pwr_info.is_valid = true;
+                       hba->clk_scaling.is_scaled_up = true;
+                       if (!hba->devfreq) {
+                               hba->devfreq = devfreq_add_device(hba->dev,
+                                                       &ufs_devfreq_profile,
+                                                       "simple_ondemand",
+                                                       gov_data);
+                               if (IS_ERR(hba->devfreq)) {
+                                       ret = PTR_ERR(hba->devfreq);
+                                       dev_err(hba->dev, "Unable to register with devfreq %d\n",
+                                               ret);
+                                       goto out;
+                               }
+                       }
+                       hba->clk_scaling.is_allowed = true;
+               }
+
                scsi_scan_host(hba->host);
                pm_runtime_put_sync(hba->dev);
        }
@@ -4335,10 +7620,13 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
        if (!hba->is_init_prefetch)
                hba->is_init_prefetch = true;
 
-       /* Resume devfreq after UFS device is detected */
-       if (ufshcd_is_clkscaling_enabled(hba))
-               devfreq_resume_device(hba->devfreq);
-
+       /*
+        * Enable auto hibern8 if supported, after full host and
+        * device initialization.
+        */
+       if (ufshcd_is_auto_hibern8_supported(hba))
+               ufshcd_set_auto_hibern8_timer(hba,
+                                     hba->hibern8_on_idle.delay_ms);
 out:
        /*
         * If we failed to initialize the device or the device is not
@@ -4349,6 +7637,9 @@ out:
                ufshcd_hba_exit(hba);
        }
 
+       trace_ufshcd_init(dev_name(hba->dev), ret,
+               ktime_to_us(ktime_sub(ktime_get(), start)),
+               hba->curr_dev_pwr_mode, hba->uic_link_state);
        return ret;
 }
 
@@ -4361,7 +7652,296 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 {
        struct ufs_hba *hba = (struct ufs_hba *)data;
 
+       /*
+        * Don't allow clock gating or hibern8 entry, in order to speed up
+        * device detection.
+        */
+       ufshcd_hold_all(hba);
        ufshcd_probe_hba(hba);
+       ufshcd_release_all(hba);
+}
+
+/**
+ * ufshcd_query_ioctl - perform user read queries
+ * @hba: per-adapter instance
+ * @lun: used for lun specific queries
+ * @buffer: user space buffer for reading and submitting query data and params
+ * @return: 0 for success, negative error code otherwise
+ *
+ * The expected/submitted buffer structure is struct ufs_ioctl_query_data.
+ * It reads the opcode, idn and buf_size parameters, puts the response in
+ * the buffer field and updates the used size in buf_size.
+ */
+static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
+{
+       struct ufs_ioctl_query_data *ioctl_data;
+       int err = 0;
+       int length = 0;
+       void *data_ptr;
+       bool flag;
+       u32 att;
+       u8 index;
+       u8 *desc = NULL;
+
+       ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
+       if (!ioctl_data) {
+               dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
+                               sizeof(struct ufs_ioctl_query_data));
+               err = -ENOMEM;
+               goto out;
+       }
+
+       /* extract params from user buffer */
+       err = copy_from_user(ioctl_data, buffer,
+                       sizeof(struct ufs_ioctl_query_data));
+       if (err) {
+               dev_err(hba->dev,
+                       "%s: Failed copying buffer from user, err %d\n",
+                       __func__, err);
+               goto out_release_mem;
+       }
+
+       /* verify legal parameters & send query */
+       switch (ioctl_data->opcode) {
+       case UPIU_QUERY_OPCODE_READ_DESC:
+               switch (ioctl_data->idn) {
+               case QUERY_DESC_IDN_DEVICE:
+               case QUERY_DESC_IDN_CONFIGURAION:
+               case QUERY_DESC_IDN_INTERCONNECT:
+               case QUERY_DESC_IDN_GEOMETRY:
+               case QUERY_DESC_IDN_POWER:
+                       index = 0;
+                       break;
+               case QUERY_DESC_IDN_UNIT:
+                       if (!ufs_is_valid_unit_desc_lun(lun)) {
+                               dev_err(hba->dev,
+                                       "%s: No unit descriptor for lun 0x%x\n",
+                                       __func__, lun);
+                               err = -EINVAL;
+                               goto out_release_mem;
+                       }
+                       index = lun;
+                       break;
+               default:
+                       goto out_einval;
+               }
+               length = min_t(int, QUERY_DESC_MAX_SIZE,
+                               ioctl_data->buf_size);
+               desc = kzalloc(length, GFP_KERNEL);
+               if (!desc) {
+                       dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
+                                       __func__, length);
+                       err = -ENOMEM;
+                       goto out_release_mem;
+               }
+               err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
+                               ioctl_data->idn, index, 0, desc, &length);
+               break;
+       case UPIU_QUERY_OPCODE_READ_ATTR:
+               switch (ioctl_data->idn) {
+               case QUERY_ATTR_IDN_BOOT_LU_EN:
+               case QUERY_ATTR_IDN_POWER_MODE:
+               case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
+               case QUERY_ATTR_IDN_OOO_DATA_EN:
+               case QUERY_ATTR_IDN_BKOPS_STATUS:
+               case QUERY_ATTR_IDN_PURGE_STATUS:
+               case QUERY_ATTR_IDN_MAX_DATA_IN:
+               case QUERY_ATTR_IDN_MAX_DATA_OUT:
+               case QUERY_ATTR_IDN_REF_CLK_FREQ:
+               case QUERY_ATTR_IDN_CONF_DESC_LOCK:
+               case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
+               case QUERY_ATTR_IDN_EE_CONTROL:
+               case QUERY_ATTR_IDN_EE_STATUS:
+               case QUERY_ATTR_IDN_SECONDS_PASSED:
+                       index = 0;
+                       break;
+               case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
+               case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
+                       index = lun;
+                       break;
+               default:
+                       goto out_einval;
+               }
+               err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
+                                       index, 0, &att);
+               break;
+
+       case UPIU_QUERY_OPCODE_WRITE_ATTR:
+               err = copy_from_user(&att,
+                               buffer + sizeof(struct ufs_ioctl_query_data),
+                               sizeof(u32));
+               if (err) {
+                       dev_err(hba->dev,
+                               "%s: Failed copying buffer from user, err %d\n",
+                               __func__, err);
+                       goto out_release_mem;
+               }
+
+               switch (ioctl_data->idn) {
+               case QUERY_ATTR_IDN_BOOT_LU_EN:
+                       index = 0;
+                       if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
+                               dev_err(hba->dev,
+                                       "%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
+                                       __func__, ioctl_data->opcode,
+                                       (unsigned int)ioctl_data->idn, att);
+                               err = -EINVAL;
+                               goto out_release_mem;
+                       }
+                       break;
+               default:
+                       goto out_einval;
+               }
+               err = ufshcd_query_attr(hba, ioctl_data->opcode,
+                                       ioctl_data->idn, index, 0, &att);
+               break;
+
+       case UPIU_QUERY_OPCODE_READ_FLAG:
+               switch (ioctl_data->idn) {
+               case QUERY_FLAG_IDN_FDEVICEINIT:
+               case QUERY_FLAG_IDN_PERMANENT_WPE:
+               case QUERY_FLAG_IDN_PWR_ON_WPE:
+               case QUERY_FLAG_IDN_BKOPS_EN:
+               case QUERY_FLAG_IDN_PURGE_ENABLE:
+               case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
+               case QUERY_FLAG_IDN_BUSY_RTC:
+                       break;
+               default:
+                       goto out_einval;
+               }
+               err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
+                               ioctl_data->idn, &flag);
+               break;
+       default:
+               goto out_einval;
+       }
+
+       if (err) {
+               dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
+                               ioctl_data->idn);
+               goto out_release_mem;
+       }
+
+       /*
+        * Copy the response data. We might end up reading less data than
+        * what is specified in "ioctl_data->buf_size", so update
+        * "ioctl_data->buf_size" to the size actually read.
+        */
+       switch (ioctl_data->opcode) {
+       case UPIU_QUERY_OPCODE_READ_DESC:
+               ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
+               data_ptr = desc;
+               break;
+       case UPIU_QUERY_OPCODE_READ_ATTR:
+               ioctl_data->buf_size = sizeof(u32);
+               data_ptr = &att;
+               break;
+       case UPIU_QUERY_OPCODE_READ_FLAG:
+               ioctl_data->buf_size = 1;
+               data_ptr = &flag;
+               break;
+       case UPIU_QUERY_OPCODE_WRITE_ATTR:
+               goto out_release_mem;
+       default:
+               goto out_einval;
+       }
+
+       /* copy to user */
+       err = copy_to_user(buffer, ioctl_data,
+                       sizeof(struct ufs_ioctl_query_data));
+       if (err)
+               dev_err(hba->dev, "%s: Failed copying back to user.\n",
+                       __func__);
+       err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
+                       data_ptr, ioctl_data->buf_size);
+       if (err)
+               dev_err(hba->dev, "%s: err %d copying back to user.\n",
+                               __func__, err);
+       goto out_release_mem;
+
+out_einval:
+       dev_err(hba->dev,
+               "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
+               __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
+       err = -EINVAL;
+out_release_mem:
+       kfree(ioctl_data);
+       kfree(desc);
+out:
+       return err;
+}
+
+/**
+ * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
+ * @dev: scsi device required for per LUN queries
+ * @cmd: command opcode
+ * @buffer: user space buffer for transferring data
+ *
+ * Supported commands:
+ * UFS_IOCTL_QUERY
+ */
+static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
+{
+       struct ufs_hba *hba = shost_priv(dev->host);
+       int err = 0;
+
+       BUG_ON(!hba);
+       if (!buffer) {
+               dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
+               return -EINVAL;
+       }
+
+       switch (cmd) {
+       case UFS_IOCTL_QUERY:
+               pm_runtime_get_sync(hba->dev);
+               err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
+                               buffer);
+               pm_runtime_put_sync(hba->dev);
+               break;
+       default:
+               err = -ENOIOCTLCMD;
+               dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
+                       cmd);
+               break;
+       }
+
+       return err;
+}
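
A hypothetical userspace caller of UFS_IOCTL_QUERY might look like the sketch below. The struct layout and ioctl number are taken from <scsi/ufs/ioctl.h> (included at the top of this file); the device node path and the visibility of the query opcode/IDN constants to userspace are assumptions and may differ per tree.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <scsi/ufs/ioctl.h>	/* struct ufs_ioctl_query_data, UFS_IOCTL_QUERY */

	int main(void)
	{
		/* header followed by a u32 payload, as ufshcd_query_ioctl() expects */
		uint8_t buf[sizeof(struct ufs_ioctl_query_data) + sizeof(uint32_t)];
		struct ufs_ioctl_query_data *q = (void *)buf;
		uint32_t att;
		int fd;

		fd = open("/dev/block/sda", O_RDONLY);	/* hypothetical UFS LUN node */
		if (fd < 0)
			return 1;

		memset(buf, 0, sizeof(buf));
		q->opcode = UPIU_QUERY_OPCODE_READ_ATTR; /* mirrored from the driver's ufs.h */
		q->idn = QUERY_ATTR_IDN_BOOT_LU_EN;
		q->buf_size = sizeof(uint32_t);

		if (ioctl(fd, UFS_IOCTL_QUERY, q) == 0) {
			/* per ufshcd_query_ioctl(), the payload follows the header */
			memcpy(&att, buf + sizeof(*q), sizeof(att));
			printf("bBootLunEn = %u\n", att);
		}
		close(fd);
		return 0;
	}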
+
+static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+{
+       unsigned long flags;
+       struct Scsi_Host *host;
+       struct ufs_hba *hba;
+       int index;
+       bool found = false;
+
+       if (!scmd || !scmd->device || !scmd->device->host)
+               return BLK_EH_NOT_HANDLED;
+
+       host = scmd->device->host;
+       hba = shost_priv(host);
+       if (!hba)
+               return BLK_EH_NOT_HANDLED;
+
+       spin_lock_irqsave(host->host_lock, flags);
+
+       for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+               if (hba->lrb[index].cmd == scmd) {
+                       found = true;
+                       break;
+               }
+       }
+
+       spin_unlock_irqrestore(host->host_lock, flags);
+
+       /*
+        * Bypass SCSI error handling and reset the block layer timer if this
+        * SCSI command was not actually dispatched to the UFS driver;
+        * otherwise let the SCSI layer handle the error as usual.
+        */
+       return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
 }
 
 static struct scsi_host_template ufshcd_driver_template = {
@@ -4376,6 +7956,11 @@ static struct scsi_host_template ufshcd_driver_template = {
        .eh_abort_handler       = ufshcd_abort,
        .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
        .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
+       .eh_timed_out           = ufshcd_eh_timed_out,
+       .ioctl                  = ufshcd_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl           = ufshcd_ioctl,
+#endif
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
@@ -4413,7 +7998,13 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
 {
-       return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
+       if (!vreg)
+               return 0;
+       else if (vreg->unused)
+               return 0;
+       else
+               return ufshcd_config_vreg_load(hba->dev, vreg,
+                                              UFS_VREG_LPM_LOAD_UA);
 }
 
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -4421,8 +8012,10 @@ static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
 {
        if (!vreg)
                return 0;
-
-       return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+       else if (vreg->unused)
+               return 0;
+       else
+               return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 }
 
 static int ufshcd_config_vreg(struct device *dev,
@@ -4439,21 +8032,21 @@ static int ufshcd_config_vreg(struct device *dev,
        name = vreg->name;
 
        if (regulator_count_voltages(reg) > 0) {
+               uA_load = on ? vreg->max_uA : 0;
+               ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+               if (ret)
+                       goto out;
+
                if (vreg->min_uV && vreg->max_uV) {
                        min_uV = on ? vreg->min_uV : 0;
                        ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
                        if (ret) {
                                dev_err(dev,
                                        "%s: %s set voltage failed, err=%d\n",
-                                       __func__, name, ret);
-                               goto out;
-                       }
-               }
-
-               uA_load = on ? vreg->max_uA : 0;
-               ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
-               if (ret)
-                       goto out;
+                                       __func__, name, ret);
+                               goto out;
+                       }
+               }
        }
 out:
        return ret;
@@ -4463,7 +8056,9 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
 {
        int ret = 0;
 
-       if (!vreg || vreg->enabled)
+       if (!vreg)
+               goto out;
+       else if (vreg->enabled || vreg->unused)
                goto out;
 
        ret = ufshcd_config_vreg(dev, vreg, true);
@@ -4483,7 +8078,9 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
 {
        int ret = 0;
 
-       if (!vreg || !vreg->enabled)
+       if (!vreg)
+               goto out;
+       else if (!vreg->enabled || vreg->unused)
                goto out;
 
        ret = regulator_disable(vreg->reg);
@@ -4533,11 +8130,16 @@ out:
 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
 {
        struct ufs_vreg_info *info = &hba->vreg_info;
+       int ret = 0;
 
-       if (info)
-               return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+       if (info->vdd_hba) {
+               ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 
-       return 0;
+               if (!ret)
+                       ufshcd_vops_update_sec_cfg(hba, on);
+       }
+
+       return ret;
 }
 
 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
@@ -4589,22 +8191,73 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
        return 0;
 }
 
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
-                                       bool skip_ref_clk)
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
+{
+       int ret = 0;
+       struct ufs_vreg_info *info = &hba->vreg_info;
+
+       if (!info)
+               goto out;
+       else if (!info->vccq)
+               goto out;
+
+       if (unused) {
+               /* shut off the rail here */
+               ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
+               /*
+                * Mark this rail as no longer used, so it doesn't get enabled
+                * later by mistake
+                */
+               if (!ret)
+                       info->vccq->unused = true;
+       } else {
+               /*
+                * The rail should already be enabled, so just make sure
+                * that the unused flag is cleared.
+                */
+               info->vccq->unused = false;
+       }
+out:
+       return ret;
+}
+
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+                              bool skip_ref_clk, bool is_gating_context)
 {
        int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
        unsigned long flags;
+       ktime_t start = ktime_get();
+       bool clk_state_changed = false;
 
        if (!head || list_empty(head))
                goto out;
 
+       /* call vendor specific bus vote before enabling the clocks */
+       if (on) {
+               ret = ufshcd_vops_set_bus_vote(hba, on);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * The vendor-specific setup_clocks ops may depend on clocks managed
+        * by this standard driver, so call the vendor-specific setup_clocks
+        * before disabling the clocks managed here.
+        */
+       if (!on) {
+               ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+               if (ret)
+                       return ret;
+       }
+
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
                                continue;
 
+                       clk_state_changed = on ^ clki->enabled;
                        if (on && !clki->enabled) {
                                ret = clk_prepare_enable(clki->clk);
                                if (ret) {
@@ -4621,24 +8274,65 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
                }
        }
 
-       ret = ufshcd_vops_setup_clocks(hba, on);
+       /*
+        * The vendor-specific setup_clocks ops may depend on clocks managed
+        * by this standard driver, so call the vendor-specific setup_clocks
+        * after enabling the clocks managed here.
+        */
+       if (on) {
+               ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+               if (ret)
+                       goto out;
+       }
+
+       /*
+        * Call the vendor-specific bus vote op to remove the vote after
+        * disabling the clocks.
+        */
+       if (!on)
+               ret = ufshcd_vops_set_bus_vote(hba, on);
+
 out:
        if (ret) {
+               if (on)
+                       /* Can't do much if this fails */
+                       (void) ufshcd_vops_set_bus_vote(hba, false);
                list_for_each_entry(clki, head, list) {
                        if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
                                clk_disable_unprepare(clki->clk);
                }
-       } else if (on) {
+       } else if (!ret && on) {
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.state = CLKS_ON;
+               trace_ufshcd_clk_gating(dev_name(hba->dev),
+                       hba->clk_gating.state);
                spin_unlock_irqrestore(hba->host->host_lock, flags);
+               /* restore the secure configuration as clocks are enabled */
+               ufshcd_vops_update_sec_cfg(hba, true);
        }
+
+       if (clk_state_changed)
+               trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+                       (on ? "on" : "off"),
+                       ktime_to_us(ktime_sub(ktime_get(), start)), ret);
        return ret;
 }
 
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufshcd_enable_clocks(struct ufs_hba *hba)
+{
+       return  ufshcd_setup_clocks(hba, true, false, false);
+}
+
+static int ufshcd_disable_clocks(struct ufs_hba *hba,
+                                bool is_gating_context)
+{
+       return  ufshcd_setup_clocks(hba, false, false, is_gating_context);
+}
+
+static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
+                                             bool is_gating_context)
 {
-       return  __ufshcd_setup_clocks(hba, on, false);
+       return  ufshcd_setup_clocks(hba, false, true, is_gating_context);
 }
 
 static int ufshcd_init_clocks(struct ufs_hba *hba)
@@ -4684,7 +8378,7 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba)
 {
        int err = 0;
 
-       if (!hba->vops)
+       if (!hba->var || !hba->var->vops)
                goto out;
 
        err = ufshcd_vops_init(hba);
@@ -4708,11 +8402,9 @@ out:
 
 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
 {
-       if (!hba->vops)
+       if (!hba->var || !hba->var->vops)
                return;
 
-       ufshcd_vops_setup_clocks(hba, false);
-
        ufshcd_vops_setup_regulators(hba, false);
 
        ufshcd_vops_exit(hba);
@@ -4741,7 +8433,7 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
        if (err)
                goto out_disable_hba_vreg;
 
-       err = ufshcd_setup_clocks(hba, true);
+       err = ufshcd_enable_clocks(hba);
        if (err)
                goto out_disable_hba_vreg;
 
@@ -4763,7 +8455,7 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
 out_disable_vreg:
        ufshcd_setup_vreg(hba, false);
 out_disable_clks:
-       ufshcd_setup_clocks(hba, false);
+       ufshcd_disable_clocks(hba, false);
 out_disable_hba_vreg:
        ufshcd_setup_hba_vreg(hba, false);
 out:
@@ -4775,7 +8467,13 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
        if (hba->is_powered) {
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
-               ufshcd_setup_clocks(hba, false);
+               if (ufshcd_is_clkscaling_supported(hba)) {
+                       if (hba->devfreq)
+                               ufshcd_suspend_clkscaling(hba);
+                       if (hba->clk_scaling.workq)
+                               destroy_workqueue(hba->clk_scaling.workq);
+               }
+               ufshcd_disable_clocks(hba, false);
                ufshcd_setup_hba_vreg(hba, false);
                hba->is_powered = false;
        }
@@ -4788,19 +8486,19 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
                                0,
                                0,
                                0,
-                               SCSI_SENSE_BUFFERSIZE,
+                               UFSHCD_REQ_SENSE_SIZE,
                                0};
        char *buffer;
        int ret;
 
-       buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+       buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                goto out;
        }
 
        ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
-                               SCSI_SENSE_BUFFERSIZE, NULL,
+                               UFSHCD_REQ_SENSE_SIZE, NULL,
                                msecs_to_jiffies(1000), 3, NULL, REQ_PM);
        if (ret)
                pr_err("%s: failed with err %d\n", __func__, ret);
@@ -4908,10 +8606,20 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
                   (!check_for_bkops || (check_for_bkops &&
                    !hba->auto_bkops_enabled))) {
                /*
+                * Make sure the link is in low power mode; we currently do
+                * this by putting the link in Hibern8. Another way to put the
+                * link in low power mode is to send a DME end point reset to
+                * the device and then a DME reset command to the local UniPro,
+                * but putting the link in Hibern8 is much faster.
+                */
+               ret = ufshcd_uic_hibern8_enter(hba);
+               if (ret)
+                       goto out;
+               /*
                 * Change controller state to "reset state" which
                 * should also put the link in off/reset state
                 */
-               ufshcd_hba_stop(hba);
+               ufshcd_hba_stop(hba, true);
                /*
                 * TODO: Check if we need any delay to make sure that
                 * controller is reset
@@ -4926,6 +8634,15 @@ out:
 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 {
        /*
+        * It seems some UFS devices may keep drawing more than the sleep
+        * current (at least for 500us) from the UFS rails (especially from
+        * the VCCQ rail). To avoid this situation, add a 2ms delay before
+        * putting these UFS rails in LPM mode.
+        */
+       if (!ufshcd_is_link_active(hba))
+               usleep_range(2000, 2100);
+
+       /*
         * If UFS device is either in UFS_Sleep turn off VCC rail to save some
         * power.
         *
@@ -4957,7 +8674,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
            !hba->dev_info.is_lu_power_on_wp) {
                ret = ufshcd_setup_vreg(hba, true);
        } else if (!ufshcd_is_ufs_dev_active(hba)) {
-               ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
                if (!ret && !ufshcd_is_link_active(hba)) {
                        ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
                        if (ret)
@@ -4966,6 +8682,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
                        if (ret)
                                goto vccq_lpm;
                }
+               ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
        }
        goto out;
 
@@ -4979,13 +8696,17 @@ out:
 
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
 {
-       if (ufshcd_is_link_off(hba))
+       if (ufshcd_is_link_off(hba) ||
+           (ufshcd_is_link_hibern8(hba)
+            && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
                ufshcd_setup_hba_vreg(hba, false);
 }
 
 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
 {
-       if (ufshcd_is_link_off(hba))
+       if (ufshcd_is_link_off(hba) ||
+           (ufshcd_is_link_hibern8(hba)
+            && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
                ufshcd_setup_hba_vreg(hba, true);
 }
 
@@ -5027,8 +8748,17 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
         * If we can't transition into any of the low power modes
         * just gate the clocks.
         */
-       ufshcd_hold(hba, false);
+       WARN_ON(hba->hibern8_on_idle.is_enabled &&
+               hba->hibern8_on_idle.active_reqs);
+       ufshcd_hold_all(hba);
        hba->clk_gating.is_suspended = true;
+       hba->hibern8_on_idle.is_suspended = true;
+
+       if (hba->clk_scaling.is_allowed) {
+               cancel_work_sync(&hba->clk_scaling.suspend_work);
+               cancel_work_sync(&hba->clk_scaling.resume_work);
+               ufshcd_suspend_clkscaling(hba);
+       }
 
        if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
                        req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -5037,12 +8767,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
        if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
            (req_link_state == hba->uic_link_state))
-               goto out;
+               goto enable_gating;
 
        /* UFS device & link must be active before we enter in this function */
        if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
                ret = -EINVAL;
-               goto out;
+               goto enable_gating;
        }
 
        if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5075,19 +8805,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
        if (ret)
                goto set_dev_active;
 
+       if (ufshcd_is_link_hibern8(hba) &&
+           ufshcd_is_hibern8_on_idle_allowed(hba))
+               hba->hibern8_on_idle.state = HIBERN8_ENTERED;
+
        ufshcd_vreg_set_lpm(hba);
 
 disable_clks:
        /*
-        * The clock scaling needs access to controller registers. Hence, Wait
-        * for pending clock scaling work to be done before clocks are
-        * turned off.
-        */
-       if (ufshcd_is_clkscaling_enabled(hba)) {
-               devfreq_suspend_device(hba->devfreq);
-               hba->clk_scaling.window_start_t = 0;
-       }
-       /*
         * Call vendor specific suspend callback. As these callbacks may access
         * vendor specific host controller register space call them before the
         * host clocks are ON.
@@ -5096,17 +8821,19 @@ disable_clks:
        if (ret)
                goto set_link_active;
 
-       ret = ufshcd_vops_setup_clocks(hba, false);
-       if (ret)
-               goto vops_resume;
-
        if (!ufshcd_is_link_active(hba))
-               ufshcd_setup_clocks(hba, false);
+               ret = ufshcd_disable_clocks(hba, false);
        else
                /* If link is active, device ref_clk can't be switched off */
-               __ufshcd_setup_clocks(hba, false, true);
+               ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
+       if (ret)
+               goto set_link_active;
 
-       hba->clk_gating.state = CLKS_OFF;
+       if (ufshcd_is_clkgating_allowed(hba)) {
+               hba->clk_gating.state = CLKS_OFF;
+               trace_ufshcd_clk_gating(dev_name(hba->dev),
+                                       hba->clk_gating.state);
+       }
        /*
         * Disable the host irq as host controller as there won't be any
         * host controller transaction expected till resume.
@@ -5116,22 +8843,31 @@ disable_clks:
        ufshcd_hba_vreg_set_lpm(hba);
        goto out;
 
-vops_resume:
-       ufshcd_vops_resume(hba, pm_op);
 set_link_active:
+       if (hba->clk_scaling.is_allowed)
+               ufshcd_resume_clkscaling(hba);
        ufshcd_vreg_set_hpm(hba);
-       if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+       if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
                ufshcd_set_link_active(hba);
-       else if (ufshcd_is_link_off(hba))
+       } else if (ufshcd_is_link_off(hba)) {
+               ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
                ufshcd_host_reset_and_restore(hba);
+       }
 set_dev_active:
        if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
                ufshcd_disable_auto_bkops(hba);
 enable_gating:
+       if (hba->clk_scaling.is_allowed)
+               ufshcd_resume_clkscaling(hba);
+       hba->hibern8_on_idle.is_suspended = false;
        hba->clk_gating.is_suspended = false;
-       ufshcd_release(hba);
+       ufshcd_release_all(hba);
 out:
        hba->pm_op_in_progress = 0;
+
+       if (ret)
+               ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
+
        return ret;
 }
 
@@ -5155,14 +8891,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
        ufshcd_hba_vreg_set_hpm(hba);
        /* Make sure clocks are enabled before accessing controller */
-       ret = ufshcd_setup_clocks(hba, true);
+       ret = ufshcd_enable_clocks(hba);
        if (ret)
                goto out;
 
        /* enable the host irq as host controller would be active soon */
-       ret = ufshcd_enable_irq(hba);
-       if (ret)
-               goto disable_irq_and_vops_clks;
+       ufshcd_enable_irq(hba);
 
        ret = ufshcd_vreg_set_hpm(hba);
        if (ret)
@@ -5179,18 +8913,28 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
        if (ufshcd_is_link_hibern8(hba)) {
                ret = ufshcd_uic_hibern8_exit(hba);
-               if (!ret)
+               if (!ret) {
                        ufshcd_set_link_active(hba);
-               else
+                       if (ufshcd_is_hibern8_on_idle_allowed(hba))
+                               hba->hibern8_on_idle.state = HIBERN8_EXITED;
+               } else {
                        goto vendor_suspend;
+               }
        } else if (ufshcd_is_link_off(hba)) {
-               ret = ufshcd_host_reset_and_restore(hba);
                /*
-                * ufshcd_host_reset_and_restore() should have already
+                * A full initialization of the host and the device is required
+                * since the link was turned off during suspend.
+                */
+               ret = ufshcd_reset_and_restore(hba);
+               /*
+                * ufshcd_reset_and_restore() should have already
                 * set the link state as active
                 */
                if (ret || !ufshcd_is_link_active(hba))
                        goto vendor_suspend;
+               /* mark link state as hibern8 exited */
+               if (ufshcd_is_hibern8_on_idle_allowed(hba))
+                       hba->hibern8_on_idle.state = HIBERN8_EXITED;
        }
 
        if (!ufshcd_is_ufs_dev_active(hba)) {
@@ -5209,25 +8953,37 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                ufshcd_urgent_bkops(hba);
 
        hba->clk_gating.is_suspended = false;
+       hba->hibern8_on_idle.is_suspended = false;
 
-       if (ufshcd_is_clkscaling_enabled(hba))
-               devfreq_resume_device(hba->devfreq);
+       if (hba->clk_scaling.is_allowed)
+               ufshcd_resume_clkscaling(hba);
 
        /* Schedule clock gating in case of no access to UFS device yet */
-       ufshcd_release(hba);
+       ufshcd_release_all(hba);
        goto out;
 
 set_old_link_state:
        ufshcd_link_state_transition(hba, old_link_state, 0);
+       if (ufshcd_is_link_hibern8(hba) &&
+           ufshcd_is_hibern8_on_idle_allowed(hba))
+               hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 vendor_suspend:
        ufshcd_vops_suspend(hba, pm_op);
 disable_vreg:
        ufshcd_vreg_set_lpm(hba);
 disable_irq_and_vops_clks:
        ufshcd_disable_irq(hba);
-       ufshcd_setup_clocks(hba, false);
+       if (hba->clk_scaling.is_allowed)
+               ufshcd_suspend_clkscaling(hba);
+       ufshcd_disable_clocks(hba, false);
+       if (ufshcd_is_clkgating_allowed(hba))
+               hba->clk_gating.state = CLKS_OFF;
 out:
        hba->pm_op_in_progress = 0;
+
+       if (ret)
+               ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
+
        return ret;
 }
 
@@ -5243,20 +8999,18 @@ out:
 int ufshcd_system_suspend(struct ufs_hba *hba)
 {
        int ret = 0;
+       ktime_t start = ktime_get();
 
        if (!hba || !hba->is_powered)
                return 0;
 
-       if (pm_runtime_suspended(hba->dev)) {
-               if (hba->rpm_lvl == hba->spm_lvl)
-                       /*
-                        * There is possibility that device may still be in
-                        * active state during the runtime suspend.
-                        */
-                       if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
-                           hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
-                               goto out;
+       if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+            hba->curr_dev_pwr_mode) &&
+           (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+            hba->uic_link_state))
+               goto out;
 
+       if (pm_runtime_suspended(hba->dev)) {
                /*
                 * UFS device and/or UFS link low power states during runtime
                 * suspend seems to be different than what is expected during
@@ -5272,6 +9026,9 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
 
        ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
 out:
+       trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+               ktime_to_us(ktime_sub(ktime_get(), start)),
+               hba->curr_dev_pwr_mode, hba->uic_link_state);
        if (!ret)
                hba->is_sys_suspended = true;
        return ret;
@@ -5287,6 +9044,9 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
 
 int ufshcd_system_resume(struct ufs_hba *hba)
 {
+       int ret = 0;
+       ktime_t start = ktime_get();
+
        if (!hba)
                return -EINVAL;
 
@@ -5295,9 +9055,14 @@ int ufshcd_system_resume(struct ufs_hba *hba)
                 * Let the runtime resume take care of resuming
                 * if runtime suspended.
                 */
-               return 0;
-
-       return ufshcd_resume(hba, UFS_SYSTEM_PM);
+               goto out;
+       else
+               ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+out:
+       trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+               ktime_to_us(ktime_sub(ktime_get(), start)),
+               hba->curr_dev_pwr_mode, hba->uic_link_state);
+       return ret;
 }
 EXPORT_SYMBOL(ufshcd_system_resume);
 
@@ -5311,13 +9076,23 @@ EXPORT_SYMBOL(ufshcd_system_resume);
  */
 int ufshcd_runtime_suspend(struct ufs_hba *hba)
 {
+       int ret = 0;
+       ktime_t start = ktime_get();
+
        if (!hba)
                return -EINVAL;
 
        if (!hba->is_powered)
-               return 0;
+               goto out;
+       else
+               ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
+out:
+       trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+               ktime_to_us(ktime_sub(ktime_get(), start)),
+               hba->curr_dev_pwr_mode,
+               hba->uic_link_state);
+       return ret;
 
-       return ufshcd_suspend(hba, UFS_RUNTIME_PM);
 }
 EXPORT_SYMBOL(ufshcd_runtime_suspend);
 
@@ -5344,13 +9119,22 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
  */
 int ufshcd_runtime_resume(struct ufs_hba *hba)
 {
+       int ret = 0;
+       ktime_t start = ktime_get();
+
        if (!hba)
                return -EINVAL;
 
        if (!hba->is_powered)
-               return 0;
-
-       return ufshcd_resume(hba, UFS_RUNTIME_PM);
+               goto out;
+       else
+               ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
+out:
+       trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+               ktime_to_us(ktime_sub(ktime_get(), start)),
+               hba->curr_dev_pwr_mode,
+               hba->uic_link_state);
+       return ret;
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
 
@@ -5360,6 +9144,246 @@ int ufshcd_runtime_idle(struct ufs_hba *hba)
 }
 EXPORT_SYMBOL(ufshcd_runtime_idle);
 
+static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count,
+                                          bool rpm)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       unsigned long flags, value;
+
+       if (kstrtoul(buf, 0, &value))
+               return -EINVAL;
+
+       if (value >= UFS_PM_LVL_MAX)
+               return -EINVAL;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (rpm)
+               hba->rpm_lvl = value;
+       else
+               hba->spm_lvl = value;
+       ufshcd_apply_pm_quirks(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return count;
+}
+
+static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       int curr_len;
+       u8 lvl;
+
+       curr_len = snprintf(buf, PAGE_SIZE,
+                           "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
+                           hba->rpm_lvl,
+                           ufschd_ufs_dev_pwr_mode_to_string(
+                               ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
+                           ufschd_uic_link_state_to_string(
+                               ufs_pm_lvl_states[hba->rpm_lvl].link_state));
+
+       curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+                            "\nAll available Runtime PM levels info:\n");
+       for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+               curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+                                    "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
+                                   lvl,
+                                   ufschd_ufs_dev_pwr_mode_to_string(
+                                       ufs_pm_lvl_states[lvl].dev_state),
+                                   ufschd_uic_link_state_to_string(
+                                       ufs_pm_lvl_states[lvl].link_state));
+
+       return curr_len;
+}
+
+static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
+}
+
+static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+       hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
+       hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
+       sysfs_attr_init(&hba->rpm_lvl_attr.attr);
+       hba->rpm_lvl_attr.attr.name = "rpm_lvl";
+       hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
+               dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
+}
+
+static ssize_t ufshcd_spm_lvl_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       int curr_len;
+       u8 lvl;
+
+       curr_len = snprintf(buf, PAGE_SIZE,
+                           "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
+                           hba->spm_lvl,
+                           ufschd_ufs_dev_pwr_mode_to_string(
+                               ufs_pm_lvl_states[hba->spm_lvl].dev_state),
+                           ufschd_uic_link_state_to_string(
+                               ufs_pm_lvl_states[hba->spm_lvl].link_state));
+
+       curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+                            "\nAll available System PM levels info:\n");
+       for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+               curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+                                    "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
+                                   lvl,
+                                   ufschd_ufs_dev_pwr_mode_to_string(
+                                       ufs_pm_lvl_states[lvl].dev_state),
+                                   ufschd_uic_link_state_to_string(
+                                       ufs_pm_lvl_states[lvl].link_state));
+
+       return curr_len;
+}
+
+static ssize_t ufshcd_spm_lvl_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
+}
+
+static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+       hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
+       hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
+       sysfs_attr_init(&hba->spm_lvl_attr.attr);
+       hba->spm_lvl_attr.attr.name = "spm_lvl";
+       hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (device_create_file(hba->dev, &hba->spm_lvl_attr))
+               dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
+}
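
Both attributes are ordinary device files, so they can be driven from a script or a short program once the controller's sysfs directory is known. A hypothetical usage sketch; the sysfs path below is an assumption and is platform specific:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical path to the "rpm_lvl" attribute created above */
		int fd = open("/sys/devices/platform/soc/1d84000.ufshc/rpm_lvl",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		/* ufshcd_pm_lvl_store() accepts values 0..UFS_PM_LVL_MAX-1 */
		if (write(fd, "2", 1) != 1) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}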
+
+static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
+                                 enum desc_idn desc_id,
+                                 u8 desc_index,
+                                 u8 param_offset,
+                                 u8 *sysfs_buf,
+                                 u8 param_size)
+{
+       u8 desc_buf[8] = {0};
+       int ret;
+
+       if (param_size > 8)
+               return -EINVAL;
+
+       pm_runtime_get_sync(hba->dev);
+       ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
+                               param_offset, desc_buf, param_size);
+       pm_runtime_put_sync(hba->dev);
+
+       if (ret)
+               return -EINVAL;
+       switch (param_size) {
+       case 1:
+               ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%02X\n", *desc_buf);
+               break;
+       case 2:
+               ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%04X\n",
+                       get_unaligned_be16(desc_buf));
+               break;
+       case 4:
+               ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%08X\n",
+                       get_unaligned_be32(desc_buf));
+               break;
+       case 8:
+               ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%016llX\n",
+                       get_unaligned_be64(desc_buf));
+               break;
+       }
+
+       return ret;
+}
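+
+/*
+ * Descriptor fields are big-endian on the wire, hence the
+ * get_unaligned_be*() conversions above; each value is printed as a
+ * 0x-prefixed hex string, e.g. "0x01" for a one-byte parameter.
+ */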
+
+#define UFS_DESC_PARAM(_name, _puname, _duname, _size)                 \
+       static ssize_t _name##_show(struct device *dev,                 \
+               struct device_attribute *attr, char *buf)                       \
+{                                                                      \
+       struct ufs_hba *hba = dev_get_drvdata(dev);             \
+       return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
+               0, _duname##_DESC_PARAM##_puname, buf, _size);          \
+}                                                                      \
+static DEVICE_ATTR_RO(_name)
+
+#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size)                    \
+               UFS_DESC_PARAM(_name, _uname, HEALTH, _size)
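+
+/*
+ * As an illustration, UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1)
+ * expands (roughly) to:
+ *
+ *   static ssize_t eol_info_show(struct device *dev,
+ *             struct device_attribute *attr, char *buf)
+ *   {
+ *        struct ufs_hba *hba = dev_get_drvdata(dev);
+ *        return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_HEALTH,
+ *                  0, HEALTH_DESC_PARAM_EOL_INFO, buf, 1);
+ *   }
+ *   static DEVICE_ATTR_RO(eol_info);
+ */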
+
+UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
+UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
+UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);
+
+static struct attribute *ufs_sysfs_health_descriptor[] = {
+       &dev_attr_eol_info.attr,
+       &dev_attr_life_time_estimation_a.attr,
+       &dev_attr_life_time_estimation_b.attr,
+       NULL,
+};
+
+static const struct attribute_group ufs_sysfs_health_descriptor_group = {
+       .name = "health_descriptor",
+       .attrs = ufs_sysfs_health_descriptor,
+};
+
+static const struct attribute_group *ufs_sysfs_groups[] = {
+       &ufs_sysfs_health_descriptor_group,
+       NULL,
+};
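+
+/*
+ * The group is registered on the controller device, so the attributes
+ * show up at, e.g.:
+ *
+ *   /sys/devices/.../health_descriptor/eol_info
+ *   /sys/devices/.../health_descriptor/life_time_estimation_a
+ *   /sys/devices/.../health_descriptor/life_time_estimation_b
+ */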
+
+static void ufshcd_add_desc_sysfs_nodes(struct device *dev)
+{
+       int ret;
+
+       ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
+       if (ret)
+               dev_err(dev,
+                       "%s: sysfs groups creation failed (err = %d)\n",
+                       __func__, ret);
+}
+
+static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
+{
+       ufshcd_add_rpm_lvl_sysfs_nodes(hba);
+       ufshcd_add_spm_lvl_sysfs_nodes(hba);
+       ufshcd_add_desc_sysfs_nodes(hba->dev);
+}
+
+static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
+{
+       bool suspend = false;
+       unsigned long flags;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (hba->clk_scaling.is_allowed) {
+               hba->clk_scaling.is_allowed = false;
+               suspend = true;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       /*
+        * Clock scaling work may already have been scheduled, so make
+        * sure it cannot race with shutdown.
+        */
+       if (ufshcd_is_clkscaling_supported(hba)) {
+               device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+               cancel_work_sync(&hba->clk_scaling.suspend_work);
+               cancel_work_sync(&hba->clk_scaling.resume_work);
+               if (suspend)
+                       ufshcd_suspend_clkscaling(hba);
+       }
+
+       /* Unregister so that devfreq_monitor can't race with shutdown */
+       if (hba->devfreq)
+               devfreq_remove_device(hba->devfreq);
+}
+
 /**
  * ufshcd_shutdown - shutdown routine
  * @hba: per adapter instance
@@ -5378,20 +9402,88 @@ int ufshcd_shutdown(struct ufs_hba *hba)
        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
                goto out;
 
-       if (pm_runtime_suspended(hba->dev)) {
-               ret = ufshcd_runtime_resume(hba);
-               if (ret)
-                       goto out;
-       }
+       pm_runtime_get_sync(hba->dev);
+       ufshcd_hold_all(hba);
+       ufshcd_mark_shutdown_ongoing(hba);
+       ufshcd_shutdown_clkscaling(hba);
+       /*
+        * (1) Acquire the lock to stop any more requests
+        * (2) Wait for all issued requests to complete
+        */
+       ufshcd_get_write_lock(hba);
+       ufshcd_scsi_block_requests(hba);
+       ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+       if (ret)
+               dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
+                       __func__, ret);
+       /* Requests may have errored out above, let them be handled */
+       flush_work(&hba->eh_work);
+       /* reqs issued from contexts other than shutdown will fail from now on */
+       ufshcd_scsi_unblock_requests(hba);
+       ufshcd_release_all(hba);
+       ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+out:
+       if (ret)
+               dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+       /* allow force shutdown even in case of errors */
+       return 0;
+}
+EXPORT_SYMBOL(ufshcd_shutdown);
+
+/*
+ * Permitted values: 0, 1 or 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
+ */
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+                  const char *buf, size_t count)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       long value;
+
+       if (kstrtol(buf, 0, &value))
+               return -EINVAL;
+       if (value == BLK_IO_LAT_HIST_ZERO) {
+               memset(&hba->io_lat_read, 0, sizeof(hba->io_lat_read));
+               memset(&hba->io_lat_write, 0, sizeof(hba->io_lat_write));
+       } else if (value == BLK_IO_LAT_HIST_ENABLE ||
+                value == BLK_IO_LAT_HIST_DISABLE)
+               hba->latency_hist_enabled = value;
+       return count;
+}
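+
+/*
+ * Example (the sysfs path depends on how the platform device is named):
+ *
+ *   echo 1 > /sys/devices/.../latency_hist   # start collecting
+ *   cat /sys/devices/.../latency_hist        # dump read/write histograms
+ *   echo 2 > /sys/devices/.../latency_hist   # zero the histograms
+ */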
+
+static ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr,
+                 char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       size_t written_bytes;
+
+       written_bytes = blk_latency_hist_show("Read", &hba->io_lat_read,
+                       buf, PAGE_SIZE);
+       written_bytes += blk_latency_hist_show("Write", &hba->io_lat_write,
+                       buf + written_bytes, PAGE_SIZE - written_bytes);
+
+       return written_bytes;
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+                  latency_hist_show, latency_hist_store);
+
+static void
+ufshcd_init_latency_hist(struct ufs_hba *hba)
+{
+       if (device_create_file(hba->dev, &dev_attr_latency_hist))
+               dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
+}
 
-       ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
-out:
-       if (ret)
-               dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
-       /* allow force shutdown even in case of errors */
-       return 0;
+static void
+ufshcd_exit_latency_hist(struct ufs_hba *hba)
+{
+       device_remove_file(hba->dev, &dev_attr_latency_hist);
 }
-EXPORT_SYMBOL(ufshcd_shutdown);
 
 /**
  * ufshcd_remove - de-allocate SCSI host and host memory space
@@ -5403,12 +9495,17 @@ void ufshcd_remove(struct ufs_hba *hba)
        scsi_remove_host(hba->host);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
-       ufshcd_hba_stop(hba);
+       ufshcd_hba_stop(hba, true);
 
        ufshcd_exit_clk_gating(hba);
-       if (ufshcd_is_clkscaling_enabled(hba))
+       ufshcd_exit_hibern8_on_idle(hba);
+       if (ufshcd_is_clkscaling_supported(hba)) {
+               device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+               ufshcd_exit_latency_hist(hba);
                devfreq_remove_device(hba->devfreq);
+       }
        ufshcd_hba_exit(hba);
+       ufsdbg_remove_debugfs(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
 
@@ -5474,66 +9571,370 @@ out_error:
 }
 EXPORT_SYMBOL(ufshcd_alloc_host);
 
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+                                              bool scale_up)
 {
-       int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
 
        if (!head || list_empty(head))
-               goto out;
-
-       ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
-       if (ret)
-               return ret;
+               return false;
 
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (scale_up && clki->max_freq) {
                                if (clki->curr_freq == clki->max_freq)
                                        continue;
-                               ret = clk_set_rate(clki->clk, clki->max_freq);
-                               if (ret) {
-                                       dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
-                                               __func__, clki->name,
-                                               clki->max_freq, ret);
-                                       break;
-                               }
-                               clki->curr_freq = clki->max_freq;
-
+                               return true;
                        } else if (!scale_up && clki->min_freq) {
                                if (clki->curr_freq == clki->min_freq)
                                        continue;
-                               ret = clk_set_rate(clki->clk, clki->min_freq);
-                               if (ret) {
-                                       dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
-                                               __func__, clki->name,
-                                               clki->min_freq, ret);
-                                       break;
-                               }
-                               clki->curr_freq = clki->min_freq;
+                               return true;
                        }
                }
-               dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
-                               clki->name, clk_get_rate(clki->clk));
        }
 
-       ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+       return false;
+}
+
+/**
+ * ufshcd_scale_gear - scale up/down UFS gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up gear and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+{
+       int ret = 0;
+       struct ufs_pa_layer_attr new_pwr_info;
+       u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
+
+       BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
+
+       if (scale_up) {
+               memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
+                      sizeof(struct ufs_pa_layer_attr));
+               /*
+                * Some UFS devices may stop responding after switching from
+                * HS-G1 to HS-G3. Such devices are found to work fine if the
+                * switch is done in two steps: HS-G1 to HS-G2 followed by
+                * HS-G2 to HS-G3. If the UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH
+                * quirk is enabled for such a device, this two-step gear
+                * switch workaround is applied.
+                */
+               if ((hba->dev_quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
+                   && (hba->pwr_info.gear_tx == UFS_HS_G1)
+                   && (new_pwr_info.gear_tx == UFS_HS_G3)) {
+                       /* scale up to G2 first */
+                       new_pwr_info.gear_tx = UFS_HS_G2;
+                       new_pwr_info.gear_rx = UFS_HS_G2;
+                       ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+                       if (ret)
+                               goto out;
+
+                       /* scale up to G3 now */
+                       new_pwr_info.gear_tx = UFS_HS_G3;
+                       new_pwr_info.gear_rx = UFS_HS_G3;
+                       /* now, fall through to set the HS-G3 */
+               }
+               ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+               if (ret)
+                       goto out;
+       } else {
+               memcpy(&new_pwr_info, &hba->pwr_info,
+                      sizeof(struct ufs_pa_layer_attr));
+
+               if (hba->pwr_info.gear_tx > scale_down_gear
+                   || hba->pwr_info.gear_rx > scale_down_gear) {
+                       /* save the current power mode */
+                       memcpy(&hba->clk_scaling.saved_pwr_info.info,
+                               &hba->pwr_info,
+                               sizeof(struct ufs_pa_layer_attr));
+
+                       /* scale down gear */
+                       new_pwr_info.gear_tx = scale_down_gear;
+                       new_pwr_info.gear_rx = scale_down_gear;
+                       if (!(hba->dev_quirks & UFS_DEVICE_NO_FASTAUTO)) {
+                               new_pwr_info.pwr_tx = FASTAUTO_MODE;
+                               new_pwr_info.pwr_rx = FASTAUTO_MODE;
+                       }
+               }
+               ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+       }
 
 out:
+       if (ret)
+               dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
+                       __func__, ret,
+                       hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
+                       new_pwr_info.gear_tx, new_pwr_info.gear_rx,
+                       scale_up);
+
+       return ret;
+}
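+
+/*
+ * With UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH set, a scale up from
+ * HS-G1 to HS-G3 therefore issues two power mode changes,
+ * HS-G1 -> HS-G2 and then HS-G2 -> HS-G3, instead of one.
+ */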
+
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+       #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
+       int ret = 0;
+       /*
+        * make sure that there are no outstanding requests when
+        * clock scaling is in progress
+        */
+       ufshcd_scsi_block_requests(hba);
+       down_write(&hba->lock);
+       if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+               ret = -EBUSY;
+               up_write(&hba->lock);
+               ufshcd_scsi_unblock_requests(hba);
+       }
+
+       return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+{
+       up_write(&hba->lock);
+       ufshcd_scsi_unblock_requests(hba);
+}
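+
+/*
+ * ufshcd_clock_scaling_prepare() blocks new SCSI requests and takes
+ * hba->lock for writing so that no requests are in flight while clocks
+ * and gear are changed; on timeout it releases both and returns -EBUSY,
+ * so ufshcd_clock_scaling_unprepare() must only be called after a
+ * successful prepare.
+ */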
+
+/**
+ * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+{
+       int ret = 0;
+
+       /* let's not get into low power until clock scaling is completed */
+       hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
+       ufshcd_hold_all(hba);
+
+       ret = ufshcd_clock_scaling_prepare(hba);
+       if (ret)
+               goto out;
+
+       /* scale down the gear before scaling down clocks */
+       if (!scale_up) {
+               ret = ufshcd_scale_gear(hba, false);
+               if (ret)
+                       goto clk_scaling_unprepare;
+       }
+
+       /*
+        * If auto hibern8 is supported then put the link in
+        * hibern8 manually, this is to avoid auto hibern8
+        * racing during clock frequency scaling sequence.
+        */
+       if (ufshcd_is_auto_hibern8_supported(hba)) {
+               ret = ufshcd_uic_hibern8_enter(hba);
+               if (ret)
+                       /* link will be in a bad state, so no need to scale_up_gear */
+                       return ret;
+       }
+
+       ret = ufshcd_scale_clks(hba, scale_up);
+       if (ret)
+               goto scale_up_gear;
+
+       if (ufshcd_is_auto_hibern8_supported(hba)) {
+               ret = ufshcd_uic_hibern8_exit(hba);
+               if (ret)
+                       /* link will be in a bad state, so no need to scale_up_gear */
+                       return ret;
+       }
+
+       /* scale up the gear after scaling up clocks */
+       if (scale_up) {
+               ret = ufshcd_scale_gear(hba, true);
+               if (ret) {
+                       ufshcd_scale_clks(hba, false);
+                       goto clk_scaling_unprepare;
+               }
+       }
+
+       if (!ret) {
+               hba->clk_scaling.is_scaled_up = scale_up;
+               if (scale_up)
+                       hba->clk_gating.delay_ms =
+                               hba->clk_gating.delay_ms_perf;
+               else
+                       hba->clk_gating.delay_ms =
+                               hba->clk_gating.delay_ms_pwr_save;
+       }
+
+       goto clk_scaling_unprepare;
+
+scale_up_gear:
+       if (!scale_up)
+               ufshcd_scale_gear(hba, true);
+clk_scaling_unprepare:
+       ufshcd_clock_scaling_unprepare(hba);
+out:
+       hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
+       ufshcd_release_all(hba);
        return ret;
 }
 
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+       unsigned long flags;
+
+       devfreq_suspend_device(hba->devfreq);
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->clk_scaling.window_start_t = 0;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+       unsigned long flags;
+       bool suspend = false;
+
+       if (!ufshcd_is_clkscaling_supported(hba))
+               return;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (!hba->clk_scaling.is_suspended) {
+               suspend = true;
+               hba->clk_scaling.is_suspended = true;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (suspend)
+               __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+       unsigned long flags;
+       bool resume = false;
+
+       if (!ufshcd_is_clkscaling_supported(hba))
+               return;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (hba->clk_scaling.is_suspended) {
+               resume = true;
+               hba->clk_scaling.is_suspended = false;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (resume)
+               devfreq_resume_device(hba->devfreq);
+}
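+
+/*
+ * clk_scaling.is_suspended is checked and updated under the host lock
+ * so that concurrent callers (sysfs, PM paths, the suspend/resume work
+ * items) suspend or resume devfreq only once per state change.
+ */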
+
+static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+}
+
+static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       u32 value;
+       int err;
+
+       if (kstrtou32(buf, 0, &value))
+               return -EINVAL;
+
+       value = !!value;
+       if (value == hba->clk_scaling.is_allowed)
+               goto out;
+
+       pm_runtime_get_sync(hba->dev);
+       ufshcd_hold(hba, false);
+
+       cancel_work_sync(&hba->clk_scaling.suspend_work);
+       cancel_work_sync(&hba->clk_scaling.resume_work);
+
+       hba->clk_scaling.is_allowed = value;
+
+       if (value) {
+               ufshcd_resume_clkscaling(hba);
+       } else {
+               ufshcd_suspend_clkscaling(hba);
+               err = ufshcd_devfreq_scale(hba, true);
+               if (err)
+                       dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+                                       __func__, err);
+       }
+
+       ufshcd_release(hba, false);
+       pm_runtime_put_sync(hba->dev);
+out:
+       return count;
+}
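+
+/*
+ * "clkscale_enable" is a boolean knob:
+ *
+ *   echo 0 > /sys/devices/.../clkscale_enable
+ *
+ * stops devfreq-driven scaling and scales clocks and gear back up to
+ * maximum, while writing 1 resumes devfreq operation.
+ */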
+
+static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
+{
+       struct ufs_hba *hba = container_of(work, struct ufs_hba,
+                                          clk_scaling.suspend_work);
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(hba->host->host_lock, irq_flags);
+       if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
+               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+               return;
+       }
+       hba->clk_scaling.is_suspended = true;
+       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+       __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
+{
+       struct ufs_hba *hba = container_of(work, struct ufs_hba,
+                                          clk_scaling.resume_work);
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(hba->host->host_lock, irq_flags);
+       if (!hba->clk_scaling.is_suspended) {
+               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+               return;
+       }
+       hba->clk_scaling.is_suspended = false;
+       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+       devfreq_resume_device(hba->devfreq);
+}
+
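+/*
+ * devfreq target hook. With the devfreq profile used here the governor
+ * only requests the extremes, so *freq is expected to be either 0
+ * (scale down) or UINT_MAX (scale up); anything in between is rejected
+ * as invalid below.
+ */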
 static int ufshcd_devfreq_target(struct device *dev,
                                unsigned long *freq, u32 flags)
 {
-       int err = 0;
+       int ret = 0;
        struct ufs_hba *hba = dev_get_drvdata(dev);
-       bool release_clk_hold = false;
        unsigned long irq_flags;
+       ktime_t start;
+       bool scale_up, sched_clk_scaling_suspend_work = false;
+
+       if (!ufshcd_is_clkscaling_supported(hba))
+               return -EINVAL;
 
-       if (!ufshcd_is_clkscaling_enabled(hba))
+       if ((*freq > 0) && (*freq < UINT_MAX)) {
+               dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
                return -EINVAL;
+       }
 
        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (ufshcd_eh_in_progress(hba)) {
@@ -5541,36 +9942,29 @@ static int ufshcd_devfreq_target(struct device *dev,
                return 0;
        }
 
-       if (ufshcd_is_clkgating_allowed(hba) &&
-           (hba->clk_gating.state != CLKS_ON)) {
-               if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
-                       /* hold the vote until the scaling work is completed */
-                       hba->clk_gating.active_reqs++;
-                       release_clk_hold = true;
-                       hba->clk_gating.state = CLKS_ON;
-               } else {
-                       /*
-                        * Clock gating work seems to be running in parallel
-                        * hence skip scaling work to avoid deadlock between
-                        * current scaling work and gating work.
-                        */
-                       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-                       return 0;
-               }
+       if (!hba->clk_scaling.active_reqs)
+               sched_clk_scaling_suspend_work = true;
+
+       scale_up = (*freq == UINT_MAX);
+       if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+               ret = 0;
+               goto out; /* no state change required */
        }
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
-       if (*freq == UINT_MAX)
-               err = ufshcd_scale_clks(hba, true);
-       else if (*freq == 0)
-               err = ufshcd_scale_clks(hba, false);
+       start = ktime_get();
+       ret = ufshcd_devfreq_scale(hba, scale_up);
+       trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+               (scale_up ? "up" : "down"),
+               ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
-       spin_lock_irqsave(hba->host->host_lock, irq_flags);
-       if (release_clk_hold)
-               __ufshcd_release(hba);
-       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+out:
+       if (sched_clk_scaling_suspend_work)
+               queue_work(hba->clk_scaling.workq,
+                          &hba->clk_scaling.suspend_work);
 
-       return err;
+       return ret;
 }
 
 static int ufshcd_devfreq_get_dev_status(struct device *dev,
@@ -5580,7 +9974,7 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
        unsigned long flags;
 
-       if (!ufshcd_is_clkscaling_enabled(hba))
+       if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;
 
        memset(stat, 0, sizeof(*stat));
@@ -5611,12 +10005,31 @@ start_window:
        return 0;
 }
 
-static struct devfreq_dev_profile ufs_devfreq_profile = {
-       .polling_ms     = 100,
-       .target         = ufshcd_devfreq_target,
-       .get_dev_status = ufshcd_devfreq_get_dev_status,
-};
+static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+{
+       hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
+       hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
+       sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
+       hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
+       hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
+               dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
+}
 
+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+       struct device *dev = hba->dev;
+       int ret;
+
+       ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+               &hba->lanes_per_direction);
+       if (ret) {
+               dev_dbg(hba->dev,
+                       "%s: failed to read lanes-per-direction, ret=%d\n",
+                       __func__, ret);
+               hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+       }
+}
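+
+/*
+ * "lanes-per-direction" comes from the host controller DT node, e.g.:
+ *
+ *   ufshc@624000 {
+ *           compatible = "qcom,ufshc";
+ *           lanes-per-direction = <2>;
+ *           ...
+ *   };
+ *
+ * When the property is absent, UFSHCD_DEFAULT_LANES_PER_DIRECTION is
+ * used instead.
+ */
+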
 /**
  * ufshcd_init - Driver initialization routine
  * @hba: per-adapter instance
@@ -5640,6 +10053,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        hba->mmio_base = mmio_base;
        hba->irq = irq;
 
+       ufshcd_init_lanes_per_dir(hba);
+
        err = ufshcd_hba_init(hba);
        if (err)
                goto out_error;
@@ -5650,9 +10065,20 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);
 
+       /* print error message if ufs_version is not valid */
+       if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+           (hba->ufs_version != UFSHCI_VERSION_11) &&
+           (hba->ufs_version != UFSHCI_VERSION_20) &&
+           (hba->ufs_version != UFSHCI_VERSION_21))
+               dev_err(hba->dev, "invalid UFS version 0x%x\n",
+                       hba->ufs_version);
+
        /* Get Interrupt bit mask per version */
        hba->intr_mask = ufshcd_get_intr_mask(hba);
 
+       /* Enable debug prints */
+       hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
+
        err = ufshcd_set_dma_mask(hba);
        if (err) {
                dev_err(hba->dev, "set dma mask failed\n");
@@ -5676,6 +10102,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        host->max_channel = UFSHCD_MAX_CHANNEL;
        host->unique_id = host->host_no;
        host->max_cmd_len = MAX_CDB_SIZE;
+       host->set_dbd_for_caching = 1;
 
        hba->max_pwr_info.is_valid = false;
 
@@ -5686,6 +10113,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Initialize work queues */
        INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+       INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
 
        /* Initialize UIC command mutex */
        mutex_init(&hba->uic_cmd_mutex);
@@ -5693,10 +10121,28 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Initialize mutex for device management commands */
        mutex_init(&hba->dev_cmd.lock);
 
+       init_rwsem(&hba->lock);
+
        /* Initialize device management tag acquire wait queue */
        init_waitqueue_head(&hba->dev_cmd.tag_wq);
 
        ufshcd_init_clk_gating(hba);
+       ufshcd_init_hibern8_on_idle(hba);
+
+       /*
+        * In order to avoid any spurious interrupt immediately after
+        * registering UFS controller interrupt handler, clear any pending UFS
+        * interrupt status and disable all the UFS interrupts.
+        */
+       ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+                     REG_INTERRUPT_STATUS);
+       ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+       /*
+        * Make sure that UFS interrupts are disabled and any pending interrupt
+        * status is cleared before registering UFS interrupt handler.
+        */
+       mb();
+
        /* IRQ registration */
        err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
        if (err) {
@@ -5712,43 +10158,82 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
                goto exit_gating;
        }
 
+       /* Reset controller to power on reset (POR) state */
+       ufshcd_vops_full_reset(hba);
+
+       /* reset connected UFS device */
+       err = ufshcd_reset_device(hba);
+       if (err)
+               dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+                        __func__, err);
+
        /* Host controller enable */
        err = ufshcd_hba_enable(hba);
        if (err) {
                dev_err(hba->dev, "Host controller enable failed\n");
+               ufshcd_print_host_regs(hba);
+               ufshcd_print_host_state(hba);
                goto out_remove_scsi_host;
        }
 
-       if (ufshcd_is_clkscaling_enabled(hba)) {
-               hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
-                                                  "simple_ondemand", NULL);
-               if (IS_ERR(hba->devfreq)) {
-                       dev_err(hba->dev, "Unable to register with devfreq %ld\n",
-                                       PTR_ERR(hba->devfreq));
-                       goto out_remove_scsi_host;
-               }
-               /* Suspend devfreq until the UFS device is detected */
-               devfreq_suspend_device(hba->devfreq);
-               hba->clk_scaling.window_start_t = 0;
+       if (ufshcd_is_clkscaling_supported(hba)) {
+               char wq_name[sizeof("ufs_clkscaling_00")];
+
+               INIT_WORK(&hba->clk_scaling.suspend_work,
+                         ufshcd_clk_scaling_suspend_work);
+               INIT_WORK(&hba->clk_scaling.resume_work,
+                         ufshcd_clk_scaling_resume_work);
+
+               snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
+                        host->host_no);
+               hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+               ufshcd_clkscaling_init_sysfs(hba);
        }
 
+       /*
+        * If rpm_lvl and spm_lvl are not already set to valid levels,
+        * set the default power management level for UFS runtime and system
+        * suspend. Default power saving mode selected is keeping UFS link in
+        * Hibern8 state and UFS device in sleep.
+        */
+       if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
+               hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+                                                       UFS_SLEEP_PWR_MODE,
+                                                       UIC_LINK_HIBERN8_STATE);
+       if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
+               hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+                                                       UFS_SLEEP_PWR_MODE,
+                                                       UIC_LINK_HIBERN8_STATE);
+
        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
 
+       ufshcd_init_latency_hist(hba);
+
        /*
-        * The device-initialize-sequence hasn't been invoked yet.
-        * Set the device to power-off state
+        * We assume that the device was not put into sleep/power-down
+        * state by the boot stage that ran before the kernel.
+        * This assumption helps avoid doing link startup twice during
+        * ufshcd_probe_hba().
         */
-       ufshcd_set_ufs_dev_poweroff(hba);
+       ufshcd_set_ufs_dev_active(hba);
+
+       ufshcd_cmd_log_init(hba);
 
        async_schedule(ufshcd_async_scan, hba);
 
+       ufsdbg_add_debugfs(hba);
+
+       ufshcd_add_sysfs_nodes(hba);
+
        return 0;
 
 out_remove_scsi_host:
        scsi_remove_host(hba->host);
 exit_gating:
        ufshcd_exit_clk_gating(hba);
+       ufshcd_exit_latency_hist(hba);
 out_disable:
        hba->is_irq_enabled = false;
        ufshcd_hba_exit(hba);