
Merge android-4.4-p.198 (dbd0162) into msm-4.4
author		Srinivasarao P <spathi@codeaurora.org>
		Wed, 30 Oct 2019 11:10:13 +0000 (16:40 +0530)
committer	Srinivasarao P <spathi@codeaurora.org>
		Wed, 30 Oct 2019 11:11:31 +0000 (16:41 +0530)
* refs/heads/tmp-dbd0162
  Linux 4.4.198
  RDMA/cxgb4: Do not dma memory off of the stack
  net: sched: Fix memory exposure from short TCA_U32_SEL
  PCI: PM: Fix pci_power_up()
  xen/netback: fix error path of xenvif_connect_data()
  cpufreq: Avoid cpufreq_suspend() deadlock on system shutdown
  memstick: jmb38x_ms: Fix an error handling path in 'jmb38x_ms_probe()'
  btrfs: block-group: Fix a memory leak due to missing btrfs_put_block_group()
  CIFS: avoid using MID 0xFFFF
  parisc: Fix vmap memory leak in ioremap()/iounmap()
  xtensa: drop EXPORT_SYMBOL for outs*/ins*
  mm/slub: fix a deadlock in show_slab_objects()
  scsi: zfcp: fix reaction on bit error threshold notification
  drm/edid: Add 6 bpc quirk for SDC panel in Lenovo G50
  mac80211: Reject malformed SSID elements
  cfg80211: wext: avoid copying malformed SSIDs
  ASoC: rsnd: Reinitialize bit clock inversion flag for every format setting
  scsi: core: try to get module before removing device
  USB: ldusb: fix read info leaks
  USB: usblp: fix use-after-free on disconnect
  USB: ldusb: fix memleak on disconnect
  USB: serial: ti_usb_3410_5052: fix port-close races
  usb: udc: lpc32xx: fix bad bit shift operation
  USB: legousbtower: fix memleak on disconnect
  memfd: Fix locking when tagging pins
  ipv4: Return -ENETUNREACH if we can't create route but saddr is valid
  net: avoid potential infinite loop in tc_ctl_action()
  sctp: change sctp_prot .no_autobind with true
  net: bcmgenet: Set phydev->dev_flags only for internal PHYs
  net: bcmgenet: Fix RGMII_MODE_EN value for GENET v1/2/3
  loop: Add LOOP_SET_DIRECT_IO to compat ioctl
  namespace: fix namespace.pl script to support relative paths
  net: hisilicon: Fix usage of uninitialized variable in function mdio_sc_cfg_reg_write()
  mips: Loongson: Fix the link time qualifier of 'serial_exit()'
  nl80211: fix null pointer dereference
  ARM: dts: am4372: Set memory bandwidth limit for DISPC
  ARM: OMAP2+: Fix missing reset done flag for am3 and am43
  scsi: qla2xxx: Fix unbound sleep in fcport delete path.
  scsi: megaraid: disable device when probe failed after enabled device
  scsi: ufs: skip shutdown if hba is not powered
  rtlwifi: Fix potential overflow on P2P code
  ANDROID: clang: update to 9.0.8 based on r365631c
  ANDROID: move up spin_unlock_bh() ahead of remove_proc_entry()
  ANDROID: refactor build.config files to remove duplication

Conflicts:
drivers/block/loop.c

Change-Id: I68d2106c6480b9a2573f31302b0c75922f427732
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
12 files changed:
Makefile
drivers/base/core.c
drivers/block/loop.c
drivers/cpufreq/cpufreq.c
drivers/gpu/drm/drm_edid.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/ufs/ufshcd.c
mm/shmem.c
mm/slub.c
net/mac80211/mlme.c
net/netfilter/xt_quota2.c
net/wireless/nl80211.c

diff --combined Makefile
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 4
  PATCHLEVEL = 4
- SUBLEVEL = 197
+ SUBLEVEL = 198
  EXTRAVERSION =
  NAME = Blurry Fish Butt
  
@@@ -343,7 -343,7 +343,7 @@@ include scripts/Kbuild.includ
  # Make variables (CC, etc...)
  AS            = $(CROSS_COMPILE)as
  LD            = $(CROSS_COMPILE)ld
 -CC            = $(CROSS_COMPILE)gcc
 +REAL_CC               = $(CROSS_COMPILE)gcc
  CPP           = $(CC) -E
  AR            = $(CROSS_COMPILE)ar
  NM            = $(CROSS_COMPILE)nm
@@@ -358,10 -358,6 +358,10 @@@ PERL             = per
  PYTHON                = python
  CHECK         = sparse
  
 +# Use the wrapper for the compiler.  This wrapper scans for new
 +# warnings and causes the build to stop upon encountering them.
 +CC            = $(PYTHON) $(srctree)/scripts/gcc-wrapper.py $(REAL_CC)
 +
  CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
                  -Wbitwise -Wno-return-void $(CF)
  CFLAGS_MODULE   =
@@@ -399,9 -395,7 +399,9 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
                   -Wno-format-security \
                   -std=gnu89 $(call cc-option,-fno-PIE)
  
 -
 +ifeq ($(TARGET_BOARD_TYPE),auto)
 +KBUILD_CFLAGS    += -DCONFIG_PLATFORM_AUTO
 +endif
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
  KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
@@@ -421,7 -415,7 +421,7 @@@ export HOSTCXX HOSTCXXFLAGS LDFLAGS_MOD
  
  export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
  export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV
 -export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE
 +export CFLAGS_KASAN CFLAGS_UBSAN CFLAGS_KASAN_NOSANITIZE
  export CFLAGS_KCOV
  export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
  export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
@@@ -617,11 -611,8 +617,11 @@@ all: vmlinu
  
  ifeq ($(cc-name),clang)
  ifneq ($(CROSS_COMPILE),)
 -CLANG_TRIPLE    ?= $(CROSS_COMPILE)
 +CLANG_TRIPLE  ?= $(CROSS_COMPILE)
  CLANG_TARGET  := --target=$(notdir $(CLANG_TRIPLE:%-=%))
 +ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_TARGET)), y)
 +$(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
 +endif
  GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
  CLANG_PREFIX  := --prefix=$(GCC_TOOLCHAIN_DIR)
  GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
@@@ -849,7 -840,6 +849,7 @@@ KBUILD_ARFLAGS := $(call ar-option,D
  
  include scripts/Makefile.kasan
  include scripts/Makefile.extrawarn
 +include scripts/Makefile.ubsan
  
  # Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
  # last assignments
diff --combined drivers/base/core.c
@@@ -10,6 -10,7 +10,7 @@@
   *
   */
  
+ #include <linux/cpufreq.h>
  #include <linux/device.h>
  #include <linux/err.h>
  #include <linux/fwnode.h>
@@@ -72,11 -73,6 +73,11 @@@ int lock_device_hotplug_sysfs(void
        return restart_syscall();
  }
  
 +void lock_device_hotplug_assert(void)
 +{
 +      lockdep_assert_held(&device_hotplug_lock);
 +}
 +
  #ifdef CONFIG_BLOCK
  static inline int device_is_not_partition(struct device *dev)
  {
@@@ -2129,6 -2125,8 +2130,8 @@@ void device_shutdown(void
  {
        struct device *dev, *parent;
  
+       cpufreq_suspend();
        spin_lock(&devices_kset->list_lock);
        /*
         * Walk the devices list backward, shutting down each in turn.
diff --combined drivers/block/loop.c
@@@ -1070,7 -1070,6 +1070,7 @@@ static int loop_clr_fd(struct loop_devi
        memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
        memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
        memset(lo->lo_file_name, 0, LO_NAME_SIZE);
 +      blk_queue_logical_block_size(lo->lo_queue, 512);
        if (bdev) {
                bdput(bdev);
                invalidate_bdev(bdev);
@@@ -1121,12 -1120,6 +1121,12 @@@ loop_set_status(struct loop_device *lo
        if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
                return -EINVAL;
  
 +      if (lo->lo_offset != info->lo_offset ||
 +          lo->lo_sizelimit != info->lo_sizelimit) {
 +              sync_blockdev(lo->lo_device);
 +              kill_bdev(lo->lo_device);
 +      }
 +
        /* I/O need to be drained during transfer transition */
        blk_mq_freeze_queue(lo->lo_queue);
  
                goto exit;
  
        if (lo->lo_offset != info->lo_offset ||
 -          lo->lo_sizelimit != info->lo_sizelimit)
 +          lo->lo_sizelimit != info->lo_sizelimit) {
 +              /* kill_bdev should have truncated all the pages */
 +              if (lo->lo_device->bd_inode->i_mapping->nrpages) {
 +                      err = -EAGAIN;
 +                      pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
 +                              __func__, lo->lo_number, lo->lo_file_name,
 +                              lo->lo_device->bd_inode->i_mapping->nrpages);
 +                      goto exit;
 +              }
                if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
                        err = -EFBIG;
                        goto exit;
                }
 +      }
  
        loop_config_discard(lo);
  
@@@ -1371,41 -1355,6 +1371,41 @@@ static int loop_set_dio(struct loop_dev
        return error;
  }
  
 +static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
 +{
 +      int err = 0;
 +
 +      if (lo->lo_state != Lo_bound)
 +              return -ENXIO;
 +
 +      if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
 +              return -EINVAL;
 +
 +      if (lo->lo_queue->limits.logical_block_size != arg) {
 +              sync_blockdev(lo->lo_device);
 +              kill_bdev(lo->lo_device);
 +      }
 +
 +      blk_mq_freeze_queue(lo->lo_queue);
 +
 +      /* kill_bdev should have truncated all the pages */
 +      if (lo->lo_queue->limits.logical_block_size != arg &&
 +                      lo->lo_device->bd_inode->i_mapping->nrpages) {
 +              err = -EAGAIN;
 +              pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
 +                      __func__, lo->lo_number, lo->lo_file_name,
 +                      lo->lo_device->bd_inode->i_mapping->nrpages);
 +              goto out_unfreeze;
 +      }
 +
 +      blk_queue_logical_block_size(lo->lo_queue, arg);
 +      loop_update_dio(lo);
 +out_unfreeze:
 +      blk_mq_unfreeze_queue(lo->lo_queue);
 +
 +      return err;
 +}
 +
  static int lo_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
  {
                if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
                        err = loop_set_dio(lo, arg);
                break;
 +      case LOOP_SET_BLOCK_SIZE:
 +              err = -EPERM;
 +              if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
 +                      err = loop_set_block_size(lo, arg);
 +              break;
        default:
                err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
        }
@@@ -1613,7 -1557,7 +1613,8 @@@ static int lo_compat_ioctl(struct block
                arg = (unsigned long) compat_ptr(arg);
        case LOOP_SET_FD:
        case LOOP_CHANGE_FD:
 +      case LOOP_SET_BLOCK_SIZE:
+       case LOOP_SET_DIRECT_IO:
                err = lo_ioctl(bdev, mode, cmd, arg);
                break;
        default:
@@@ -1846,7 -1790,6 +1847,7 @@@ static int loop_add(struct loop_device 
        }
        lo->lo_queue->queuedata = lo;
  
 +      blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
        /*
         * It doesn't make sense to enable merge because the I/O
         * submitted to backing file is handled page by page.
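
For readers unfamiliar with the loop ioctls touched above (the new LOOP_SET_BLOCK_SIZE handling and the LOOP_SET_DIRECT_IO entry added to the compat table), a minimal userspace sketch follows. It is not part of this commit; the device path /dev/loop0, the presence of an already-attached backing file, and the availability of both request codes in the installed <linux/loop.h> are assumptions.

  /* Hypothetical usage sketch, not from this commit. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/loop.h>

  int main(void)
  {
  	/* Assumes /dev/loop0 is already bound to a backing file. */
  	int fd = open("/dev/loop0", O_RDWR);

  	if (fd < 0) {
  		perror("open /dev/loop0");
  		return 1;
  	}

  	/* The argument must be a power of two in [512, PAGE_SIZE],
  	 * mirroring the checks in loop_set_block_size() above. */
  	if (ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096UL))
  		perror("LOOP_SET_BLOCK_SIZE");

  	/* Also reachable through the 32-bit compat path, per the
  	 * LOOP_SET_DIRECT_IO case added to lo_compat_ioctl(). */
  	if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL))
  		perror("LOOP_SET_DIRECT_IO");

  	close(fd);
  	return 0;
  }
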
diff --combined drivers/cpufreq/cpufreq.c
@@@ -131,7 -131,6 +131,7 @@@ static void handle_update(struct work_s
   */
  static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
  static struct srcu_notifier_head cpufreq_transition_notifier_list;
 +struct atomic_notifier_head cpufreq_govinfo_notifier_list;
  
  static bool init_cpufreq_transition_notifier_list_called;
  static int __init init_cpufreq_transition_notifier_list(void)
  }
  pure_initcall(init_cpufreq_transition_notifier_list);
  
 +static bool init_cpufreq_govinfo_notifier_list_called;
 +static int __init init_cpufreq_govinfo_notifier_list(void)
 +{
 +      ATOMIC_INIT_NOTIFIER_HEAD(&cpufreq_govinfo_notifier_list);
 +      init_cpufreq_govinfo_notifier_list_called = true;
 +      return 0;
 +}
 +pure_initcall(init_cpufreq_govinfo_notifier_list);
 +
  static int off __read_mostly;
  static int cpufreq_disabled(void)
  {
@@@ -1109,8 -1099,7 +1109,8 @@@ static int cpufreq_add_policy_cpu(struc
        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
 -                      pr_err("%s: Failed to stop governor\n", __func__);
 +                      pr_err("%s: Failed to stop governor for CPU%u, policy CPU%u\n",
 +                             __func__, cpu, policy->cpu);
                        return ret;
                }
        }
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
  
                if (ret) {
 -                      pr_err("%s: Failed to start governor\n", __func__);
 +                      pr_err("%s: Failed to start governor for CPU%u, policy CPU%u\n",
 +                             __func__, cpu, policy->cpu);
                        return ret;
                }
        }
@@@ -1440,8 -1428,7 +1440,8 @@@ static void cpufreq_offline_prepare(uns
        if (has_target()) {
                int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret)
 -                      pr_err("%s: Failed to stop governor\n", __func__);
 +                      pr_err("%s: Failed to stop governor for CPU%u\n",
 +                             __func__, cpu);
        }
  
        down_write(&policy->rwsem);
@@@ -1491,8 -1478,7 +1491,8 @@@ static void cpufreq_offline_finish(unsi
        if (has_target()) {
                int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                if (ret)
 -                      pr_err("%s: Failed to exit governor\n", __func__);
 +                      pr_err("%s: Failed to exit governor for CPU%u, policy CPU%u\n",
 +                             __func__, cpu, policy->cpu);
        }
  
        /*
@@@ -1821,8 -1807,7 +1821,8 @@@ int cpufreq_register_notifier(struct no
        if (cpufreq_disabled())
                return -EINVAL;
  
 -      WARN_ON(!init_cpufreq_transition_notifier_list_called);
 +      WARN_ON(!init_cpufreq_transition_notifier_list_called ||
 +              !init_cpufreq_govinfo_notifier_list_called);
  
        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = blocking_notifier_chain_register(
                                &cpufreq_policy_notifier_list, nb);
                break;
 +      case CPUFREQ_GOVINFO_NOTIFIER:
 +              ret = atomic_notifier_chain_register(
 +                              &cpufreq_govinfo_notifier_list, nb);
 +              break;
        default:
                ret = -EINVAL;
        }
@@@ -1871,10 -1852,6 +1871,10 @@@ int cpufreq_unregister_notifier(struct 
                ret = blocking_notifier_chain_unregister(
                                &cpufreq_policy_notifier_list, nb);
                break;
 +      case CPUFREQ_GOVINFO_NOTIFIER:
 +              ret = atomic_notifier_chain_unregister(
 +                              &cpufreq_govinfo_notifier_list, nb);
 +              break;
        default:
                ret = -EINVAL;
        }
@@@ -1987,6 -1964,15 +1987,6 @@@ int __cpufreq_driver_target(struct cpuf
        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                 policy->cpu, target_freq, relation, old_target_freq);
  
 -      /*
 -       * This might look like a redundant call as we are checking it again
 -       * after finding index. But it is left intentionally for cases where
 -       * exactly same freq is called again and so we can save on few function
 -       * calls.
 -       */
 -      if (target_freq == policy->cur)
 -              return 0;
 -
        /* Save last value to restore later on errors */
        policy->restore_freq = policy->cur;
  
@@@ -2375,9 -2361,6 +2375,9 @@@ static int cpufreq_cpu_callback(struct 
  {
        unsigned int cpu = (unsigned long)hcpu;
  
 +      if (!cpufreq_driver)
 +              return NOTIFY_OK;
 +
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                cpufreq_online(cpu);
@@@ -2544,9 -2527,6 +2544,9 @@@ int cpufreq_register_driver(struct cpuf
  
        pr_debug("trying to register driver %s\n", driver_data->name);
  
 +      /* Register for hotplug notifiers before blocking hotplug. */
 +      register_hotcpu_notifier(&cpufreq_cpu_notifier);
 +
        /* Protect against concurrent CPU online/offline. */
        get_online_cpus();
  
                goto err_if_unreg;
        }
  
 -      register_hotcpu_notifier(&cpufreq_cpu_notifier);
 -      pr_debug("driver %s up and running\n", driver_data->name);
 +      pr_info("driver %s up and running\n", driver_data->name);
  
  out:
        put_online_cpus();
@@@ -2612,7 -2593,7 +2612,7 @@@ int cpufreq_unregister_driver(struct cp
        if (!cpufreq_driver || (driver != cpufreq_driver))
                return -EINVAL;
  
 -      pr_debug("unregistering driver %s\n", driver->name);
 +      pr_info("unregistering driver %s\n", driver->name);
  
        /* Protect against concurrent cpu hotplug */
        get_online_cpus();
  }
  EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
  
- /*
-  * Stop cpufreq at shutdown to make sure it isn't holding any locks
-  * or mutexes when secondary CPUs are halted.
-  */
- static struct syscore_ops cpufreq_syscore_ops = {
-       .shutdown = cpufreq_suspend,
- };
  struct kobject *cpufreq_global_kobject;
  EXPORT_SYMBOL(cpufreq_global_kobject);
  
@@@ -2650,8 -2623,6 +2642,6 @@@ static int __init cpufreq_core_init(voi
        cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
        BUG_ON(!cpufreq_global_kobject);
  
-       register_syscore_ops(&cpufreq_syscore_ops);
        return 0;
  }
  core_initcall(cpufreq_core_init);
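
A minimal sketch (not part of this commit) of how a client might attach to the CPUFREQ_GOVINFO_NOTIFIER chain registered above; the callback and module names and the meaning of the 'data' payload are assumptions, while the notifier constant and registration API come from the diff. The chain is an atomic notifier head, so the callback must not sleep.

  /* Hypothetical consumer sketch of the govinfo notifier chain. */
  #include <linux/cpufreq.h>
  #include <linux/module.h>
  #include <linux/notifier.h>

  static int govinfo_cb(struct notifier_block *nb, unsigned long event,
  		      void *data)
  {
  	/* Runs in atomic context; the 'data' layout is governor-specific
  	 * and assumed here, so only acknowledge the event. */
  	return NOTIFY_OK;
  }

  static struct notifier_block govinfo_nb = {
  	.notifier_call = govinfo_cb,
  };

  static int __init govinfo_client_init(void)
  {
  	return cpufreq_register_notifier(&govinfo_nb,
  					 CPUFREQ_GOVINFO_NOTIFIER);
  }

  static void __exit govinfo_client_exit(void)
  {
  	cpufreq_unregister_notifier(&govinfo_nb, CPUFREQ_GOVINFO_NOTIFIER);
  }

  module_init(govinfo_client_init);
  module_exit(govinfo_client_exit);
  MODULE_LICENSE("GPL");
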
diff --combined drivers/gpu/drm/drm_edid.c
@@@ -91,14 -91,6 +91,14 @@@ struct detailed_mode_closure 
  #define LEVEL_GTF2    2
  #define LEVEL_CVT     3
  
 +/*Enum storing luminance types for HDR blocks in EDID*/
 +enum luminance_value {
 +      NO_LUMINANCE_DATA = 3,
 +      MAXIMUM_LUMINANCE = 4,
 +      FRAME_AVERAGE_LUMINANCE = 5,
 +      MINIMUM_LUMINANCE = 6
 +};
 +
  static struct edid_quirk {
        char vendor[4];
        int product_id;
        /* Medion MD 30217 PG */
        { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
  
+       /* Lenovo G50 */
+       { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
        /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
        { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
  
@@@ -1005,224 -1000,9 +1008,224 @@@ static const struct drm_display_mode ed
         .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 64 - 1920x1080@100Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
 -                 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 +                 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
         .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 +      /* 65 - 1280x720@24Hz */
 +      { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
 +                 3080, 3300, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 66 - 1280x720@25Hz */
 +      { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
 +                 3740, 3960, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 67 - 1280x720@30Hz */
 +      { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
 +                 3080, 3300, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 68 - 1280x720@50Hz */
 +      { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
 +                 1760, 1980, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 69 - 1280x720@60Hz */
 +      { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
 +                 1430, 1650, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 70 - 1280x720@100Hz */
 +      { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
 +                 1760, 1980, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 71 - 1280x720@120Hz */
 +      { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
 +                 1430, 1650, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 72 - 1920x1080@24Hz */
 +      { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
 +                 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 73 - 1920x1080@25Hz */
 +      { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
 +                 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 74 - 1920x1080@30Hz */
 +      { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 +                 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 75 - 1920x1080@50Hz */
 +      { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
 +                 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 76 - 1920x1080@60Hz */
 +      { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
 +                 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 77 - 1920x1080@100Hz */
 +      { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
 +                 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 78 - 1920x1080@120Hz */
 +      { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
 +                 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 79 - 1680x720@24Hz */
 +      { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
 +                 3080, 3300, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 80 - 1680x720@25Hz */
 +      { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
 +                 2948, 3168, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 81 - 1680x720@30Hz */
 +      { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
 +                 2420, 2640, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 82 - 1680x720@50Hz */
 +      { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
 +                 1980, 2200, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 83 - 1680x720@60Hz */
 +      { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
 +                 1980, 2200, 0, 720, 725, 730, 750, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 84 - 1680x720@100Hz */
 +      { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
 +                 1780, 2000, 0, 720, 725, 730, 825, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 85 - 1680x720@120Hz */
 +      { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
 +                 1780, 2000, 0, 720, 725, 730, 825, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 86 - 2560x1080@24Hz */
 +      { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
 +                 3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 87 - 2560x1080@25Hz */
 +      { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
 +                 3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 88 - 2560x1080@30Hz */
 +      { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
 +                 3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 89 - 2560x1080@50Hz */
 +      { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
 +                 3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 90 - 2560x1080@60Hz */
 +      { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
 +                 2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 91 - 2560x1080@100Hz */
 +      { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
 +                 2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 92 - 2560x1080@120Hz */
 +      { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
 +                 3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 93 - 3840x2160p@24Hz 16:9 */
 +      { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
 +                 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 +      /* 94 - 3840x2160p@25Hz 16:9 */
 +      { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
 +                 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 +      /* 95 - 3840x2160p@30Hz 16:9 */
 +      { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
 +                 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 +      /* 96 - 3840x2160p@50Hz 16:9 */
 +      { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
 +                 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 +      /* 97 - 3840x2160p@60Hz 16:9 */
 +      { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
 +                 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 +      /* 98 - 4096x2160p@24Hz 256:135 */
 +      { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
 +                 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
 +      /* 99 - 4096x2160p@25Hz 256:135 */
 +      { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
 +                 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
 +      /* 100 - 4096x2160p@30Hz 256:135 */
 +      { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
 +                 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
 +      /* 101 - 4096x2160p@50Hz 256:135 */
 +      { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
 +                 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
 +      /* 102 - 4096x2160p@60Hz 256:135 */
 +      { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
 +                 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
 +      /* 103 - 3840x2160p@24Hz 64:27 */
 +      { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
 +                 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 104 - 3840x2160p@25Hz 64:27 */
 +      { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
 +                 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 105 - 3840x2160p@30Hz 64:27 */
 +      { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
 +                 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 106 - 3840x2160p@50Hz 64:27 */
 +      { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
 +                 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
 +      /* 107 - 3840x2160p@60Hz 64:27 */
 +      { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
 +                 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
 +                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 +        .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
  };
  
  /*
@@@ -1501,10 -1281,6 +1504,10 @@@ drm_do_probe_ddc_edid(void *data, u8 *b
   * level, drivers must make all reasonable efforts to expose it as an I2C
   * adapter and use drm_get_edid() instead of abusing this function.
   *
 + * The EDID may be overridden using debugfs override_edid or firmware EDID
 + * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
 + * order. Having either of them bypasses actual EDID reads.
 + *
   * Return: Pointer to valid EDID or NULL if we couldn't find any.
   */
  struct edid *drm_do_get_edid(struct drm_connector *connector,
        int i, j = 0, valid_extensions = 0;
        u8 *block, *new;
        bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 +      struct edid *override = NULL;
 +
 +      if (connector->override_edid)
 +              override = drm_edid_duplicate((const struct edid *)
 +                                            connector->edid_blob_ptr->data);
 +
 +      if (!override)
 +              override = drm_load_edid_firmware(connector);
 +
 +      if (!IS_ERR_OR_NULL(override))
 +              return override;
  
        if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
                return NULL;
@@@ -2728,16 -2493,12 +2731,16 @@@ add_detailed_modes(struct drm_connecto
  
        return closure.modes;
  }
 -
 +#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0
  #define AUDIO_BLOCK   0x01
  #define VIDEO_BLOCK     0x02
  #define VENDOR_BLOCK    0x03
  #define SPEAKER_BLOCK 0x04
 +#define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06
 +#define COLORIMETRY_EXTENDED_DATA_BLOCK 0x05
 +#define EXTENDED_TAG  0x07
  #define VIDEO_CAPABILITY_BLOCK        0x07
 +#define Y420_VIDEO_DATA_BLOCK 0x0E
  #define EDID_BASIC_AUDIO      (1 << 6)
  #define EDID_CEA_YCRCB444     (1 << 5)
  #define EDID_CEA_YCRCB422     (1 << 4)
@@@ -3326,21 -3087,6 +3329,21 @@@ static bool cea_db_is_hdmi_vsdb(const u
        return hdmi_id == HDMI_IEEE_OUI;
  }
  
 +static bool cea_db_is_hdmi_hf_vsdb(const u8 *db)
 +{
 +      int hdmi_id;
 +
 +      if (cea_db_tag(db) != VENDOR_BLOCK)
 +              return false;
 +
 +      if (cea_db_payload_len(db) < 7)
 +              return false;
 +
 +      hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
 +
 +      return hdmi_id == HDMI_IEEE_OUI_HF;
 +}
 +
  #define for_each_cea_db(cea, i, start, end) \
        for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
  
@@@ -3464,311 -3210,6 +3467,311 @@@ parse_hdmi_vsdb(struct drm_connector *c
  }
  
  static void
 +parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db)
 +{
 +      u8 len = cea_db_payload_len(db);
 +
 +      if (len < 7)
 +              return;
 +
 +      if (db[4] != 1)
 +              return; /* invalid version */
 +
 +      connector->max_tmds_char = db[5] * 5;
 +      connector->scdc_present = db[6] & (1 << 7);
 +      connector->rr_capable = db[6] & (1 << 6);
 +      connector->flags_3d = db[6] & 0x7;
 +      connector->supports_scramble = connector->scdc_present &&
 +                      (db[6] & (1 << 3));
 +
 +      DRM_DEBUG_KMS("HDMI v2: max TMDS char %d, "
 +                      "scdc %s, "
 +                      "rr %s, "
 +                      "3D flags 0x%x, "
 +                      "scramble %s\n",
 +                      connector->max_tmds_char,
 +                      connector->scdc_present ? "available" : "not available",
 +                      connector->rr_capable ? "capable" : "not capable",
 +                      connector->flags_3d,
 +                      connector->supports_scramble ?
 +                              "supported" : "not supported");
 +}
 +
 +static void
 +drm_hdmi_extract_vsdbs_info(struct drm_connector *connector, struct edid *edid)
 +{
 +      const u8 *cea = drm_find_cea_extension(edid);
 +      const u8 *db = NULL;
 +
 +      if (cea && cea_revision(cea) >= 3) {
 +              int i, start, end;
 +
 +              if (cea_db_offsets(cea, &start, &end))
 +                      return;
 +
 +              for_each_cea_db(cea, i, start, end) {
 +                      db = &cea[i];
 +
 +                      if (cea_db_tag(db) == VENDOR_BLOCK) {
 +                              /* HDMI Vendor-Specific Data Block */
 +                              if (cea_db_is_hdmi_vsdb(db))
 +                                      parse_hdmi_vsdb(connector, db);
 +                              /* HDMI Forum Vendor-Specific Data Block */
 +                              else if (cea_db_is_hdmi_hf_vsdb(db))
 +                                      parse_hdmi_hf_vsdb(connector, db);
 +                      }
 +              }
 +      }
 +}
 +
 +/*
 + * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block
 + * @connector: connector corresponding to the HDMI sink
 + * @db: start of the CEA vendor specific block
 + *
 + * Parses the HDMI VCDB to extract sink info for @connector.
 + */
 +static void
 +drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db)
 +{
 +      /*
 +       * Check if the sink specifies underscan
 +       * support for:
 +       * BIT 5: preferred video format
 +       * BIT 3: IT video format
 +       * BIT 1: CE video format
 +       */
 +
 +      connector->pt_scan_info =
 +              (db[2] & (BIT(4) | BIT(5))) >> 4;
 +      connector->it_scan_info =
 +              (db[2] & (BIT(3) | BIT(2))) >> 2;
 +      connector->ce_scan_info =
 +              db[2] & (BIT(1) | BIT(0));
 +      connector->rgb_qs =
 +              db[2] & BIT(6);
 +      connector->yuv_qs =
 +              db[2] & BIT(7);
 +
 +      DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)",
 +                        (int) connector->pt_scan_info,
 +                        (int) connector->it_scan_info,
 +                        (int) connector->ce_scan_info);
 +      DRM_DEBUG_KMS("rgb_quant_range_select %d", connector->rgb_qs);
 +      DRM_DEBUG_KMS("ycc_quant_range_select %d", connector->yuv_qs);
 +}
 +
 +static bool drm_edid_is_luminance_value_present(
 +u32 block_length, enum luminance_value value)
 +{
 +      return block_length > NO_LUMINANCE_DATA && value <= block_length;
 +}
 +
 +/*
 + * drm_extract_hdr_db - Parse the HDMI HDR extended block
 + * @connector: connector corresponding to the HDMI sink
 + * @db: start of the HDMI HDR extended block
 + *
 + * Parses the HDMI HDR extended block to extract sink info for @connector.
 + */
 +static void
 +drm_extract_hdr_db(struct drm_connector *connector, const u8 *db)
 +{
 +
 +      u8 len = 0;
 +
 +      if (!db) {
 +              DRM_ERROR("invalid db\n");
 +              return;
 +      }
 +
 +      len = db[0] & 0x1f;
 +      /* Byte 3: Electro-Optical Transfer Functions */
 +      connector->hdr_eotf = db[2] & 0x3F;
 +
 +      /* Byte 4: Static Metadata Descriptor Type 1 */
 +      connector->hdr_metadata_type_one = (db[3] & BIT(0));
 +
 +      /* Byte 5: Desired Content Maximum Luminance */
 +      if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE))
 +              connector->hdr_max_luminance =
 +                      db[MAXIMUM_LUMINANCE];
 +
 +      /* Byte 6: Desired Content Max Frame-average Luminance */
 +      if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE))
 +              connector->hdr_avg_luminance =
 +                      db[FRAME_AVERAGE_LUMINANCE];
 +
 +      /* Byte 7: Desired Content Min Luminance */
 +      if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE))
 +              connector->hdr_min_luminance =
 +                      db[MINIMUM_LUMINANCE];
 +
 +      connector->hdr_supported = true;
 +
 +      DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf);
 +      DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one);
 +      DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance);
 +      DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance);
 +      DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance);
 +}
 +
 +/*
 + * drm_extract_colorimetry_db - Parse the HDMI colorimetry extended block
 + * @connector: connector corresponding to the HDMI sink
 + * @db: start of the HDMI colorimetry extended block
 + *
 + * Parses the HDMI colorimetry block to extract sink info for @connector.
 + */
 +static void
 +drm_extract_clrmetry_db(struct drm_connector *connector, const u8 *db)
 +{
 +
 +      if (!db) {
 +              DRM_ERROR("invalid db\n");
 +              return;
 +      }
 +
 +      /* Bit 0: xvYCC_601 */
 +      if (db[2] & BIT(0))
 +              connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_xvYCC_601;
 +      /* Bit 1: xvYCC_709 */
 +      if (db[2] & BIT(1))
 +              connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_xvYCC_709;
 +      /* Bit 2: sYCC_601 */
 +      if (db[2] & BIT(2))
 +              connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_sYCC_601;
 +      /* Bit 3: ADBYCC_601 */
 +      if (db[2] & BIT(3))
 +              connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_ADBYCC_601;
 +      /* Bit 4: ADB_RGB */
 +      if (db[2] & BIT(4))
 +              connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_ADB_RGB;
 +      /* Bit 5: BT2020_CYCC */
 +      if (db[2] & BIT(5))
 +              connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_CYCC;
 +      /* Bit 6: BT2020_YCC */
 +      if (db[2] & BIT(6))
 +              connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_YCC;
 +      /* Bit 7: BT2020_RGB */
 +      if (db[2] & BIT(7))
 +              connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_RGB;
 +
 +      DRM_DEBUG_KMS("colorimetry fmt 0x%x\n", connector->color_enc_fmt);
 +}
 +
 +/*
 + * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks
 + * @connector: connector corresponding to the HDMI sink
 + * @edid: handle to the EDID structure
 + * Parses all extended tag blocks to extract sink info for @connector.
 + */
 +static void
 +drm_hdmi_extract_extended_blk_info(struct drm_connector *connector,
 +struct edid *edid)
 +{
 +      const u8 *cea = drm_find_cea_extension(edid);
 +      const u8 *db = NULL;
 +
 +      if (cea && cea_revision(cea) >= 3) {
 +              int i, start, end;
 +
 +              if (cea_db_offsets(cea, &start, &end))
 +                      return;
 +
 +              for_each_cea_db(cea, i, start, end) {
 +                      db = &cea[i];
 +
 +                      if (cea_db_tag(db) == EXTENDED_TAG) {
 +                              DRM_DEBUG_KMS("found extended tag block = %d\n",
 +                              db[1]);
 +                              switch (db[1]) {
 +                              case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK:
 +                                      drm_extract_vcdb_info(connector, db);
 +                                      break;
 +                              case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK:
 +                                      drm_extract_hdr_db(connector, db);
 +                                      break;
 +                              case COLORIMETRY_EXTENDED_DATA_BLOCK:
 +                                      drm_extract_clrmetry_db(connector, db);
 +                                      break;
 +                              default:
 +                                      break;
 +                              }
 +                      }
 +              }
 +      }
 +}
 +
 +static u8 *
 +drm_edid_find_extended_tag_block(struct edid *edid, int blk_id)
 +{
 +      u8 *db = NULL;
 +      u8 *cea = NULL;
 +
 +      if (!edid) {
 +              pr_err("%s: invalid input\n", __func__);
 +              return NULL;
 +      }
 +
 +      cea = drm_find_cea_extension(edid);
 +
 +      if (cea && cea_revision(cea) >= 3) {
 +              int i, start, end;
 +
 +              if (cea_db_offsets(cea, &start, &end))
 +                      return NULL;
 +
 +              for_each_cea_db(cea, i, start, end) {
 +                      db = &cea[i];
 +                      if ((cea_db_tag(db) == EXTENDED_TAG) &&
 +                              (db[1] == blk_id))
 +                              return db;
 +              }
 +      }
 +      return NULL;
 +}
 +
 +/*
 + * add_YCbCr420VDB_modes - add the modes found in Ycbcr420 VDB block
 + * @connector: connector corresponding to the HDMI sink
 + * @edid: handle to the EDID structure
 + * Parses the YCbCr420 VDB block and adds the modes to @connector.
 + */
 +static int
 +add_YCbCr420VDB_modes(struct drm_connector *connector, struct edid *edid)
 +{
 +
 +      const u8 *db = NULL;
 +      u32 i = 0;
 +      u32 modes = 0;
 +      u32 video_format = 0;
 +      u8 len = 0;
 +
 +      /*Find the YCbCr420 VDB*/
 +      db = drm_edid_find_extended_tag_block(edid, Y420_VIDEO_DATA_BLOCK);
 +      /* Offset to byte 3 */
 +      if (db) {
 +              len = db[0] & 0x1F;
 +              db += 2;
 +              for (i = 0; i < len - 1; i++) {
 +                      struct drm_display_mode *mode;
 +
 +                      video_format = *(db + i) & 0x7F;
 +                      mode = drm_display_mode_from_vic_index(connector,
 +                                      db, len-1, i);
 +                      if (mode) {
 +                              DRM_DEBUG_KMS("Adding mode for vic = %d\n",
 +                              video_format);
 +                              drm_mode_probed_add(connector, mode);
 +                              modes++;
 +                      }
 +              }
 +      }
 +      return modes;
 +}
 +
 +static void
  monitor_name(struct detailed_timing *t, void *data)
  {
        if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
@@@ -3846,9 -3287,6 +3849,9 @@@ void drm_edid_to_eld(struct drm_connect
                                /* HDMI Vendor-Specific Data Block */
                                if (cea_db_is_hdmi_vsdb(db))
                                        parse_hdmi_vsdb(connector, db);
 +                              /* HDMI Forum Vendor-Specific Data Block */
 +                              else if (cea_db_is_hdmi_hf_vsdb(db))
 +                                      parse_hdmi_hf_vsdb(connector, db);
                                break;
                        default:
                                break;
@@@ -4311,10 -3749,6 +4314,10 @@@ static void drm_add_display_info(struc
                        info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
        }
  
 +      /* Extract audio and video latency fields for the sink */
 +      drm_hdmi_extract_vsdbs_info(connector, edid);
 +      /* Extract info from extended tag blocks */
 +      drm_hdmi_extract_extended_blk_info(connector, edid);
        /* HDMI deep color modes supported? Assign to info, if so */
        drm_assign_hdmi_deep_color_info(edid, info, connector);
  
                info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
  }
  
 +static int validate_displayid(u8 *displayid, int length, int idx)
 +{
 +      int i;
 +      u8 csum = 0;
 +      struct displayid_hdr *base;
 +
 +      base = (struct displayid_hdr *)&displayid[idx];
 +
 +      DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
 +                    base->rev, base->bytes, base->prod_id, base->ext_count);
 +
 +      if (base->bytes + 5 > length - idx)
 +              return -EINVAL;
 +      for (i = idx; i <= base->bytes + 5; i++)
 +              csum += displayid[i];
 +
 +      if (csum) {
 +              DRM_ERROR("DisplayID checksum invalid, remainder is %d\n",
 +                                csum);
 +              return -EINVAL;
 +      }
 +      return 0;
 +}
 +
 +static struct drm_display_mode *
 +drm_mode_displayid_detailed(struct drm_device *dev,
 +struct displayid_detailed_timings_1 *timings)
 +{
 +      struct drm_display_mode *mode;
 +      unsigned pixel_clock = (timings->pixel_clock[0] |
 +                              (timings->pixel_clock[1] << 8) |
 +                              (timings->pixel_clock[2] << 16));
 +      unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
 +      unsigned hblank =
 +              (timings->hblank[0] |
 +              timings->hblank[1] << 8) + 1;
 +      unsigned hsync = (timings->hsync[0] |
 +                      (timings->hsync[1] & 0x7f) << 8) + 1;
 +      unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
 +      unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
 +      unsigned vblank =
 +              (timings->vblank[0] |
 +               timings->vblank[1] << 8) + 1;
 +      unsigned vsync =
 +              (timings->vsync[0] |
 +               (timings->vsync[1] & 0x7f) << 8) + 1;
 +      unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
 +      bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
 +      bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
 +
 +      mode = drm_mode_create(dev);
 +      if (!mode)
 +              return NULL;
 +
 +      mode->clock = pixel_clock * 10;
 +      mode->hdisplay = hactive;
 +      mode->hsync_start = mode->hdisplay + hsync;
 +      mode->hsync_end = mode->hsync_start + hsync_width;
 +      mode->htotal = mode->hdisplay + hblank;
 +
 +      mode->vdisplay = vactive;
 +      mode->vsync_start = mode->vdisplay + vsync;
 +      mode->vsync_end = mode->vsync_start + vsync_width;
 +      mode->vtotal = mode->vdisplay + vblank;
 +
 +      mode->flags = 0;
 +      mode->flags |= hsync_positive ?
 +                              DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
 +      mode->flags |= vsync_positive ?
 +                              DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 +      mode->type = DRM_MODE_TYPE_DRIVER;
 +
 +      if (timings->flags & 0x80)
 +              mode->type |= DRM_MODE_TYPE_PREFERRED;
 +      mode->vrefresh = drm_mode_vrefresh(mode);
 +      drm_mode_set_name(mode);
 +
 +      return mode;
 +}
 +
 +static int add_displayid_detailed_1_modes(struct drm_connector *connector,
 +                      struct displayid_block *block)
 +{
 +      struct displayid_detailed_timing_block *det =
 +              (struct displayid_detailed_timing_block *)block;
 +      int i;
 +      int num_timings;
 +      struct drm_display_mode *newmode;
 +      int num_modes = 0;
 +      /* blocks must be multiple of 20 bytes length */
 +      if (block->num_bytes % 20)
 +              return 0;
 +
 +      num_timings = block->num_bytes / 20;
 +      for (i = 0; i < num_timings; i++) {
 +              struct displayid_detailed_timings_1 *timings = &det->timings[i];
 +
 +              newmode = drm_mode_displayid_detailed(connector->dev, timings);
 +              if (!newmode)
 +                      continue;
 +
 +              drm_mode_probed_add(connector, newmode);
 +              num_modes++;
 +      }
 +      return num_modes;
 +}
 +
 +static int add_displayid_detailed_modes(struct drm_connector *connector,
 +                                      struct edid *edid)
 +{
 +      u8 *displayid;
 +      int ret;
 +      int idx = 1;
 +      int length = EDID_LENGTH;
 +      struct displayid_block *block;
 +      int num_modes = 0;
 +
 +      displayid = drm_find_displayid_extension(edid);
 +      if (!displayid)
 +              return 0;
 +
 +      ret = validate_displayid(displayid, length, idx);
 +      if (ret)
 +              return 0;
 +
 +      idx += sizeof(struct displayid_hdr);
 +      while (block = (struct displayid_block *)&displayid[idx],
 +             idx + sizeof(struct displayid_block) <= length &&
 +             idx + sizeof(struct displayid_block) +
 +                 block->num_bytes <= length &&
 +             block->num_bytes > 0) {
 +              idx += block->num_bytes + sizeof(struct displayid_block);
 +              switch (block->tag) {
 +              case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
 +                      num_modes += add_displayid_detailed_1_modes(connector,
 +                                              block);
 +                      break;
 +              }
 +      }
 +      return num_modes;
 +}
 +
  /**
   * drm_add_edid_modes - add modes from EDID data, if available
   * @connector: connector we're probing
@@@ -4544,8 -3836,6 +4547,8 @@@ int drm_add_edid_modes(struct drm_conne
        num_modes += add_established_modes(connector, edid);
        num_modes += add_cea_modes(connector, edid);
        num_modes += add_alternate_cea_modes(connector, edid);
 +      num_modes += add_displayid_detailed_modes(connector, edid);
 +      num_modes += add_YCbCr420VDB_modes(connector, edid);
        if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
                num_modes += add_inferred_modes(connector, edid);
  
@@@ -4758,105 -4048,96 +4761,105 @@@ drm_hdmi_vendor_infoframe_from_display_
  }
  EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
  
 +static int drm_parse_tiled_block(struct drm_connector *connector,
 +                               struct displayid_block *block)
 +{
 +      struct displayid_tiled_block *tile =
 +              (struct displayid_tiled_block *)block;
 +      u16 w, h;
 +      u8 tile_v_loc, tile_h_loc;
 +      u8 num_v_tile, num_h_tile;
 +      struct drm_tile_group *tg;
 +
 +      w = tile->tile_size[0] | tile->tile_size[1] << 8;
 +      h = tile->tile_size[2] | tile->tile_size[3] << 8;
 +
 +      num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
 +      num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
 +      tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
 +      tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
 +
 +      connector->has_tile = true;
 +      if (tile->tile_cap & 0x80)
 +              connector->tile_is_single_monitor = true;
 +
 +      connector->num_h_tile = num_h_tile + 1;
 +      connector->num_v_tile = num_v_tile + 1;
 +      connector->tile_h_loc = tile_h_loc;
 +      connector->tile_v_loc = tile_v_loc;
 +      connector->tile_h_size = w + 1;
 +      connector->tile_v_size = h + 1;
 +
 +      DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
 +      DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
 +      DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
 +                    num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
 +      DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0],
 +                                tile->topology_id[1], tile->topology_id[2]);
 +
 +      tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
 +      if (!tg)
 +              tg = drm_mode_create_tile_group(connector->dev,
 +                              tile->topology_id);
 +
 +      if (!tg)
 +              return -ENOMEM;
 +
 +      if (connector->tile_group != tg) {
 +              /* if we haven't got a pointer,
 +               * take the reference, drop ref to old tile group
 +               */
 +              if (connector->tile_group)
 +                      drm_mode_put_tile_group(connector->dev,
 +                      connector->tile_group);
 +
 +              connector->tile_group = tg;
 +      } else
 +              /* if same tile group, then release the ref we just took. */
 +              drm_mode_put_tile_group(connector->dev, tg);
 +      return 0;
 +}
 +
  static int drm_parse_display_id(struct drm_connector *connector,
                                u8 *displayid, int length,
                                bool is_edid_extension)
  {
        /* if this is an EDID extension the first byte will be 0x70 */
        int idx = 0;
 -      struct displayid_hdr *base;
        struct displayid_block *block;
 -      u8 csum = 0;
 -      int i;
 +      int ret;
  
        if (is_edid_extension)
                idx = 1;
  
 -      base = (struct displayid_hdr *)&displayid[idx];
 -
 -      DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
 -                    base->rev, base->bytes, base->prod_id, base->ext_count);
 -
 -      if (base->bytes + 5 > length - idx)
 -              return -EINVAL;
 -
 -      for (i = idx; i <= base->bytes + 5; i++) {
 -              csum += displayid[i];
 -      }
 -      if (csum) {
 -              DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
 -              return -EINVAL;
 -      }
 +      ret = validate_displayid(displayid, length, idx);
 +      if (ret)
 +              return ret;
  
 -      block = (struct displayid_block *)&displayid[idx + 4];
 -      DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
 -                    block->tag, block->rev, block->num_bytes);
 -
 -      switch (block->tag) {
 -      case DATA_BLOCK_TILED_DISPLAY: {
 -              struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
 -
 -              u16 w, h;
 -              u8 tile_v_loc, tile_h_loc;
 -              u8 num_v_tile, num_h_tile;
 -              struct drm_tile_group *tg;
 -
 -              w = tile->tile_size[0] | tile->tile_size[1] << 8;
 -              h = tile->tile_size[2] | tile->tile_size[3] << 8;
 -
 -              num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
 -              num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
 -              tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
 -              tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
 -
 -              connector->has_tile = true;
 -              if (tile->tile_cap & 0x80)
 -                      connector->tile_is_single_monitor = true;
 -
 -              connector->num_h_tile = num_h_tile + 1;
 -              connector->num_v_tile = num_v_tile + 1;
 -              connector->tile_h_loc = tile_h_loc;
 -              connector->tile_v_loc = tile_v_loc;
 -              connector->tile_h_size = w + 1;
 -              connector->tile_v_size = h + 1;
 -
 -              DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
 -              DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
 -              DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
 -                     num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
 -              DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
 -
 -              tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
 -              if (!tg) {
 -                      tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
 +      idx += sizeof(struct displayid_hdr);
 +      while (block = (struct displayid_block *)&displayid[idx],
 +             idx + sizeof(struct displayid_block) <= length &&
 +             idx + sizeof(struct displayid_block) +
 +                 block->num_bytes <= length &&
 +             block->num_bytes > 0) {
 +              idx += block->num_bytes + sizeof(struct displayid_block);
 +              DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
 +                            block->tag, block->rev, block->num_bytes);
 +
 +              switch (block->tag) {
 +              case DATA_BLOCK_TILED_DISPLAY:
 +                      ret = drm_parse_tiled_block(connector, block);
 +                      if (ret)
 +                              return ret;
 +                      break;
 +              case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
 +                      /* handled in mode gathering code. */
 +                      break;
 +              default:
 +                      DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n",
 +                                                block->tag);
 +                      break;
                }
 -              if (!tg)
 -                      return -ENOMEM;
 -
 -              if (connector->tile_group != tg) {
 -                      /* if we haven't got a pointer,
 -                         take the reference, drop ref to old tile group */
 -                      if (connector->tile_group) {
 -                              drm_mode_put_tile_group(connector->dev, connector->tile_group);
 -                      }
 -                      connector->tile_group = tg;
 -              } else
 -                      /* if same tile group, then release the ref we just took. */
 -                      drm_mode_put_tile_group(connector->dev, tg);
 -      }
 -              break;
 -      default:
 -              printk("unknown displayid tag %d\n", block->tag);
 -              break;
        }
        return 0;
  }
@@@ -679,6 -679,14 +679,14 @@@ sdev_store_delete(struct device *dev, s
                  const char *buf, size_t count)
  {
        struct kernfs_node *kn;
+       struct scsi_device *sdev = to_scsi_device(dev);
+       /*
+        * We need to try to get the module, to avoid the module being
+        * removed during delete.
+        */
+       if (scsi_device_get(sdev))
+               return -ENODEV;
  
        kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
        WARN_ON_ONCE(!kn);
         * state into SDEV_DEL.
         */
        device_remove_file(dev, attr);
-       scsi_remove_device(to_scsi_device(dev));
+       scsi_remove_device(sdev);
        if (kn)
                sysfs_unbreak_active_protection(kn);
+       scsi_device_put(sdev);
        return count;
  };
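/*
 * For reference: scsi_device_get() takes a reference on the scsi_device and
 * pins the owning LLD module via try_module_get(), so the module cannot be
 * unloaded while the delete above is in flight; the scsi_device_put() call
 * drops both again once the device has been removed.
 */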
  static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
@@@ -1056,8 -1065,7 +1065,8 @@@ int scsi_sysfs_add_sdev(struct scsi_dev
        device_enable_async_suspend(&sdev->sdev_gendev);
        scsi_autopm_get_target(starget);
        pm_runtime_set_active(&sdev->sdev_gendev);
 -      pm_runtime_forbid(&sdev->sdev_gendev);
 +      if (!sdev->use_rpm_auto)
 +              pm_runtime_forbid(&sdev->sdev_gendev);
        pm_runtime_enable(&sdev->sdev_gendev);
        scsi_autopm_put_target(starget);
  
@@@ -3,7 -3,7 +3,7 @@@
   *
   * This code is based on drivers/scsi/ufs/ufshcd.c
   * Copyright (C) 2011-2013 Samsung India Software Operations
 - * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 + * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
   *
   * Authors:
   *    Santosh Yaraganavi <santosh.sy@samsung.com>
   */
  
  #include <linux/async.h>
 +#include <scsi/ufs/ioctl.h>
  #include <linux/devfreq.h>
 +#include <linux/nls.h>
 +#include <linux/of.h>
  #include <linux/blkdev.h>
 +#include <asm/unaligned.h>
  
  #include "ufshcd.h"
 -#include "unipro.h"
 +#include "ufshci.h"
 +#include "ufs_quirks.h"
 +#include "ufs-debugfs.h"
 +#include "ufs-qcom.h"
 +
 +#define CREATE_TRACE_POINTS
 +#include <trace/events/ufs.h>
 +
 +#ifdef CONFIG_DEBUG_FS
 +
 +static int ufshcd_tag_req_type(struct request *rq)
 +{
 +      int rq_type = TS_WRITE;
 +
 +      if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
 +              rq_type = TS_NOT_SUPPORTED;
 +      else if (rq->cmd_flags & REQ_FLUSH)
 +              rq_type = TS_FLUSH;
 +      else if (rq_data_dir(rq) == READ)
 +              rq_type = (rq->cmd_flags & REQ_URGENT) ?
 +                      TS_URGENT_READ : TS_READ;
 +      else if (rq->cmd_flags & REQ_URGENT)
 +              rq_type = TS_URGENT_WRITE;
 +
 +      return rq_type;
 +}
 +
 +static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
 +{
 +      ufsdbg_set_err_state(hba);
 +      if (type < UFS_ERR_MAX)
 +              hba->ufs_stats.err_stats[type]++;
 +}
 +
 +static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
 +{
 +      struct request *rq =
 +              hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
 +      u64 **tag_stats = hba->ufs_stats.tag_stats;
 +      int rq_type;
 +
 +      if (!hba->ufs_stats.enabled)
 +              return;
 +
 +      tag_stats[tag][TS_TAG]++;
 +      if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
 +              return;
 +
 +      WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
 +      rq_type = ufshcd_tag_req_type(rq);
 +      if (!(rq_type < 0 || rq_type > TS_NUM_STATS))
 +              tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
 +}
 +
 +static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
 +              struct scsi_cmnd *cmd)
 +{
 +      struct request *rq = cmd ? cmd->request : NULL;
 +
 +      if (rq && rq->cmd_type & REQ_TYPE_FS)
 +              hba->ufs_stats.q_depth--;
 +}
 +
 +static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 +{
 +      int rq_type;
 +      struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
 +      s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
 +              lrbp->issue_time_stamp);
 +
 +      /* update general request statistics */
 +      if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
 +              hba->ufs_stats.req_stats[TS_TAG].min = delta;
 +      hba->ufs_stats.req_stats[TS_TAG].count++;
 +      hba->ufs_stats.req_stats[TS_TAG].sum += delta;
 +      if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
 +              hba->ufs_stats.req_stats[TS_TAG].max = delta;
 +      if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
 +                      hba->ufs_stats.req_stats[TS_TAG].min = delta;
 +
 +      rq_type = ufshcd_tag_req_type(rq);
 +      if (rq_type == TS_NOT_SUPPORTED)
 +              return;
 +
 +      /* update request type specific statistics */
 +      if (hba->ufs_stats.req_stats[rq_type].count == 0)
 +              hba->ufs_stats.req_stats[rq_type].min = delta;
 +      hba->ufs_stats.req_stats[rq_type].count++;
 +      hba->ufs_stats.req_stats[rq_type].sum += delta;
 +      if (delta > hba->ufs_stats.req_stats[rq_type].max)
 +              hba->ufs_stats.req_stats[rq_type].max = delta;
 +      if (delta < hba->ufs_stats.req_stats[rq_type].min)
 +                      hba->ufs_stats.req_stats[rq_type].min = delta;
 +}
 +
 +static void
 +ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
 +{
 +      if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
 +              hba->ufs_stats.query_stats_arr[opcode][idn]++;
 +}
 +
 +#else
 +static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
 +{
 +}
 +
 +static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
 +              struct scsi_cmnd *cmd)
 +{
 +}
 +
 +static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
 +{
 +}
 +
 +static inline
 +void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 +{
 +}
 +
 +static inline
 +void ufshcd_update_query_stats(struct ufs_hba *hba,
 +                             enum query_opcode opcode, u8 idn)
 +{
 +}
 +#endif
 +
 +#define PWR_INFO_MASK 0xF
 +#define PWR_RX_OFFSET 4
 +
 +#define UFSHCD_REQ_SENSE_SIZE 18
  
  #define UFSHCD_ENABLE_INTRS   (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
  #define NOP_OUT_TIMEOUT    30 /* msecs */
  
  /* Query request retries */
 -#define QUERY_REQ_RETRIES 10
 +#define QUERY_REQ_RETRIES 3
  /* Query request timeout */
 -#define QUERY_REQ_TIMEOUT 30 /* msec */
 +#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
  
  /* Task management command timeout */
  #define TM_CMD_TIMEOUT        100 /* msecs */
  
 +/* maximum number of retries for a general UIC command  */
 +#define UFS_UIC_COMMAND_RETRIES 3
 +
  /* maximum number of link-startup retries */
  #define DME_LINKSTARTUP_RETRIES 3
  
 +/* Maximum retries for Hibern8 enter */
 +#define UIC_HIBERN8_ENTER_RETRIES 3
 +
  /* maximum number of reset retries before giving up */
  #define MAX_HOST_RESET_RETRIES 5
  
  /* Interrupt aggregation default timeout, unit: 40us */
  #define INT_AGGR_DEF_TO       0x02
  
 +/* default value of auto suspend is 3 seconds */
 +#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */
 +
 +#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE   10
 +#define UFSHCD_CLK_GATING_DELAY_MS_PERF               50
 +
 +/* IOCTL opcode for command - ufs set device read only */
 +#define UFS_IOCTL_BLKROSET      BLKROSET
 +
 +#define UFSHCD_DEFAULT_LANES_PER_DIRECTION            2
 +
  #define ufshcd_toggle_vreg(_dev, _vreg, _on)                          \
        ({                                                              \
                int _ret;                                               \
                _ret;                                                   \
        })
  
 +#define ufshcd_hex_dump(prefix_str, buf, len) \
 +print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
 +
  static u32 ufs_query_desc_max_size[] = {
        QUERY_DESC_DEVICE_MAX_SIZE,
        QUERY_DESC_CONFIGURAION_MAX_SIZE,
        QUERY_DESC_RFU_MAX_SIZE,
        QUERY_DESC_GEOMETRY_MAZ_SIZE,
        QUERY_DESC_POWER_MAX_SIZE,
 +      QUERY_DESC_HEALTH_MAX_SIZE,
        QUERY_DESC_RFU_MAX_SIZE,
  };
  
@@@ -276,11 -120,9 +276,11 @@@ enum 
  /* UFSHCD UIC layer error flags */
  enum {
        UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
 -      UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
 -      UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
 -      UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
 +      UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
 +      UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
 +      UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
 +      UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
 +      UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
  };
  
  /* Interrupt configuration options */
@@@ -290,8 -132,6 +290,8 @@@ enum 
        UFSHCD_INT_CLEAR,
  };
  
 +#define DEFAULT_UFSHCD_DBG_PRINT_EN   UFSHCD_DBG_PRINT_ALL
 +
  #define ufshcd_set_eh_in_progress(h) \
        (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
  #define ufshcd_eh_in_progress(h) \
@@@ -333,1702 -173,489 +333,1702 @@@ ufs_get_pm_lvl_to_link_pwr_state(enum u
        return ufs_pm_lvl_states[lvl].link_state;
  }
  
 -static void ufshcd_tmc_handler(struct ufs_hba *hba);
 +static inline enum ufs_pm_level
 +ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
 +                                      enum uic_link_state link_state)
 +{
 +      enum ufs_pm_level lvl;
 +
 +      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
 +              if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
 +                      (ufs_pm_lvl_states[lvl].link_state == link_state))
 +                      return lvl;
 +      }
 +
 +      /* if no match is found, return level 0 */
 +      return UFS_PM_LVL_0;
 +}
 +
 +static inline bool ufshcd_is_valid_pm_lvl(int lvl)
 +{
 +      if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
 +              return true;
 +      else
 +              return false;
 +}
 +
 +static irqreturn_t ufshcd_intr(int irq, void *__hba);
 +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
  static void ufshcd_async_scan(void *data, async_cookie_t cookie);
  static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
  static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
  static void ufshcd_hba_exit(struct ufs_hba *hba);
  static int ufshcd_probe_hba(struct ufs_hba *hba);
 -static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 -                               bool skip_ref_clk);
 -static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
 -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 -static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 +static int ufshcd_enable_clocks(struct ufs_hba *hba);
 +static int ufshcd_disable_clocks(struct ufs_hba *hba,
 +                               bool is_gating_context);
 +static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
 +                                            bool is_gating_context);
 +static void ufshcd_hold_all(struct ufs_hba *hba);
 +static void ufshcd_release_all(struct ufs_hba *hba);
 +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
  static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 +static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
  static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 -static irqreturn_t ufshcd_intr(int irq, void *__hba);
 -static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
 -              struct ufs_pa_layer_attr *desired_pwr_mode);
 -static int ufshcd_change_power_mode(struct ufs_hba *hba,
 -                           struct ufs_pa_layer_attr *pwr_mode);
 +static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
 +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 +static void ufshcd_release_all(struct ufs_hba *hba);
 +static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
 +static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
 +static int ufshcd_devfreq_target(struct device *dev,
 +                              unsigned long *freq, u32 flags);
 +static int ufshcd_devfreq_get_dev_status(struct device *dev,
 +              struct devfreq_dev_status *stat);
 +
 +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 +static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
 +      .upthreshold = 35,
 +      .downdifferential = 30,
 +      .simple_scaling = 1,
 +};
 +
 +static void *gov_data = &ufshcd_ondemand_data;
 +#else
 +static void *gov_data;
 +#endif
  
 -static inline int ufshcd_enable_irq(struct ufs_hba *hba)
 +static struct devfreq_dev_profile ufs_devfreq_profile = {
 +      .polling_ms     = 40,
 +      .target         = ufshcd_devfreq_target,
 +      .get_dev_status = ufshcd_devfreq_get_dev_status,
 +};
 +
 +static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
  {
 -      int ret = 0;
 +      return tag >= 0 && tag < hba->nutrs;
 +}
  
 +static inline void ufshcd_enable_irq(struct ufs_hba *hba)
 +{
        if (!hba->is_irq_enabled) {
 -              ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
 -                              hba);
 -              if (ret)
 -                      dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
 -                              __func__, ret);
 +              enable_irq(hba->irq);
                hba->is_irq_enabled = true;
        }
 -
 -      return ret;
  }
  
  static inline void ufshcd_disable_irq(struct ufs_hba *hba)
  {
        if (hba->is_irq_enabled) {
 -              free_irq(hba->irq, hba);
 +              disable_irq(hba->irq);
                hba->is_irq_enabled = false;
        }
  }
  
 -/*
 - * ufshcd_wait_for_register - wait for register value to change
 - * @hba - per-adapter interface
 - * @reg - mmio register offset
 - * @mask - mask to apply to read register value
 - * @val - wait condition
 - * @interval_us - polling interval in microsecs
 - * @timeout_ms - timeout in millisecs
 - *
 - * Returns -ETIMEDOUT on error, zero on success
 - */
 -static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 -              u32 val, unsigned long interval_us, unsigned long timeout_ms)
 +void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
  {
 -      int err = 0;
 -      unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 -
 -      /* ignore bits that we don't intend to wait on */
 -      val = val & mask;
 -
 -      while ((ufshcd_readl(hba, reg) & mask) != val) {
 -              /* wakeup within 50us of expiry */
 -              usleep_range(interval_us, interval_us + 50);
 -
 -              if (time_after(jiffies, timeout)) {
 -                      if ((ufshcd_readl(hba, reg) & mask) != val)
 -                              err = -ETIMEDOUT;
 -                      break;
 -              }
 -      }
 +      unsigned long flags;
 +      bool unblock = false;
  
 -      return err;
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->scsi_block_reqs_cnt--;
 +      unblock = !hba->scsi_block_reqs_cnt;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      if (unblock)
 +              scsi_unblock_requests(hba->host);
  }
 +EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
  
 -/**
 - * ufshcd_get_intr_mask - Get the interrupt bit mask
 - * @hba - Pointer to adapter instance
 - *
 - * Returns interrupt bit mask per version
 - */
 -static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 +static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
  {
 -      if (hba->ufs_version == UFSHCI_VERSION_10)
 -              return INTERRUPT_MASK_ALL_VER_10;
 -      else
 -              return INTERRUPT_MASK_ALL_VER_11;
 +      if (!hba->scsi_block_reqs_cnt++)
 +              scsi_block_requests(hba->host);
  }
  
 -/**
 - * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 - * @hba - Pointer to adapter instance
 - *
 - * Returns UFSHCI version supported by the controller
 - */
 -static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 +void ufshcd_scsi_block_requests(struct ufs_hba *hba)
  {
 -      if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
 -              return ufshcd_vops_get_ufs_hci_version(hba);
 +      unsigned long flags;
  
 -      return ufshcd_readl(hba, REG_UFS_VERSION);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      __ufshcd_scsi_block_requests(hba);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
  }
 +EXPORT_SYMBOL(ufshcd_scsi_block_requests);
  
 -/**
 - * ufshcd_is_device_present - Check if any device connected to
 - *                          the host controller
 - * @hba: pointer to adapter instance
 - *
 - * Returns 1 if device present, 0 if no device detected
 - */
 -static inline int ufshcd_is_device_present(struct ufs_hba *hba)
 +static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
  {
 -      return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
 -                                              DEVICE_PRESENT) ? 1 : 0;
 +      int ret = 0;
 +
 +      if (!hba->pctrl)
 +              return 0;
 +
 +      /* Assert reset if ctrl == true */
 +      if (ctrl)
 +              ret = pinctrl_select_state(hba->pctrl,
 +                      pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
 +      else
 +              ret = pinctrl_select_state(hba->pctrl,
 +                      pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));
 +
 +      if (ret < 0)
 +              dev_err(hba->dev, "%s: %s failed with err %d\n",
 +                      __func__, ctrl ? "Assert" : "Deassert", ret);
 +
 +      return ret;
  }
  
 -/**
 - * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 - * @lrb: pointer to local command reference block
 - *
 - * This function is used to get the OCS field from UTRD
 - * Returns the OCS field in the UTRD
 - */
 -static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 +static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
  {
 -      return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 +      return ufshcd_device_reset_ctrl(hba, true);
  }
  
 -/**
 - * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 - * @task_req_descp: pointer to utp_task_req_desc structure
 - *
 - * This function is used to get the OCS field from UTMRD
 - * Returns the OCS field in the UTMRD
 - */
 -static inline int
 -ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
 +static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
  {
 -      return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
 +      return ufshcd_device_reset_ctrl(hba, false);
  }
  
 -/**
 - * ufshcd_get_tm_free_slot - get a free slot for task management request
 - * @hba: per adapter instance
 - * @free_slot: pointer to variable with available slot value
 - *
 - * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 - * Returns 0 if free slot is not available, else return 1 with tag value
 - * in @free_slot.
 - */
 -static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
 +static int ufshcd_reset_device(struct ufs_hba *hba)
  {
 -      int tag;
 -      bool ret = false;
 +      int ret;
  
 -      if (!free_slot)
 +      /* reset the connected UFS device */
 +      ret = ufshcd_assert_device_reset(hba);
 +      if (ret)
                goto out;
 +      /*
 +       * The reset signal is active low.
 +       * The UFS device shall detect more than or equal to 1us of positive
 +       * The UFS device shall detect a positive or negative RST_n pulse
 +       * width of at least 1us.
 +       * To be on the safe side, keep the reset low for at least 10us.
 +      usleep_range(10, 15);
  
 -      do {
 -              tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
 -              if (tag >= hba->nutmrs)
 -                      goto out;
 -      } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
 -
 -      *free_slot = tag;
 -      ret = true;
 +      ret = ufshcd_deassert_device_reset(hba);
 +      if (ret)
 +              goto out;
 +      /* same as assert, wait for at least 10us after deassert */
 +      usleep_range(10, 15);
  out:
        return ret;
  }
  
 -static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
 +/* replace a non-printable or non-ASCII character with a space */
 +static inline void ufshcd_remove_non_printable(char *val)
  {
 -      clear_bit_unlock(slot, &hba->tm_slots_in_use);
 +      if (!val || !*val)
 +              return;
 +
 +      if (*val < 0x20 || *val > 0x7e)
 +              *val = ' ';
  }
  
 -/**
 - * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 - * @hba: per adapter instance
 - * @pos: position of the bit to be cleared
 - */
 -static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
 +#define UFSHCD_MAX_CMD_LOGGING        200
 +
 +#ifdef CONFIG_TRACEPOINTS
 +static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
 +                      struct ufshcd_cmd_log_entry *entry, u8 opcode)
 +{
 +      if (trace_ufshcd_command_enabled()) {
 +              u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 +
 +              trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
 +                                   entry->doorbell, entry->transfer_len, intr,
 +                                   entry->lba, opcode);
 +      }
 +}
 +#else
 +static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
 +                      struct ufshcd_cmd_log_entry *entry, u8 opcode)
  {
 -      ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
  }
 +#endif
  
 -/**
 - * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 - * @reg: Register value of host controller status
 - *
 - * Returns integer, 0 on Success and positive value if failed
 - */
 -static inline int ufshcd_get_lists_status(u32 reg)
 +#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
 +static void ufshcd_cmd_log_init(struct ufs_hba *hba)
  {
 -      /*
 -       * The mask 0xFF is for the following HCS register bits
 -       * Bit          Description
 -       *  0           Device Present
 -       *  1           UTRLRDY
 -       *  2           UTMRLRDY
 -       *  3           UCRDY
 -       *  4           HEI
 -       *  5           DEI
 -       * 6-7          reserved
 -       */
 -      return (((reg) & (0xFF)) >> 1) ^ (0x07);
 +      /* Allocate log entries */
 +      if (!hba->cmd_log.entries) {
 +              hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
 +                      sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
 +              if (!hba->cmd_log.entries)
 +                      return;
 +              dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
 +                              __func__);
 +      }
  }
  
 -/**
 - * ufshcd_get_uic_cmd_result - Get the UIC command result
 - * @hba: Pointer to adapter instance
 - *
 - * This function gets the result of UIC command completion
 - * Returns 0 on success, non zero value on error
 - */
 -static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 +static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 +                           unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
 +                           sector_t lba, int transfer_len, u8 opcode)
  {
 -      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
 -             MASK_UIC_COMMAND_RESULT;
 +      struct ufshcd_cmd_log_entry *entry;
 +
 +      if (!hba->cmd_log.entries)
 +              return;
 +
 +      entry = &hba->cmd_log.entries[hba->cmd_log.pos];
 +      entry->lun = lun;
 +      entry->str = str;
 +      entry->cmd_type = cmd_type;
 +      entry->cmd_id = cmd_id;
 +      entry->lba = lba;
 +      entry->transfer_len = transfer_len;
 +      entry->idn = idn;
 +      entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      entry->tag = tag;
 +      entry->tstamp = ktime_get();
 +      entry->outstanding_reqs = hba->outstanding_reqs;
 +      entry->seq_num = hba->cmd_log.seq_num;
 +      hba->cmd_log.seq_num++;
 +      hba->cmd_log.pos =
 +                      (hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
 +
 +      ufshcd_add_command_trace(hba, entry, opcode);
  }
  
 -/**
 - * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 +static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 +      unsigned int tag, u8 cmd_id, u8 idn)
 +{
 +      __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
 +                       0xff, (sector_t)-1, -1, -1);
 +}
 +
 +static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
 +{
 +      ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
 +}
 +
 +static void ufshcd_print_cmd_log(struct ufs_hba *hba)
 +{
 +      int i;
 +      int pos;
 +      struct ufshcd_cmd_log_entry *p;
 +
 +      if (!hba->cmd_log.entries)
 +              return;
 +
 +      pos = hba->cmd_log.pos;
 +      for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
 +              p = &hba->cmd_log.entries[pos];
 +              pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
 +
 +              if (ktime_to_us(p->tstamp)) {
 +                      pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
 +                              p->cmd_type, p->str, p->seq_num,
 +                              p->lun, p->cmd_id, (unsigned long long)p->lba,
 +                              p->transfer_len, p->tag, p->doorbell,
 +                              p->outstanding_reqs, p->idn,
 +                              ktime_to_us(p->tstamp));
 +                              usleep_range(1000, 1100);
 +              }
 +      }
 +}
 +#else
 +static void ufshcd_cmd_log_init(struct ufs_hba *hba)
 +{
 +}
 +
 +static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 +                           unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
 +                           sector_t lba, int transfer_len, u8 opcode)
 +{
 +      struct ufshcd_cmd_log_entry entry;
 +
 +      entry.str = str;
 +      entry.lba = lba;
 +      entry.cmd_id = cmd_id;
 +      entry.transfer_len = transfer_len;
 +      entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      entry.tag = tag;
 +
 +      ufshcd_add_command_trace(hba, &entry, opcode);
 +}
 +
 +static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
 +{
 +}
 +
 +static void ufshcd_print_cmd_log(struct ufs_hba *hba)
 +{
 +}
 +#endif
 +
 +#ifdef CONFIG_TRACEPOINTS
 +static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
 +                                      unsigned int tag, const char *str)
 +{
 +      struct ufshcd_lrb *lrbp;
 +      char *cmd_type = NULL;
 +      u8 opcode = 0;
 +      u8 cmd_id = 0, idn = 0;
 +      sector_t lba = -1;
 +      int transfer_len = -1;
 +
 +      lrbp = &hba->lrb[tag];
 +
 +      if (lrbp->cmd) { /* data phase exists */
 +              opcode = (u8)(*lrbp->cmd->cmnd);
 +              if ((opcode == READ_10) || (opcode == WRITE_10)) {
 +                      /*
 +                       * Currently we only fully trace read(10) and write(10)
 +                       * commands
 +                       */
 +                      if (lrbp->cmd->request && lrbp->cmd->request->bio)
 +                              lba =
 +                              lrbp->cmd->request->bio->bi_iter.bi_sector;
 +                      transfer_len = be32_to_cpu(
 +                              lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
 +              }
 +      }
 +
 +      if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
 +              cmd_type = "scsi";
 +              cmd_id = (u8)(*lrbp->cmd->cmnd);
 +      } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 +              if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
 +                      cmd_type = "nop";
 +                      cmd_id = 0;
 +              } else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
 +                      cmd_type = "query";
 +                      cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
 +                      idn = hba->dev_cmd.query.request.upiu_req.idn;
 +              }
 +      }
 +
 +      __ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
 +                       lrbp->lun, lba, transfer_len, opcode);
 +}
 +#else
 +static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
 +                                      unsigned int tag, const char *str)
 +{
 +}
 +#endif
 +
 +static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
 +{
 +      struct ufs_clk_info *clki;
 +      struct list_head *head = &hba->clk_list_head;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
 +              return;
 +
 +      if (!head || list_empty(head))
 +              return;
 +
 +      list_for_each_entry(clki, head, list) {
 +              if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
 +                              clki->max_freq)
 +                      dev_err(hba->dev, "clk: %s, rate: %u\n",
 +                                      clki->name, clki->curr_freq);
 +      }
 +}
 +
 +static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
 +              struct ufs_uic_err_reg_hist *err_hist, char *err_name)
 +{
 +      int i;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
 +              return;
 +
 +      for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
 +              int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
 +
 +              if (err_hist->reg[p] == 0)
 +                      continue;
 +              dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
 +                      err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
 +      }
 +}
 +
 +static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
 +{
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
 +              return;
 +
 +      /*
 +       * hex_dump reads its data without the readl macro. This might
 +       * cause inconsistency issues on some platforms, as the printed
 +       * values may be from the cache and not the most recent value.
 +       * To know whether you are looking at an un-cached version, verify
 +       * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
 +       * invoked during the platform/pci probe function.
 +       */
 +      ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
 +      dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
 +              hba->ufs_version, hba->capabilities);
 +      dev_err(hba->dev,
 +              "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
 +              (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
 +      dev_err(hba->dev,
 +              "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
 +              ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
 +              hba->ufs_stats.hibern8_exit_cnt);
 +
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
 +
 +      ufshcd_print_clk_freqs(hba);
 +
 +      ufshcd_vops_dbg_register_dump(hba, no_sleep);
 +}
 +
 +static void ufshcd_print_host_regs(struct ufs_hba *hba)
 +{
 +      __ufshcd_print_host_regs(hba, false);
 +}
 +
 +static
 +void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
 +{
 +      struct ufshcd_lrb *lrbp;
 +      int prdt_length;
 +      int tag;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
 +              return;
 +
 +      for_each_set_bit(tag, &bitmap, hba->nutrs) {
 +              lrbp = &hba->lrb[tag];
 +
 +              dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
 +                              tag, ktime_to_us(lrbp->issue_time_stamp));
 +              dev_err(hba->dev,
 +                      "UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
 +                      tag, (u64)lrbp->utrd_dma_addr);
 +              ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
 +                              sizeof(struct utp_transfer_req_desc));
 +              dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
 +                      (u64)lrbp->ucd_req_dma_addr);
 +              ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
 +                              sizeof(struct utp_upiu_req));
 +              dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
 +                      (u64)lrbp->ucd_rsp_dma_addr);
 +              ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
 +                              sizeof(struct utp_upiu_rsp));
 +              prdt_length =
 +                      le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
 +              dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries  phys@0x%llx",
 +                      tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
 +              if (pr_prdt)
 +                      ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
 +                              sizeof(struct ufshcd_sg_entry) * prdt_length);
 +      }
 +}
 +
 +static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 +{
 +      struct utp_task_req_desc *tmrdp;
 +      int tag;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
 +              return;
 +
 +      for_each_set_bit(tag, &bitmap, hba->nutmrs) {
 +              tmrdp = &hba->utmrdl_base_addr[tag];
 +              dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
 +              ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
 +                              sizeof(struct request_desc_header));
 +              dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
 +                              tag);
 +              ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
 +                              sizeof(struct utp_upiu_req));
 +              dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
 +                              tag);
 +              ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
 +                              sizeof(struct utp_task_req_desc));
 +      }
 +}
 +
 +static void ufshcd_print_fsm_state(struct ufs_hba *hba)
 +{
 +      int err = 0, tx_fsm_val = 0, rx_fsm_val = 0;
 +
 +      err = ufshcd_dme_get(hba,
 +                      UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
 +                      UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
 +                      &tx_fsm_val);
 +      dev_err(hba->dev, "%s: TX_FSM_STATE = %u, err = %d\n", __func__,
 +                      tx_fsm_val, err);
 +      err = ufshcd_dme_get(hba,
 +                      UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE,
 +                      UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
 +                      &rx_fsm_val);
 +      dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__,
 +                      rx_fsm_val, err);
 +}
 +
 +static void ufshcd_print_host_state(struct ufs_hba *hba)
 +{
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
 +              return;
 +
 +      dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
 +      dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
 +              hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
 +      dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
 +              hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
 +      dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
 +      dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
 +              hba->pm_op_in_progress, hba->is_sys_suspended);
 +      dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
 +              hba->auto_bkops_enabled, hba->host->host_self_blocked);
 +      dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
 +              hba->clk_gating.state, hba->hibern8_on_idle.state);
 +      dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
 +              hba->eh_flags, hba->req_abort_count);
 +      dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
 +              hba->capabilities, hba->caps);
 +      dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
 +              hba->dev_quirks);
 +}
 +
 +/**
 + * ufshcd_print_pwr_info - print power params as saved in hba
 + * power info
 + * @hba: per-adapter instance
 + */
 +static void ufshcd_print_pwr_info(struct ufs_hba *hba)
 +{
 +      char *names[] = {
 +              "INVALID MODE",
 +              "FAST MODE",
 +              "SLOW_MODE",
 +              "INVALID MODE",
 +              "FASTAUTO_MODE",
 +              "SLOWAUTO_MODE",
 +              "INVALID MODE",
 +      };
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
 +              return;
 +
 +      dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
 +               __func__,
 +               hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
 +               hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
 +               names[hba->pwr_info.pwr_rx],
 +               names[hba->pwr_info.pwr_tx],
 +               hba->pwr_info.hs_rate);
 +}
 +
 +/*
 + * ufshcd_wait_for_register - wait for register value to change
 + * @hba - per-adapter interface
 + * @reg - mmio register offset
 + * @mask - mask to apply to read register value
 + * @val - wait condition
 + * @interval_us - polling interval in microsecs
 + * @timeout_ms - timeout in millisecs
 + * @can_sleep - perform sleep or just spin
 + * Returns -ETIMEDOUT on error, zero on success
 + */
 +int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 +                              u32 val, unsigned long interval_us,
 +                              unsigned long timeout_ms, bool can_sleep)
 +{
 +      int err = 0;
 +      unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 +
 +      /* ignore bits that we don't intend to wait on */
 +      val = val & mask;
 +
 +      while ((ufshcd_readl(hba, reg) & mask) != val) {
 +              if (can_sleep)
 +                      usleep_range(interval_us, interval_us + 50);
 +              else
 +                      udelay(interval_us);
 +              if (time_after(jiffies, timeout)) {
 +                      if ((ufshcd_readl(hba, reg) & mask) != val)
 +                              err = -ETIMEDOUT;
 +                      break;
 +              }
 +      }
 +
 +      return err;
 +}
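/*
 * A minimal usage sketch (the timeout values are illustrative only): poll
 * the enable register until the controller reports "disabled", checking
 * every 10us for at most 10ms and sleeping between reads:
 *
 *   err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 *                                  CONTROLLER_ENABLE, CONTROLLER_DISABLE,
 *                                  10, 10, true);
 */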
 +
 +/**
 + * ufshcd_get_intr_mask - Get the interrupt bit mask
 + * @hba - Pointer to adapter instance
 + *
 + * Returns interrupt bit mask per version
 + */
 +static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 +{
 +      u32 intr_mask = 0;
 +
 +      switch (hba->ufs_version) {
 +      case UFSHCI_VERSION_10:
 +              intr_mask = INTERRUPT_MASK_ALL_VER_10;
 +              break;
 +      /* allow fall through */
 +      case UFSHCI_VERSION_11:
 +      case UFSHCI_VERSION_20:
 +              intr_mask = INTERRUPT_MASK_ALL_VER_11;
 +              break;
 +      /* allow fall through */
 +      case UFSHCI_VERSION_21:
 +      default:
 +              intr_mask = INTERRUPT_MASK_ALL_VER_21;
 +      }
 +
 +      if (!ufshcd_is_crypto_supported(hba))
 +              intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;
 +
 +      return intr_mask;
 +}
 +
 +/**
 + * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 + * @hba - Pointer to adapter instance
 + *
 + * Returns UFSHCI version supported by the controller
 + */
 +static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 +{
 +      if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
 +              return ufshcd_vops_get_ufs_hci_version(hba);
 +
 +      return ufshcd_readl(hba, REG_UFS_VERSION);
 +}
 +
 +/**
 + * ufshcd_is_device_present - Check if any device connected to
 + *                          the host controller
 + * @hba: pointer to adapter instance
 + *
 + * Returns 1 if device present, 0 if no device detected
 + */
 +static inline int ufshcd_is_device_present(struct ufs_hba *hba)
 +{
 +      return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
 +                                              DEVICE_PRESENT) ? 1 : 0;
 +}
 +
 +/**
 + * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 + * @lrb: pointer to local command reference block
 + *
 + * This function is used to get the OCS field from UTRD
 + * Returns the OCS field in the UTRD
 + */
 +static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 +{
 +      return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 +}
 +
 +/**
 + * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 + * @task_req_descp: pointer to utp_task_req_desc structure
 + *
 + * This function is used to get the OCS field from UTMRD
 + * Returns the OCS field in the UTMRD
 + */
 +static inline int
 +ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
 +{
 +      return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
 +}
 +
 +/**
 + * ufshcd_get_tm_free_slot - get a free slot for task management request
 + * @hba: per adapter instance
 + * @free_slot: pointer to variable with available slot value
 + *
 + * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 + * Returns 0 if free slot is not available, else return 1 with tag value
 + * in @free_slot.
 + */
 +static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
 +{
 +      int tag;
 +      bool ret = false;
 +
 +      if (!free_slot)
 +              goto out;
 +
 +      do {
 +              tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
 +              if (tag >= hba->nutmrs)
 +                      goto out;
 +      } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
 +
 +      *free_slot = tag;
 +      ret = true;
 +out:
 +      return ret;
 +}
 +
 +static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
 +{
 +      clear_bit_unlock(slot, &hba->tm_slots_in_use);
 +}
 +
 +/**
 + * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 + * @hba: per adapter instance
 + * @pos: position of the bit to be cleared
 + */
 +static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
 +{
 +      ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 +}
 +
 +/**
 + * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 + * @hba: per adapter instance
 + * @tag: position of the bit to be cleared
 + */
 +static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
 +{
 +      __clear_bit(tag, &hba->outstanding_reqs);
 +}
 +
 +/**
 + * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 + * @reg: Register value of host controller status
 + *
 + * Returns integer, 0 on Success and positive value if failed
 + */
 +static inline int ufshcd_get_lists_status(u32 reg)
 +{
 +      /*
 +       * The mask 0xFF is for the following HCS register bits
 +       * Bit          Description
 +       *  0           Device Present
 +       *  1           UTRLRDY
 +       *  2           UTMRLRDY
 +       *  3           UCRDY
 +       * 4-7          reserved
 +       */
 +      return ((reg & 0xFF) >> 1) ^ 0x07;
 +}
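/*
 * Worked example of the check above: with Device Present, UTRLRDY, UTMRLRDY
 * and UCRDY all set, reg & 0xFF = 0x0F, shifting right by one gives 0x07 and
 * 0x07 ^ 0x07 = 0 (success); if any of the three ready bits is clear, the
 * XOR leaves that bit set and the result is non-zero.
 */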
 +
 +/**
 + * ufshcd_get_uic_cmd_result - Get the UIC command result
 + * @hba: Pointer to adapter instance
 + *
 + * This function gets the result of UIC command completion
 + * Returns 0 on success, non zero value on error
 + */
 +static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 +{
 +      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
 +             MASK_UIC_COMMAND_RESULT;
 +}
 +
 +/**
 + * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
   * @hba: Pointer to adapter instance
   *
 - * This function gets UIC command argument3
 - * Returns 0 on success, non zero value on error
 + * This function gets UIC command argument3
 + * Returns 0 on success, non zero value on error
 + */
 +static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
 +{
 +      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
 +}
 +
 +/**
 + * ufshcd_get_req_rsp - returns the TR response transaction type
 + * @ucd_rsp_ptr: pointer to response UPIU
 + */
 +static inline int
 +ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
 +}
 +
 +/**
 + * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 + * @ucd_rsp_ptr: pointer to response UPIU
 + *
 + * This function gets the response status and scsi_status from response UPIU
 + * Returns the response result code.
 + */
 +static inline int
 +ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 +}
 +
 +/*
 + * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 + *                            from response UPIU
 + * @ucd_rsp_ptr: pointer to response UPIU
 + *
 + * Return the data segment length.
 + */
 +static inline unsigned int
 +ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 +              MASK_RSP_UPIU_DATA_SEG_LEN;
 +}
 +
 +/**
 + * ufshcd_is_exception_event - Check if the device raised an exception event
 + * @ucd_rsp_ptr: pointer to response UPIU
 + *
 + * The function checks if the device raised an exception event indicated in
 + * the Device Information field of response UPIU.
 + *
 + * Returns true if exception is raised, false otherwise.
 + */
 +static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 +                      MASK_RSP_EXCEPTION_EVENT ? true : false;
 +}
 +
 +/**
 + * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 + * @hba: per adapter instance
 + */
 +static inline void
 +ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 +{
 +      ufshcd_writel(hba, INT_AGGR_ENABLE |
 +                    INT_AGGR_COUNTER_AND_TIMER_RESET,
 +                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +}
 +
 +/**
 + * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 + * @hba: per adapter instance
 + * @cnt: Interrupt aggregation counter threshold
 + * @tmout: Interrupt aggregation timeout value
 + */
 +static inline void
 +ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
 +{
 +      ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
 +                    INT_AGGR_COUNTER_THLD_VAL(cnt) |
 +                    INT_AGGR_TIMEOUT_VAL(tmout),
 +                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +}
 +
 +/**
 + * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 + * @hba: per adapter instance
 + */
 +static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
 +{
 +      ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +}
 +
 +/**
 + * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 + *                    When run-stop registers are set to 1, it indicates the
 + *                    When run-stop registers are set to 1, it indicates to
 + *                    the host controller that it can process requests
 + */
 +static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
 +{
 +      ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
 +                    REG_UTP_TASK_REQ_LIST_RUN_STOP);
 +      ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
 +                    REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
 +}
 +
 +/**
 + * ufshcd_hba_start - Start controller initialization sequence
 + * @hba: per adapter instance
 + */
 +static inline void ufshcd_hba_start(struct ufs_hba *hba)
 +{
 +      u32 val = CONTROLLER_ENABLE;
 +
 +      if (ufshcd_is_crypto_supported(hba))
 +              val |= CRYPTO_GENERAL_ENABLE;
 +      ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
 +}
 +
 +/**
 + * ufshcd_is_hba_active - Get controller state
 + * @hba: per adapter instance
 + *
 + * Returns zero if controller is active, 1 otherwise
 + */
 +static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 +{
 +      return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 +}
 +
 +static const char *ufschd_uic_link_state_to_string(
 +                      enum uic_link_state state)
 +{
 +      switch (state) {
 +      case UIC_LINK_OFF_STATE:        return "OFF";
 +      case UIC_LINK_ACTIVE_STATE:     return "ACTIVE";
 +      case UIC_LINK_HIBERN8_STATE:    return "HIBERN8";
 +      default:                        return "UNKNOWN";
 +      }
 +}
 +
 +static const char *ufschd_ufs_dev_pwr_mode_to_string(
 +                      enum ufs_dev_pwr_mode state)
 +{
 +      switch (state) {
 +      case UFS_ACTIVE_PWR_MODE:       return "ACTIVE";
 +      case UFS_SLEEP_PWR_MODE:        return "SLEEP";
 +      case UFS_POWERDOWN_PWR_MODE:    return "POWERDOWN";
 +      default:                        return "UNKNOWN";
 +      }
 +}
 +
 +u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
 +{
 +      /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
 +      if ((hba->ufs_version == UFSHCI_VERSION_10) ||
 +          (hba->ufs_version == UFSHCI_VERSION_11))
 +              return UFS_UNIPRO_VER_1_41;
 +      else
 +              return UFS_UNIPRO_VER_1_6;
 +}
 +EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
 +
 +static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
 +{
 +      /*
 +       * If both host and device support UniPro ver1.6 or later, PA layer
 +       * parameters tuning happens during link startup itself.
 +       *
 +       * We can manually tune PA layer parameters if either host or device
 +       * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
 +       * logic simple, we will only do manual tuning if local unipro version
 +       * doesn't support ver1.6 or later.
 +       */
 +      if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
 +              return true;
 +      else
 +              return false;
 +}
 +
 +/**
 + * ufshcd_set_clk_freq - set UFS controller clock frequencies
 + * @hba: per adapter instance
 + * @scale_up: If True, set max possible frequency, otherwise set low frequency
 + *
 + * Returns 0 if successful
 + * Returns < 0 for any other errors
   */
 -static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
 +static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
  {
 -      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
 +      int ret = 0;
 +      struct ufs_clk_info *clki;
 +      struct list_head *head = &hba->clk_list_head;
 +
 +      if (!head || list_empty(head))
 +              goto out;
 +
 +      list_for_each_entry(clki, head, list) {
 +              if (!IS_ERR_OR_NULL(clki->clk)) {
 +                      if (scale_up && clki->max_freq) {
 +                              if (clki->curr_freq == clki->max_freq)
 +                                      continue;
 +
 +                              ret = clk_set_rate(clki->clk, clki->max_freq);
 +                              if (ret) {
 +                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 +                                              __func__, clki->name,
 +                                              clki->max_freq, ret);
 +                                      break;
 +                              }
 +                              trace_ufshcd_clk_scaling(dev_name(hba->dev),
 +                                              "scaled up", clki->name,
 +                                              clki->curr_freq,
 +                                              clki->max_freq);
 +                              clki->curr_freq = clki->max_freq;
 +
 +                      } else if (!scale_up && clki->min_freq) {
 +                              if (clki->curr_freq == clki->min_freq)
 +                                      continue;
 +
 +                              ret = clk_set_rate(clki->clk, clki->min_freq);
 +                              if (ret) {
 +                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 +                                              __func__, clki->name,
 +                                              clki->min_freq, ret);
 +                                      break;
 +                              }
 +                              trace_ufshcd_clk_scaling(dev_name(hba->dev),
 +                                              "scaled down", clki->name,
 +                                              clki->curr_freq,
 +                                              clki->min_freq);
 +                              clki->curr_freq = clki->min_freq;
 +                      }
 +              }
 +              dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
 +                              clki->name, clk_get_rate(clki->clk));
 +      }
 +
 +out:
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 + * @hba: per adapter instance
 + * @scale_up: True if scaling up and false if scaling down
 + *
 + * Returns 0 if successful
 + * Returns < 0 for any other errors
 + */
 +static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +
 +      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
 +      if (ret)
 +              return ret;
 +
 +      ret = ufshcd_set_clk_freq(hba, scale_up);
 +      if (ret)
 +              return ret;
 +
 +      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
 +      if (ret) {
 +              ufshcd_set_clk_freq(hba, !scale_up);
 +              return ret;
 +      }
 +
 +      return ret;
 +}
 +
 +static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
 +{
 +      hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
 +      cancel_work_sync(&hba->clk_gating.gate_work);
 +}
 +
 +static void ufshcd_ungate_work(struct work_struct *work)
 +{
 +      int ret;
 +      unsigned long flags;
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                      clk_gating.ungate_work);
 +
 +      ufshcd_cancel_gate_work(hba);
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_gating.state == CLKS_ON) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              goto unblock_reqs;
 +      }
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufshcd_hba_vreg_set_hpm(hba);
 +      ufshcd_enable_clocks(hba);
 +
 +      /* Exit from hibern8 */
 +      if (ufshcd_can_hibern8_during_gating(hba)) {
 +              /* Prevent gating in this path */
 +              hba->clk_gating.is_suspended = true;
 +              if (ufshcd_is_link_hibern8(hba)) {
 +                      ret = ufshcd_uic_hibern8_exit(hba);
 +                      if (ret)
 +                              dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
 +                                      __func__, ret);
 +                      else
 +                              ufshcd_set_link_active(hba);
 +              }
 +              hba->clk_gating.is_suspended = false;
 +      }
 +unblock_reqs:
 +      ufshcd_scsi_unblock_requests(hba);
 +}
 +
 +/**
 + * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 + * Also, exit from hibern8 mode and set the link as active.
 + * @hba: per adapter instance
 + * @async: This indicates whether caller should ungate clocks asynchronously.
 + */
 +int ufshcd_hold(struct ufs_hba *hba, bool async)
 +{
 +      int rc = 0;
 +      unsigned long flags;
 +
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              goto out;
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->clk_gating.active_reqs++;
 +
 +      if (ufshcd_eh_in_progress(hba)) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              return 0;
 +      }
 +
 +start:
 +      switch (hba->clk_gating.state) {
 +      case CLKS_ON:
 +              /*
 +               * Wait for the ungate work to complete if in progress.
 +               * Though the clocks may be in ON state, the link could
 +               * still be in hibern8 state if hibern8 is allowed
 +               * during clock gating.
 +               * Make sure we exit the hibern8 state in addition to
 +               * the clocks being ON.
 +               */
 +              if (ufshcd_can_hibern8_during_gating(hba) &&
 +                  ufshcd_is_link_hibern8(hba)) {
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +                      flush_work(&hba->clk_gating.ungate_work);
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +                      goto start;
 +              }
 +              break;
 +      case REQ_CLKS_OFF:
 +              /*
 +               * If the timer was active but the callback was not running
 +               * we have nothing to do, just change state and return.
 +               */
 +              if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
 +                      hba->clk_gating.state = CLKS_ON;
 +                      trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                              hba->clk_gating.state);
 +                      break;
 +              }
 +              /*
 +               * If we are here, it means gating work is either done or
 +               * currently running. Hence, fall through to cancel gating
 +               * work and to enable clocks.
 +               */
 +      case CLKS_OFF:
 +              __ufshcd_scsi_block_requests(hba);
 +              hba->clk_gating.state = REQ_CLKS_ON;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
 +              queue_work(hba->clk_gating.clk_gating_workq,
 +                              &hba->clk_gating.ungate_work);
 +              /*
 +               * fall through to check if we should wait for this
 +               * work to be done or not.
 +               */
 +      case REQ_CLKS_ON:
 +              if (async) {
 +                      rc = -EAGAIN;
 +                      hba->clk_gating.active_reqs--;
 +                      break;
 +              }
 +
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              flush_work(&hba->clk_gating.ungate_work);
 +              /* Make sure state is CLKS_ON before returning */
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              goto start;
 +      default:
 +              dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 +                              __func__, hba->clk_gating.state);
 +              break;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +out:
 +      hba->ufs_stats.clk_hold.ts = ktime_get();
 +      return rc;
 +}
 +EXPORT_SYMBOL_GPL(ufshcd_hold);
 +
 +static void ufshcd_gate_work(struct work_struct *work)
 +{
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                              clk_gating.gate_work);
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      /*
 +       * If you are here to cancel this work, the gating state
 +       * would be marked as REQ_CLKS_ON. In that case, save time by
 +       * skipping the gating work and exiting after changing the clock
 +       * state to CLKS_ON.
 +       */
 +      if (hba->clk_gating.is_suspended ||
 +              (hba->clk_gating.state != REQ_CLKS_OFF)) {
 +              hba->clk_gating.state = CLKS_ON;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
 +              goto rel_lock;
 +      }
 +
 +      if (hba->clk_gating.active_reqs
 +              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 +              || hba->lrb_in_use || hba->outstanding_tasks
 +              || hba->active_uic_cmd || hba->uic_async_done)
 +              goto rel_lock;
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
 +          hba->hibern8_on_idle.is_enabled)
 +              /*
 +               * Hibern8 enter work (on idle) needs the clocks to be ON, hence
 +               * make sure that it is flushed before turning off the clocks.
 +               */
 +              flush_delayed_work(&hba->hibern8_on_idle.enter_work);
 +
 +      /* put the link into hibern8 mode before turning off clocks */
 +      if (ufshcd_can_hibern8_during_gating(hba)) {
 +              if (ufshcd_uic_hibern8_enter(hba)) {
 +                      hba->clk_gating.state = CLKS_ON;
 +                      trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                              hba->clk_gating.state);
 +                      goto out;
 +              }
 +              ufshcd_set_link_hibern8(hba);
 +      }
 +
 +      /*
 +       * If auto hibern8 is supported then the link will already
 +       * be in hibern8 state and the ref clock can be gated.
 +       */
 +      if ((ufshcd_is_auto_hibern8_supported(hba) ||
 +           !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
 +              ufshcd_disable_clocks(hba, true);
 +      else
 +              /* If link is active, device ref_clk can't be switched off */
 +              ufshcd_disable_clocks_skip_ref_clk(hba, true);
 +
 +      /* Put the host controller in low power mode if possible */
 +      ufshcd_hba_vreg_set_lpm(hba);
 +
 +      /*
 +       * If you are here to cancel this work, the gating state
 +       * would be marked as REQ_CLKS_ON. In that case, keep the state
 +       * as REQ_CLKS_ON, which would anyway imply that clocks are off
 +       * and a request to turn them on is pending. This way, we keep
 +       * the state machine intact and ultimately prevent the cancel
 +       * work from running multiple times when new requests arrive
 +       * before the current cancel work is done.
 +       */
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_gating.state == REQ_CLKS_OFF) {
 +              hba->clk_gating.state = CLKS_OFF;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
 +      }
 +rel_lock:
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +out:
 +      return;
  }
  
 -/**
 - * ufshcd_get_req_rsp - returns the TR response transaction type
 - * @ucd_rsp_ptr: pointer to response UPIU
 - */
 -static inline int
 -ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
 +/* host lock must be held before calling this variant */
 +static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
  {
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
 -}
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              return;
  
 -/**
 - * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 - * @ucd_rsp_ptr: pointer to response UPIU
 - *
 - * This function gets the response status and scsi_status from response UPIU
 - * Returns the response result code.
 - */
 -static inline int
 -ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
 -{
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 +      hba->clk_gating.active_reqs--;
 +
 +      if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
 +              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 +              || hba->lrb_in_use || hba->outstanding_tasks
 +              || hba->active_uic_cmd || hba->uic_async_done
 +              || ufshcd_eh_in_progress(hba) || no_sched)
 +              return;
 +
 +      hba->clk_gating.state = REQ_CLKS_OFF;
 +      trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
 +      hba->ufs_stats.clk_rel.ts = ktime_get();
 +
 +      hrtimer_start(&hba->clk_gating.gate_hrtimer,
 +                      ms_to_ktime(hba->clk_gating.delay_ms),
 +                      HRTIMER_MODE_REL);
  }
  
 -/*
 - * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 - *                            from response UPIU
 - * @ucd_rsp_ptr: pointer to response UPIU
 - *
 - * Return the data segment length.
 - */
 -static inline unsigned int
 -ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
 +void ufshcd_release(struct ufs_hba *hba, bool no_sched)
  {
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 -              MASK_RSP_UPIU_DATA_SEG_LEN;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      __ufshcd_release(hba, no_sched);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
  }
 +EXPORT_SYMBOL_GPL(ufshcd_release);
  
 -/**
 - * ufshcd_is_exception_event - Check if the device raised an exception event
 - * @ucd_rsp_ptr: pointer to response UPIU
 - *
 - * The function checks if the device raised an exception event indicated in
 - * the Device Information field of response UPIU.
 - *
 - * Returns true if exception is raised, false otherwise.
 - */
 -static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 +static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 -                      MASK_RSP_EXCEPTION_EVENT ? true : false;
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
  }
  
 -/**
 - * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 - * @hba: per adapter instance
 - */
 -static inline void
 -ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 +static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
  {
 -      ufshcd_writel(hba, INT_AGGR_ENABLE |
 -                    INT_AGGR_COUNTER_AND_TIMER_RESET,
 -                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->clk_gating.delay_ms = value;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
  }
  
 -/**
 - * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 - * @hba: per adapter instance
 - * @cnt: Interrupt aggregation counter threshold
 - * @tmout: Interrupt aggregation timeout value
 - */
 -static inline void
 -ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
 +static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
 -                    INT_AGGR_COUNTER_THLD_VAL(cnt) |
 -                    INT_AGGR_TIMEOUT_VAL(tmout),
 -                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%lu\n",
 +                      hba->clk_gating.delay_ms_pwr_save);
  }
  
 -/**
 - * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 - * @hba: per adapter instance
 - */
 -static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
 +static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
  {
 -      ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +      hba->clk_gating.delay_ms_pwr_save = value;
 +      if (ufshcd_is_clkscaling_supported(hba) &&
 +          !hba->clk_scaling.is_scaled_up)
 +              hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
  }
  
 -/**
 - * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 - *                    When run-stop registers are set to 1, it indicates the
 - *                    host controller that it can process the requests
 - * @hba: per adapter instance
 - */
 -static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
 +static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
 -                    REG_UTP_TASK_REQ_LIST_RUN_STOP);
 -      ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
 -                    REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
  }
  
 -/**
 - * ufshcd_hba_start - Start controller initialization sequence
 - * @hba: per adapter instance
 - */
 -static inline void ufshcd_hba_start(struct ufs_hba *hba)
 +static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
  {
 -      ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +      hba->clk_gating.delay_ms_perf = value;
 +      if (ufshcd_is_clkscaling_supported(hba) &&
 +          hba->clk_scaling.is_scaled_up)
 +              hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
  }
  
 -/**
 - * ufshcd_is_hba_active - Get controller state
 - * @hba: per adapter instance
 - *
 - * Returns zero if controller is active, 1 otherwise
 - */
 -static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 +static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
  }
  
 -static void ufshcd_ungate_work(struct work_struct *work)
 +static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
  {
 -      int ret;
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned long flags;
 -      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 -                      clk_gating.ungate_work);
 +      u32 value;
 +
 +      if (kstrtou32(buf, 0, &value))
 +              return -EINVAL;
  
 -      cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 +      value = !!value;
 +      if (value == hba->clk_gating.is_enabled)
 +              goto out;
  
 -      spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->clk_gating.state == CLKS_ON) {
 +      if (value) {
 +              ufshcd_release(hba, false);
 +      } else {
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              hba->clk_gating.active_reqs++;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
 -              goto unblock_reqs;
        }
  
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 -      ufshcd_setup_clocks(hba, true);
 +      hba->clk_gating.is_enabled = value;
 +out:
 +      return count;
 +}
  
 -      /* Exit from hibern8 */
 -      if (ufshcd_can_hibern8_during_gating(hba)) {
 -              /* Prevent gating in this path */
 -              hba->clk_gating.is_suspended = true;
 -              if (ufshcd_is_link_hibern8(hba)) {
 -                      ret = ufshcd_uic_hibern8_exit(hba);
 -                      if (ret)
 -                              dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
 -                                      __func__, ret);
 -                      else
 -                              ufshcd_set_link_active(hba);
 -              }
 -              hba->clk_gating.is_suspended = false;
 +static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
 +                                      struct hrtimer *timer)
 +{
 +      struct ufs_hba *hba = container_of(timer, struct ufs_hba,
 +                                         clk_gating.gate_hrtimer);
 +
 +      queue_work(hba->clk_gating.clk_gating_workq,
 +                              &hba->clk_gating.gate_work);
 +
 +      return HRTIMER_NORESTART;
 +}
 +
 +static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 +{
 +      struct ufs_clk_gating *gating = &hba->clk_gating;
 +      char wq_name[sizeof("ufs_clk_gating_00")];
 +
 +      hba->clk_gating.state = CLKS_ON;
 +
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              return;
 +
 +      /*
 +       * Disable hibern8 during clk gating if
 +       * auto hibern8 is supported
 +       */
 +      if (ufshcd_is_auto_hibern8_supported(hba))
 +              hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 +
 +      INIT_WORK(&gating->gate_work, ufshcd_gate_work);
 +      INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
 +      /*
 +       * Clock gating work must be executed only after auto hibern8
 +       * timeout has expired in the hardware or after aggressive
 +       * hibern8 on idle software timeout. Jiffy based low resolution
 +       * delayed work is not reliable enough to guarantee this, hence
 +       * use a high resolution timer to make sure the gate work is
 +       * scheduled strictly after the hibern8 timeout.
 +       *
 +       * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
 +       */
 +      hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 +      gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
 +
 +      snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
 +                      hba->host->host_no);
 +      hba->clk_gating.clk_gating_workq =
 +              create_singlethread_workqueue(wq_name);
 +
 +      gating->is_enabled = true;
 +
 +      gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
 +      gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
 +
 +      /* start with performance mode */
 +      gating->delay_ms = gating->delay_ms_perf;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              goto scaling_not_supported;
 +
 +      gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
 +      gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
 +      sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
 +      gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
 +      gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
 +
 +      gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
 +      gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
 +      sysfs_attr_init(&gating->delay_perf_attr.attr);
 +      gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
 +      gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &gating->delay_perf_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
 +
 +      goto add_clkgate_enable;
 +
 +scaling_not_supported:
 +      hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
 +      hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
 +      sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
 +      hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
 +      hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
 +
 +add_clkgate_enable:
 +      gating->enable_attr.show = ufshcd_clkgate_enable_show;
 +      gating->enable_attr.store = ufshcd_clkgate_enable_store;
 +      sysfs_attr_init(&gating->enable_attr.attr);
 +      gating->enable_attr.attr.name = "clkgate_enable";
 +      gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &gating->enable_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
 +}
 +
 +static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 +{
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              return;
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              device_remove_file(hba->dev,
 +                                 &hba->clk_gating.delay_pwr_save_attr);
 +              device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
 +      } else {
 +              device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
        }
 -unblock_reqs:
 -      if (ufshcd_is_clkscaling_enabled(hba))
 -              devfreq_resume_device(hba->devfreq);
 -      scsi_unblock_requests(hba->host);
 +      device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
 +      ufshcd_cancel_gate_work(hba);
 +      cancel_work_sync(&hba->clk_gating.ungate_work);
 +      destroy_workqueue(hba->clk_gating.clk_gating_workq);
 +}
 +
 +static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
 +{
 +      ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
 +                       AUTO_HIBERN8_IDLE_TIMER_MASK,
 +                      AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
 +                      REG_AUTO_HIBERN8_IDLE_TIMER);
 +      /* Make sure the timer gets applied before further operations */
 +      mb();
  }
  
  /**
 - * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 - * Also, exit from hibern8 mode and set the link as active.
 + * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
 + *
   * @hba: per adapter instance
 - * @async: This indicates whether caller should ungate clocks asynchronously.
 + * @async: This indicates whether caller wants to exit hibern8 asynchronously.
 + *
 + * Exit from hibern8 mode and set the link as active.
 + *
 + * Return 0 on success, non-zero on failure.
   */
 -int ufshcd_hold(struct ufs_hba *hba, bool async)
 +static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
  {
        int rc = 0;
        unsigned long flags;
  
 -      if (!ufshcd_is_clkgating_allowed(hba))
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba))
                goto out;
 +
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      hba->clk_gating.active_reqs++;
 +      hba->hibern8_on_idle.active_reqs++;
 +
 +      if (ufshcd_eh_in_progress(hba)) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              return 0;
 +      }
  
  start:
 -      switch (hba->clk_gating.state) {
 -      case CLKS_ON:
 -              /*
 -               * Wait for the ungate work to complete if in progress.
 -               * Though the clocks may be in ON state, the link could
 -               * still be in hibner8 state if hibern8 is allowed
 -               * during clock gating.
 -               * Make sure we exit hibern8 state also in addition to
 -               * clocks being ON.
 -               */
 -              if (ufshcd_can_hibern8_during_gating(hba) &&
 -                  ufshcd_is_link_hibern8(hba)) {
 -                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 -                      flush_work(&hba->clk_gating.ungate_work);
 -                      spin_lock_irqsave(hba->host->host_lock, flags);
 -                      goto start;
 -              }
 +      switch (hba->hibern8_on_idle.state) {
 +      case HIBERN8_EXITED:
                break;
 -      case REQ_CLKS_OFF:
 -              if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
 -                      hba->clk_gating.state = CLKS_ON;
 +      case REQ_HIBERN8_ENTER:
 +              if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
 +                      hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +                      trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                              hba->hibern8_on_idle.state);
                        break;
                }
                /*
 -               * If we here, it means gating work is either done or
 -               * currently running. Hence, fall through to cancel gating
 -               * work and to enable clocks.
 +               * If we are here, it means Hibern8 enter work is either done or
 +               * currently running. Hence, fall through to cancel hibern8
 +               * work and exit hibern8.
                 */
 -      case CLKS_OFF:
 -              scsi_block_requests(hba->host);
 -              hba->clk_gating.state = REQ_CLKS_ON;
 -              schedule_work(&hba->clk_gating.ungate_work);
 +      case HIBERN8_ENTERED:
 +              __ufshcd_scsi_block_requests(hba);
 +              hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
 +              schedule_work(&hba->hibern8_on_idle.exit_work);
                /*
                 * fall through to check if we should wait for this
                 * work to be done or not.
                 */
 -      case REQ_CLKS_ON:
 +      case REQ_HIBERN8_EXIT:
                if (async) {
                        rc = -EAGAIN;
 -                      hba->clk_gating.active_reqs--;
 +                      hba->hibern8_on_idle.active_reqs--;
                        break;
 +              } else {
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +                      flush_work(&hba->hibern8_on_idle.exit_work);
 +                      /* Make sure state is HIBERN8_EXITED before returning */
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +                      goto start;
                }
 -
 -              spin_unlock_irqrestore(hba->host->host_lock, flags);
 -              flush_work(&hba->clk_gating.ungate_work);
 -              /* Make sure state is CLKS_ON before returning */
 -              spin_lock_irqsave(hba->host->host_lock, flags);
 -              goto start;
        default:
 -              dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 -                              __func__, hba->clk_gating.state);
 +              dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
 +                              __func__, hba->hibern8_on_idle.state);
                break;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  out:
        return rc;
  }
 -EXPORT_SYMBOL_GPL(ufshcd_hold);
  
 -static void ufshcd_gate_work(struct work_struct *work)
 +/* host lock must be held before calling this variant */
 +static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
 +{
 +      unsigned long delay_in_jiffies;
 +
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba))
 +              return;
 +
 +      hba->hibern8_on_idle.active_reqs--;
 +      BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
 +
 +      if (hba->hibern8_on_idle.active_reqs
 +              || hba->hibern8_on_idle.is_suspended
 +              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 +              || hba->lrb_in_use || hba->outstanding_tasks
 +              || hba->active_uic_cmd || hba->uic_async_done
 +              || ufshcd_eh_in_progress(hba) || no_sched)
 +              return;
 +
 +      hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
 +      trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +              hba->hibern8_on_idle.state);
 +      /*
 +       * Scheduling the delayed work after 1 jiffy can cause the work to
 +       * be scheduled any time from 0 ms to 1000/HZ ms, which is not
 +       * desirable for hibern8 enter work as it may impact performance if
 +       * it gets scheduled almost immediately. Hence make sure that hibern8
 +       * enter work gets scheduled at least 2 jiffies later (any time
 +       * between 1000/HZ ms and 2000/HZ ms).
 +       */
 +      delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
 +      if (delay_in_jiffies == 1)
 +              delay_in_jiffies++;
 +
 +      schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
 +                            delay_in_jiffies);
 +}
 +
 +static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      __ufshcd_hibern8_release(hba, no_sched);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +
 +static void ufshcd_hibern8_enter_work(struct work_struct *work)
  {
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
 -                      clk_gating.gate_work.work);
 +                                         hibern8_on_idle.enter_work.work);
        unsigned long flags;
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->clk_gating.is_suspended) {
 -              hba->clk_gating.state = CLKS_ON;
 +      if (hba->hibern8_on_idle.is_suspended) {
 +              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
                goto rel_lock;
        }
  
 -      if (hba->clk_gating.active_reqs
 +      if (hba->hibern8_on_idle.active_reqs
                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
                || hba->lrb_in_use || hba->outstanding_tasks
                || hba->active_uic_cmd || hba->uic_async_done)
                goto rel_lock;
 
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 -      /* put the link into hibern8 mode before turning off clocks */
 -      if (ufshcd_can_hibern8_during_gating(hba)) {
 -              if (ufshcd_uic_hibern8_enter(hba)) {
 -                      hba->clk_gating.state = CLKS_ON;
 -                      goto out;
 -              }
 -              ufshcd_set_link_hibern8(hba);
 -      }
 -
 -      if (ufshcd_is_clkscaling_enabled(hba)) {
 -              devfreq_suspend_device(hba->devfreq);
 -              hba->clk_scaling.window_start_t = 0;
 +      if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
 +              /* Enter failed */
 +              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
 +              goto out;
        }
 -
 -      if (!ufshcd_is_link_active(hba))
 -              ufshcd_setup_clocks(hba, false);
 -      else
 -              /* If link is active, device ref_clk can't be switched off */
 -              __ufshcd_setup_clocks(hba, false, true);
 +      ufshcd_set_link_hibern8(hba);
  
        /*
 -       * In case you are here to cancel this work the gating state
 -       * would be marked as REQ_CLKS_ON. In this case keep the state
 -       * as REQ_CLKS_ON which would anyway imply that clocks are off
 -       * and a request to turn them on is pending. By doing this way,
 +       * In case you are here to cancel this work the hibern8_on_idle.state
 +       * would be marked as REQ_HIBERN8_EXIT. In this case keep the state
 +       * as REQ_HIBERN8_EXIT which would anyway imply that we are in hibern8
 +       * and a request to exit from it is pending. This way,
         * we keep the state machine intact and this would ultimately
         * prevent the cancel work from running multiple times when there
         * are new requests arriving before the current cancel work is done.
         */
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->clk_gating.state == REQ_CLKS_OFF)
 -              hba->clk_gating.state = CLKS_OFF;
 -
 +      if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
 +              hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
 +      }
  rel_lock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  out:
        return;
  }
  
 -/* host lock must be held before calling this variant */
 -static void __ufshcd_release(struct ufs_hba *hba)
 +static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
 +                                          unsigned long delay_ms)
  {
 -      if (!ufshcd_is_clkgating_allowed(hba))
 -              return;
 -
 -      hba->clk_gating.active_reqs--;
 -
 -      if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
 -              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 -              || hba->lrb_in_use || hba->outstanding_tasks
 -              || hba->active_uic_cmd || hba->uic_async_done)
 -              return;
 -
 -      hba->clk_gating.state = REQ_CLKS_OFF;
 -      schedule_delayed_work(&hba->clk_gating.gate_work,
 -                      msecs_to_jiffies(hba->clk_gating.delay_ms));
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_hold_all(hba);
 +      ufshcd_scsi_block_requests(hba);
 +      down_write(&hba->lock);
 +      /* wait for all the outstanding requests to finish */
 +      ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 +      ufshcd_set_auto_hibern8_timer(hba, delay_ms);
 +      up_write(&hba->lock);
 +      ufshcd_scsi_unblock_requests(hba);
 +      ufshcd_release_all(hba);
 +      pm_runtime_put_sync(hba->dev);
  }
  
 -void ufshcd_release(struct ufs_hba *hba)
 +static void ufshcd_hibern8_exit_work(struct work_struct *work)
  {
 +      int ret;
        unsigned long flags;
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                         hibern8_on_idle.exit_work);
 +
 +      cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      __ufshcd_release(hba);
 +      if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
 +           || ufshcd_is_link_active(hba)) {
 +              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              goto unblock_reqs;
 +      }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      /* Exit from hibern8 */
 +      if (ufshcd_is_link_hibern8(hba)) {
 +              hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
 +              ufshcd_hold(hba, false);
 +              ret = ufshcd_uic_hibern8_exit(hba);
 +              hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
 +              ufshcd_release(hba, false);
 +              if (!ret) {
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +                      ufshcd_set_link_active(hba);
 +                      hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +                      trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                              hba->hibern8_on_idle.state);
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              }
 +      }
 +unblock_reqs:
 +      ufshcd_scsi_unblock_requests(hba);
  }
 -EXPORT_SYMBOL_GPL(ufshcd_release);
  
 -static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 +static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
                struct device_attribute *attr, char *buf)
  {
        struct ufs_hba *hba = dev_get_drvdata(dev);
  
 -      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
 +      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
  }
  
 -static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
 +static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
  {
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned long flags, value;
 +      bool change = true;
  
        if (kstrtoul(buf, 0, &value))
                return -EINVAL;
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      hba->clk_gating.delay_ms = value;
 +      if (hba->hibern8_on_idle.delay_ms == value)
 +              change = false;
 +
 +      if (value >= hba->clk_gating.delay_ms_pwr_save ||
 +          value >= hba->clk_gating.delay_ms_perf) {
 +              dev_err(hba->dev, "hibern8_on_idle_delay (%lu) must be less than both clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
 +                      value, hba->clk_gating.delay_ms_pwr_save,
 +                      hba->clk_gating.delay_ms_perf);
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              return -EINVAL;
 +      }
 +
 +      hba->hibern8_on_idle.delay_ms = value;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      /* Update auto hibern8 timer value if supported */
 +      if (change && ufshcd_is_auto_hibern8_supported(hba) &&
 +          hba->hibern8_on_idle.is_enabled)
 +              __ufshcd_set_auto_hibern8_timer(hba,
 +                                              hba->hibern8_on_idle.delay_ms);
 +
        return count;
  }
  
 -static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 +static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      if (!ufshcd_is_clkgating_allowed(hba))
 -              return;
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
  
 -      hba->clk_gating.delay_ms = 150;
 -      INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 -      INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 +      return snprintf(buf, PAGE_SIZE, "%d\n",
 +                      hba->hibern8_on_idle.is_enabled);
 +}
  
 -      hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
 -      hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
 -      sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
 -      hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
 -      hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 -      if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
 -              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
 +static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags;
 +      u32 value;
 +
 +      if (kstrtou32(buf, 0, &value))
 +              return -EINVAL;
 +
 +      value = !!value;
 +      if (value == hba->hibern8_on_idle.is_enabled)
 +              goto out;
 +
 +      /* Update auto hibern8 timer value if supported */
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              __ufshcd_set_auto_hibern8_timer(hba,
 +                      value ? hba->hibern8_on_idle.delay_ms : value);
 +              goto update;
 +      }
 +
 +      if (value) {
 +              /*
 +               * As clock gating work would wait for the hibern8 enter work
 +               * to finish, clocks would remain on during hibern8 enter work.
 +               */
 +              ufshcd_hold(hba, false);
 +              ufshcd_release_all(hba);
 +      } else {
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              hba->hibern8_on_idle.active_reqs++;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      }
 +
 +update:
 +      hba->hibern8_on_idle.is_enabled = value;
 +out:
 +      return count;
  }
  
 -static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 +static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
  {
 -      if (!ufshcd_is_clkgating_allowed(hba))
 +      /* initialize the state variable here */
 +      hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
 +          !ufshcd_is_auto_hibern8_supported(hba))
                return;
 -      device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
 -      cancel_work_sync(&hba->clk_gating.ungate_work);
 -      cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 +
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              hba->hibern8_on_idle.delay_ms = 1;
 +              hba->hibern8_on_idle.state = AUTO_HIBERN8;
 +              /*
 +               * Disable SW hibern8 enter on idle in case
 +               * auto hibern8 is supported
 +               */
 +              hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
 +      } else {
 +              hba->hibern8_on_idle.delay_ms = 10;
 +              INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
 +                                ufshcd_hibern8_enter_work);
 +              INIT_WORK(&hba->hibern8_on_idle.exit_work,
 +                        ufshcd_hibern8_exit_work);
 +      }
 +
 +      hba->hibern8_on_idle.is_enabled = true;
 +
 +      hba->hibern8_on_idle.delay_attr.show =
 +                                      ufshcd_hibern8_on_idle_delay_show;
 +      hba->hibern8_on_idle.delay_attr.store =
 +                                      ufshcd_hibern8_on_idle_delay_store;
 +      sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
 +      hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
 +      hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
 +
 +      hba->hibern8_on_idle.enable_attr.show =
 +                                      ufshcd_hibern8_on_idle_enable_show;
 +      hba->hibern8_on_idle.enable_attr.store =
 +                                      ufshcd_hibern8_on_idle_enable_store;
 +      sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
 +      hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
 +      hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
 +}
 +
 +static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
 +{
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
 +          !ufshcd_is_auto_hibern8_supported(hba))
 +              return;
 +      device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
 +      device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
 +}
 +
 +static void ufshcd_hold_all(struct ufs_hba *hba)
 +{
 +      ufshcd_hold(hba, false);
 +      ufshcd_hibern8_hold(hba, false);
 +}
 +
 +static void ufshcd_release_all(struct ufs_hba *hba)
 +{
 +      ufshcd_hibern8_release(hba, false);
 +      ufshcd_release(hba, false);
  }
  
  /* Must be called with host lock acquired */
  static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
  {
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      bool queue_resume_work = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
                return;
  
 +      if (!hba->clk_scaling.active_reqs++)
 +              queue_resume_work = true;
 +
 +      if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
 +              return;
 +
 +      if (queue_resume_work)
 +              queue_work(hba->clk_scaling.workq,
 +                         &hba->clk_scaling.resume_work);
 +
 +      if (!hba->clk_scaling.window_start_t) {
 +              hba->clk_scaling.window_start_t = jiffies;
 +              hba->clk_scaling.tot_busy_t = 0;
 +              hba->clk_scaling.is_busy_started = false;
 +      }
 +
        if (!hba->clk_scaling.is_busy_started) {
                hba->clk_scaling.busy_start_t = ktime_get();
                hba->clk_scaling.is_busy_started = true;
@@@ -2315,7 -797,7 +2315,7 @@@ static void ufshcd_clk_scaling_update_b
  {
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
  
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      if (!ufshcd_is_clkscaling_supported(hba))
                return;
  
        if (!hba->outstanding_reqs && scaling->is_busy_started) {
                scaling->is_busy_started = false;
        }
  }
 +
  /**
   * ufshcd_send_command - Send SCSI or device management commands
   * @hba: per adapter instance
   * @task_tag: Task tag of the command
   */
  static inline
 -void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 +int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
  {
 +      int ret = 0;
 +
 +      hba->lrb[task_tag].issue_time_stamp = ktime_get();
 +      hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
        ufshcd_clk_scaling_start_busy(hba);
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      /* Make sure that doorbell is committed immediately */
 +      wmb();
 +      ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
 +      ufshcd_update_tag_stats(hba, task_tag);
 +      return ret;
  }
  
  /**
@@@ -2364,7 -836,7 +2364,7 @@@ static inline void ufshcd_copy_sense_da
  
                memcpy(lrbp->sense_buffer,
                        lrbp->ucd_rsp_ptr->sr.sense_data,
 -                      min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
 +                      min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
        }
  }
  
@@@ -2461,7 -933,6 +2461,7 @@@ ufshcd_dispatch_uic_cmd(struct ufs_hba 
  
        hba->active_uic_cmd = uic_cmd;
  
 +      ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
        /* Write Args */
        ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
        ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@@ -2492,11 -963,6 +2492,11 @@@ ufshcd_wait_for_uic_cmd(struct ufs_hba 
        else
                ret = -ETIMEDOUT;
  
 +      if (ret)
 +              ufsdbg_set_err_state(hba);
 +
 +      ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
 +
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->active_uic_cmd = NULL;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
   * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
   * @hba: per adapter instance
   * @uic_cmd: UIC command
 + * @completion: initialize the completion only if this is set to true
   *
   * Identical to ufshcd_send_uic_cmd() except for the mutex. Must be called
   * with mutex held and host_lock locked.
   * Returns 0 only if success.
   */
  static int
 -__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
 +                    bool completion)
  {
        if (!ufshcd_ready_for_uic_cmd(hba)) {
                dev_err(hba->dev,
                return -EIO;
        }
  
 -      init_completion(&uic_cmd->done);
 +      if (completion)
 +              init_completion(&uic_cmd->done);
  
        ufshcd_dispatch_uic_cmd(hba, uic_cmd);
  
@@@ -2545,25 -1008,19 +2545,25 @@@ ufshcd_send_uic_cmd(struct ufs_hba *hba
        int ret;
        unsigned long flags;
  
 -      ufshcd_hold(hba, false);
 +      hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
 +      ufshcd_hold_all(hba);
        mutex_lock(&hba->uic_cmd_mutex);
        ufshcd_add_delay_before_dme_cmd(hba);
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
 +      ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (!ret)
                ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
  
 +      ufshcd_save_tstamp_of_last_dme_cmd(hba);
        mutex_unlock(&hba->uic_cmd_mutex);
 +      ufshcd_release_all(hba);
 +      hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_UIC, 0, &ret);
  
 -      ufshcd_release(hba);
        return ret;
  }
  
@@@ -2599,7 -1056,6 +2599,7 @@@ static int ufshcd_map_sg(struct ufshcd_
                                cpu_to_le32(lower_32_bits(sg->dma_address));
                        prd_table[i].upper_addr =
                                cpu_to_le32(upper_32_bits(sg->dma_address));
 +                      prd_table[i].reserved = 0;
                }
        } else {
                lrbp->utr_descriptor_ptr->prd_table_length = 0;
@@@ -2650,52 -1106,15 +2650,52 @@@ static void ufshcd_disable_intr(struct 
        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
  }
  
 +static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
 +              struct ufshcd_lrb *lrbp)
 +{
 +      struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
 +      u8 cc_index = 0;
 +      bool enable = false;
 +      u64 dun = 0;
 +      int ret;
 +
 +      /*
 +       * Call vendor specific code to get crypto info for this request:
 +       * enable, crypto config. index, DUN.
 +       * If bypass is set, don't bother setting the other fields.
 +       */
 +      ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
 +      if (ret) {
 +              if (ret != -EAGAIN) {
 +                      dev_err(hba->dev,
 +                              "%s: failed to setup crypto request (%d)\n",
 +                              __func__, ret);
 +              }
 +
 +              return ret;
 +      }
 +
 +      if (!enable)
 +              goto out;
 +
 +      req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
 +      req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
 +      req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
 +out:
 +      return 0;
 +}
 +
  /**
   * ufshcd_prepare_req_desc_hdr() - Fills the requests header
   * descriptor according to request
 + * @hba: per adapter instance
   * @lrbp: pointer to local reference block
   * @upiu_flags: flags required in the header
   * @cmd_dir: requests data direction
   */
 -static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
 -              u32 *upiu_flags, enum dma_data_direction cmd_dir)
 +static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
 +      struct ufshcd_lrb *lrbp, u32 *upiu_flags,
 +      enum dma_data_direction cmd_dir)
  {
        struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
        u32 data_direction;
  
        /* Transfer request descriptor header fields */
        req_desc->header.dword_0 = cpu_to_le32(dword_0);
 -
 +      /* dword_1 is reserved, hence it is set to 0 */
 +      req_desc->header.dword_1 = 0;
        /*
         * assigning invalid value for command status. Controller
         * updates OCS on command completion, with the command
         */
        req_desc->header.dword_2 =
                cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
 +      /* dword_3 is reserved, hence it is set to 0 */
 +      req_desc->header.dword_3 = 0;
 +
 +      req_desc->prd_table_length = 0;
 +
 +      if (ufshcd_is_crypto_supported(hba))
 +              return ufshcd_prepare_crypto_utrd(hba, lrbp);
 +
 +      return 0;
  }
  
  /**
@@@ -2749,7 -1158,6 +2749,7 @@@ stati
  void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
  {
        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
 +      unsigned short cdb_len;
  
        /* command descriptor fields */
        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
        ucd_req_ptr->sc.exp_data_transfer_len =
                cpu_to_be32(lrbp->cmd->sdb.length);
  
 -      memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
 -              (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
 +      cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
 +      memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
 +      if (cdb_len < MAX_CDB_SIZE)
 +              memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
 +                     (MAX_CDB_SIZE - cdb_len));
 +      memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  }
  
  /**
@@@ -2806,7 -1210,6 +2806,7 @@@ static void ufshcd_prepare_utp_query_re
        if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
                memcpy(descp, query->descriptor, len);
  
 +      memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  }
  
  static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
        ucd_req_ptr->header.dword_0 =
                UPIU_HEADER_DWORD(
                        UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
 +      /* clear rest of the fields of basic header */
 +      ucd_req_ptr->header.dword_1 = 0;
 +      ucd_req_ptr->header.dword_2 = 0;
 +
 +      memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  }
  
  /**
@@@ -2839,16 -1237,15 +2839,16 @@@ static int ufshcd_compose_upiu(struct u
        switch (lrbp->command_type) {
        case UTP_CMD_TYPE_SCSI:
                if (likely(lrbp->cmd)) {
 -                      ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
 -                                      lrbp->cmd->sc_data_direction);
 +                      ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
 +                              &upiu_flags, lrbp->cmd->sc_data_direction);
                        ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
                } else {
                        ret = -EINVAL;
                }
                break;
        case UTP_CMD_TYPE_DEV_MANAGE:
 -              ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
 +              ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
 +                      DMA_NONE);
                if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
                        ufshcd_prepare_utp_query_req_upiu(
                                        hba, lrbp, upiu_flags);
@@@ -2885,73 -1282,18 +2885,73 @@@ static inline u8 ufshcd_scsi_to_upiu_lu
                return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
                        | UFS_UPIU_WLUN_ID;
        else
 -              return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
 +              return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
 +}
 +
 +/**
 + * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 + * @upiu_wlun_id: UPIU W-LUN id
 + *
 + * Returns SCSI W-LUN id
 + */
 +static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
 +{
 +      return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
 +}
 +
 +/**
 + * ufshcd_get_write_lock - synchronize between shutdown, scaling &
 + * arrival of requests
 + * @hba: ufs host
 + *
 + * The lock is predominantly held by the shutdown context, thus ensuring
 + * that no requests from any other context can sneak through.
 + */
 +static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
 +{
 +      down_write(&hba->lock);
 +}
 +
 +/**
 + * ufshcd_get_read_lock - synchronize between shutdown, scaling &
 + * arrival of requests
 + * @hba: ufs host
 + *
 + * Returns 1 if the lock was acquired, 0 if the request may proceed
 + * without it, or < 0 on contention.
 + *
 + * After shutdown is initiated, only requests directed to the well known
 + * device lun are allowed through. The sync between scaling & request
 + * issue is maintained as is, and this restructuring syncs shutdown with
 + * them as well.
 + */
 +static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
 +{
 +      int err = 0;
 +
 +      err = down_read_trylock(&hba->lock);
 +      if (err > 0)
 +              goto out;
 +      /* let requests for well known device lun to go through */
 +      if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
 +              return 0;
 +      else if (!ufshcd_is_shutdown_ongoing(hba))
 +              return -EAGAIN;
 +      else
 +              return -EPERM;
 +
 +out:
 +      return err;
  }
  
  /**
 - * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 - * @scsi_lun: UPIU W-LUN id
 + * ufshcd_put_read_lock - synchronize between shutdown, scaling &
 + * arrival of requests
 + * @hba: ufs host
   *
 - * Returns SCSI W-LUN id
 + * Returns none
   */
 -static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
 +static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
  {
 -      return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
 +      up_read(&hba->lock);
  }
  
  /**
@@@ -2968,42 -1310,12 +2968,42 @@@ static int ufshcd_queuecommand(struct S
        unsigned long flags;
        int tag;
        int err = 0;
 +      bool has_read_lock = false;
  
        hba = shost_priv(host);
  
 +      if (!cmd || !cmd->request || !hba)
 +              return -EINVAL;
 +
        tag = cmd->request->tag;
 +      if (!ufshcd_valid_tag(hba, tag)) {
 +              dev_err(hba->dev,
 +                      "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
 +                      __func__, tag, cmd, cmd->request);
 +              BUG();
 +      }
 +
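 +      /*
 +       * ufshcd_get_read_lock() returns 1 with hba->lock held for read,
 +       * 0 when the command may proceed without the lock (device W-LUN
 +       * during shutdown), or a negative error on contention.
 +       */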
 +      err = ufshcd_get_read_lock(hba, cmd->device->lun);
 +      if (unlikely(err < 0)) {
 +              if (err == -EPERM) {
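 +                      /* shutdown is in progress - fail the command without queueing it */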
 +                      set_host_byte(cmd, DID_ERROR);
 +                      cmd->scsi_done(cmd);
 +                      return 0;
 +              }
 +              if (err == -EAGAIN)
 +                      return SCSI_MLQUEUE_HOST_BUSY;
 +      } else if (err == 1) {
 +              has_read_lock = true;
 +      }
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +      /* if error handling is in progress, return host busy */
 +      if (ufshcd_eh_in_progress(hba)) {
 +              err = SCSI_MLQUEUE_HOST_BUSY;
 +              goto out_unlock;
 +      }
 +
        switch (hba->ufshcd_state) {
        case UFSHCD_STATE_OPERATIONAL:
                break;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 +      hba->req_abort_count = 0;
 +
        /* acquire the tag to make sure device cmds don't use it */
        if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
                /*
                goto out;
        }
  
 +      hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
        err = ufshcd_hold(hba, true);
        if (err) {
                err = SCSI_MLQUEUE_HOST_BUSY;
                clear_bit_unlock(tag, &hba->lrb_in_use);
                goto out;
        }
 +      if (ufshcd_is_clkgating_allowed(hba))
 +              WARN_ON(hba->clk_gating.state != CLKS_ON);
 +
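 +      /* bring the link out of hibern8-on-idle before issuing the command */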
 +      err = ufshcd_hibern8_hold(hba, true);
 +      if (err) {
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              err = SCSI_MLQUEUE_HOST_BUSY;
 +              hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
 +              ufshcd_release(hba, true);
 +              goto out;
 +      }
 +      if (ufshcd_is_hibern8_on_idle_allowed(hba))
 +              WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
 +
 +      /* Vote PM QoS for the request */
 +      ufshcd_vops_pm_qos_req_start(hba, cmd->request);
  
        /* IO svc time latency histogram */
 -      if (hba != NULL && cmd->request != NULL) {
 -              if (hba->latency_hist_enabled &&
 -                  (cmd->request->cmd_type == REQ_TYPE_FS)) {
 -                      cmd->request->lat_hist_io_start = ktime_get();
 -                      cmd->request->lat_hist_enabled = 1;
 -              } else
 -                      cmd->request->lat_hist_enabled = 0;
 +      if (hba->latency_hist_enabled &&
 +          (cmd->request->cmd_type == REQ_TYPE_FS)) {
 +              cmd->request->lat_hist_io_start = ktime_get();
 +              cmd->request->lat_hist_enabled = 1;
 +      } else {
 +              cmd->request->lat_hist_enabled = 0;
        }
  
        WARN_ON(hba->clk_gating.state != CLKS_ON);
  
        WARN_ON(lrbp->cmd);
        lrbp->cmd = cmd;
 -      lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
 +      lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
        lrbp->sense_buffer = cmd->sense_buffer;
        lrbp->task_tag = tag;
        lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
        lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
        lrbp->command_type = UTP_CMD_TYPE_SCSI;
 +      lrbp->req_abort_skip = false;
  
        /* form UPIU before issuing the command */
 -      ufshcd_compose_upiu(hba, lrbp);
 +      err = ufshcd_compose_upiu(hba, lrbp);
 +      if (err) {
 +              if (err != -EAGAIN)
 +                      dev_err(hba->dev,
 +                              "%s: failed to compose upiu %d\n",
 +                              __func__, err);
 +
 +              lrbp->cmd = NULL;
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +              goto out;
 +      }
 +
        err = ufshcd_map_sg(lrbp);
        if (err) {
                lrbp->cmd = NULL;
                clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +              goto out;
 +      }
 +
 +      err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
 +      if (err) {
 +              if (err != -EAGAIN)
 +                      dev_err(hba->dev,
 +                              "%s: failed to configure crypto engine %d\n",
 +                              __func__, err);
 +
 +              scsi_dma_unmap(lrbp->cmd);
 +              lrbp->cmd = NULL;
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +
                goto out;
        }
  
 +      /* Make sure descriptors are ready before ringing the doorbell */
 +      wmb();
        /* issue command to the controller */
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_send_command(hba, tag);
 +
 +      err = ufshcd_send_command(hba, tag);
 +      if (err) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              scsi_dma_unmap(lrbp->cmd);
 +              lrbp->cmd = NULL;
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +              ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
 +              dev_err(hba->dev, "%s: failed sending command, %d\n",
 +                                                      __func__, err);
 +              err = DID_ERROR;
 +              goto out;
 +      }
 +
  out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  out:
 +      if (has_read_lock)
 +              ufshcd_put_read_lock(hba);
        return err;
  }
  
@@@ -3185,7 -1428,7 +3185,7 @@@ ufshcd_clear_cmd(struct ufs_hba *hba, i
         */
        err = ufshcd_wait_for_register(hba,
                        REG_UTP_TRANSFER_REQ_DOOR_BELL,
 -                      mask, ~mask, 1000, 1000);
 +                      mask, ~mask, 1000, 1000, true);
  
        return err;
  }
@@@ -3212,7 -1455,6 +3212,7 @@@ ufshcd_dev_cmd_completion(struct ufs_hb
        int resp;
        int err = 0;
  
 +      hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
        resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
  
        switch (resp) {
@@@ -3265,22 -1507,11 +3265,22 @@@ static int ufshcd_wait_for_dev_cmd(stru
  
        if (!time_left) {
                err = -ETIMEDOUT;
 +              dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
 +                      __func__, lrbp->task_tag);
                if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
 -                      /* sucessfully cleared the command, retry if needed */
 +                      /* successfully cleared the command, retry if needed */
                        err = -EAGAIN;
 +              /*
 +               * in case of an error, after clearing the doorbell,
 +               * we also need to clear the outstanding_request
 +               * field in hba
 +               */
 +              ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
        }
  
 +      if (err)
 +              ufsdbg_set_err_state(hba);
 +
        return err;
  }
  
@@@ -3341,15 -1572,6 +3341,15 @@@ static int ufshcd_exec_dev_cmd(struct u
        unsigned long flags;
  
        /*
 +       * May get invoked from shutdown and IOCTL contexts.
 +       * In shutdown context, it comes in with lock acquired.
 +       * In error recovery context, it may come with lock acquired.
 +       */
 +
 +      if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
 +              down_read(&hba->lock);
 +
 +      /*
         * Get free slot, sleep if slots are unavailable.
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by SCSI request timeout.
  
        hba->dev_cmd.complete = &wait;
  
 +      /* Make sure descriptors are ready before ringing the doorbell */
 +      wmb();
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_send_command(hba, tag);
 +      err = ufshcd_send_command(hba, tag);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 -
 +      if (err) {
 +              dev_err(hba->dev, "%s: failed sending command, %d\n",
 +                                                      __func__, err);
 +              goto out_put_tag;
 +      }
        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
  
  out_put_tag:
        ufshcd_put_dev_cmd_tag(hba, tag);
        wake_up(&hba->dev_cmd.tag_wq);
 +      if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
 +              up_read(&hba->lock);
        return err;
  }
  
@@@ -3399,12 -1613,6 +3399,12 @@@ static inline void ufshcd_init_query(st
                struct ufs_query_req **request, struct ufs_query_res **response,
                enum query_opcode opcode, u8 idn, u8 index, u8 selector)
  {
 +      int idn_t = (int)idn;
 +
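 +      /* the error-injection debug hook may override the requested idn */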
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
 +      idn = idn_t;
 +
        *request = &hba->dev_cmd.query.request;
        *response = &hba->dev_cmd.query.response;
        memset(*request, 0, sizeof(struct ufs_query_req));
        (*request)->upiu_req.idn = idn;
        (*request)->upiu_req.index = index;
        (*request)->upiu_req.selector = selector;
 +
 +      ufshcd_update_query_stats(hba, opcode, idn);
 +}
 +
 +static int ufshcd_query_flag_retry(struct ufs_hba *hba,
 +      enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
 +{
 +      int ret;
 +      int retries;
 +
 +      for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
 +              ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
 +              if (ret)
 +                      dev_dbg(hba->dev,
 +                              "%s: failed with error %d, retries %d\n",
 +                              __func__, ret, retries);
 +              else
 +                      break;
 +      }
 +
 +      if (ret)
 +              dev_err(hba->dev,
 +                      "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
 +                      __func__, opcode, idn, ret, retries);
 +      return ret;
  }
  
  /**
   *
   * Returns 0 for success, non-zero in case of failure
   */
 -static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 +int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
                        enum flag_idn idn, bool *flag_res)
  {
        struct ufs_query_req *request = NULL;
        struct ufs_query_res *response = NULL;
        int err, index = 0, selector = 0;
 +      int timeout = QUERY_REQ_TIMEOUT;
  
        BUG_ON(!hba);
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        mutex_lock(&hba->dev_cmd.lock);
        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
                        selector);
                goto out_unlock;
        }
  
 -      err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 +      err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
  
        if (err) {
                dev_err(hba->dev,
                        "%s: Sending flag query for idn %d failed, err = %d\n",
 -                      __func__, idn, err);
 +                      __func__, request->upiu_req.idn, err);
                goto out_unlock;
        }
  
  
  out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
 +EXPORT_SYMBOL(ufshcd_query_flag);
  
  /**
   * ufshcd_query_attr - API function for sending attribute requests
   *
   * Returns 0 for success, non-zero in case of failure
  */
 -static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 +int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
                        enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
  {
        struct ufs_query_req *request = NULL;
  
        BUG_ON(!hba);
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        if (!attr_val) {
                dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
                                __func__, opcode);
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  
        if (err) {
 -              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
 -                              __func__, opcode, idn, err);
 +              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
 +                              __func__, opcode,
 +                              request->upiu_req.idn, index, err);
                goto out_unlock;
        }
  
  out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
  out:
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
 +EXPORT_SYMBOL(ufshcd_query_attr);
  
  /**
 - * ufshcd_query_descriptor - API function for sending descriptor requests
 - * hba: per-adapter instance
 - * opcode: attribute opcode
 - * idn: attribute idn to access
 - * index: index field
 - * selector: selector field
 - * desc_buf: the buffer that contains the descriptor
 - * buf_len: length parameter passed to the device
 + * ufshcd_query_attr_retry() - API function for sending query
 + * attribute with retries
 + * @hba: per-adapter instance
 + * @opcode: attribute opcode
 + * @idn: attribute idn to access
 + * @index: index field
 + * @selector: selector field
 + * @attr_val: the attribute value after the query request
 + * completes
   *
 - * Returns 0 for success, non-zero in case of failure.
 - * The buf_len parameter will contain, on return, the length parameter
 - * received on the response.
 - */
 -static int ufshcd_query_descriptor(struct ufs_hba *hba,
 + * Returns 0 for success, non-zero in case of failure
 + */
 +static int ufshcd_query_attr_retry(struct ufs_hba *hba,
 +      enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
 +      u32 *attr_val)
 +{
 +      int ret = 0;
 +      u32 retries;
 +
 +      for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 +              ret = ufshcd_query_attr(hba, opcode, idn, index,
 +                                              selector, attr_val);
 +              if (ret)
 +                      dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
 +                              __func__, ret, retries);
 +              else
 +                      break;
 +      }
 +
 +      if (ret)
 +              dev_err(hba->dev,
 +                      "%s: query attribute, idn %d, failed with error %d after %d retries\n",
 +                      __func__, idn, ret, retries);
 +      return ret;
 +}
 +
 +static int __ufshcd_query_descriptor(struct ufs_hba *hba,
                        enum query_opcode opcode, enum desc_idn idn, u8 index,
                        u8 selector, u8 *desc_buf, int *buf_len)
  {
  
        BUG_ON(!hba);
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        if (!desc_buf) {
                dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
                                __func__, opcode);
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  
        if (err) {
 -              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
 -                              __func__, opcode, idn, err);
 +              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
 +                              __func__, opcode,
 +                              request->upiu_req.idn, index, err);
                goto out_unlock;
        }
  
  out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
  out:
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
 +      return err;
 +}
 +
 +/**
 + * ufshcd_query_descriptor - API function for sending descriptor requests
 + * @hba: per-adapter instance
 + * @opcode: attribute opcode
 + * @idn: attribute idn to access
 + * @index: index field
 + * @selector: selector field
 + * @desc_buf: the buffer that contains the descriptor
 + * @buf_len: length parameter passed to the device
 + *
 + * Returns 0 for success, non-zero in case of failure.
 + * The buf_len parameter will contain, on return, the length parameter
 + * received on the response.
 + */
 +int ufshcd_query_descriptor(struct ufs_hba *hba,
 +                      enum query_opcode opcode, enum desc_idn idn, u8 index,
 +                      u8 selector, u8 *desc_buf, int *buf_len)
 +{
 +      int err;
 +      int retries;
 +
 +      for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 +              err = __ufshcd_query_descriptor(hba, opcode, idn, index,
 +                                              selector, desc_buf, buf_len);
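 +              /* retry only transient failures; -EINVAL indicates a bad request */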
 +              if (!err || err == -EINVAL)
 +                      break;
 +      }
 +
        return err;
  }
 +EXPORT_SYMBOL(ufshcd_query_descriptor);
  
  /**
   * ufshcd_read_desc_param - read the specified descriptor parameter
@@@ -3754,41 -1877,18 +3754,41 @@@ static int ufshcd_read_desc_param(struc
                                      desc_id, desc_index, 0, desc_buf,
                                      &buff_len);
  
 -      if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
 -          (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
 -           ufs_query_desc_max_size[desc_id])
 -          || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
 -              dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
 -                      __func__, desc_id, param_offset, buff_len, ret);
 -              if (!ret)
 -                      ret = -EINVAL;
 +      if (ret) {
 +              dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
 +                      __func__, desc_id, desc_index, param_offset, ret);
  
                goto out;
        }
  
 +      /* Sanity check */
 +      if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
 +              dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
 +                      __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
 +              ret = -EINVAL;
 +              goto out;
 +      }
 +
 +      /*
 +       * While reading variable size descriptors (like string descriptor),
 +       * some UFS devices may report the "LENGTH" (field in "Transaction
 +       * Specific fields" of Query Response UPIU) same as what was requested
 +       * in Query Request UPIU instead of reporting the actual size of the
 +       * variable size descriptor.
 +       * It is safe to ignore the "LENGTH" field for variable size
 +       * descriptors because the descriptor length can always be derived
 +       * from the descriptor header fields. Hence the length match check
 +       * is imposed only on fixed size descriptors (for which we always
 +       * request the correct size as part of the Query Request UPIU).
 +       */
 +      if ((desc_id != QUERY_DESC_IDN_STRING) &&
 +          (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
 +              dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
 +                      __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
 +              ret = -EINVAL;
 +              goto out;
 +      }
 +
        if (is_kmalloc)
                memcpy(param_read_buf, &desc_buf[param_offset], param_size);
  out:
@@@ -3813,82 -1913,6 +3813,82 @@@ static inline int ufshcd_read_power_des
        return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
  }
  
 +int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
 +{
 +      return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
 +}
 +
 +/**
 + * ufshcd_read_string_desc - read string descriptor
 + * @hba: pointer to adapter instance
 + * @desc_index: descriptor index
 + * @buf: pointer to buffer where descriptor would be read
 + * @size: size of buf
 + * @ascii: if true convert from unicode to ascii characters
 + *
 + * Return 0 in case of success, non-zero otherwise
 + */
 +int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
 +                              u32 size, bool ascii)
 +{
 +      int err = 0;
 +
 +      err = ufshcd_read_desc(hba,
 +                              QUERY_DESC_IDN_STRING, desc_index, buf, size);
 +
 +      if (err) {
 +              dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
 +                      __func__, QUERY_REQ_RETRIES, err);
 +              goto out;
 +      }
 +
 +      if (ascii) {
 +              int desc_len;
 +              int ascii_len;
 +              int i;
 +              char *buff_ascii;
 +
 +              desc_len = buf[0];
 +              /* remove header and divide by 2 to move from UTF16 to UTF8 */
 +              ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
 +              if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
 +                      dev_err(hba->dev, "%s: buffer allocated size is too small\n",
 +                                      __func__);
 +                      err = -ENOMEM;
 +                      goto out;
 +              }
 +
 +              buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
 +              if (!buff_ascii) {
 +                      dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
 +                                      __func__, ascii_len);
 +                      err = -ENOMEM;
 +                      goto out_free_buff;
 +              }
 +
 +              /*
 +               * the descriptor contains string in UTF16 format
 +               * we need to convert to utf-8 so it can be displayed
 +               */
 +              utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
 +                              desc_len - QUERY_DESC_HDR_SIZE,
 +                              UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
 +
 +              /* replace non-printable or non-ASCII characters with spaces */
 +              for (i = 0; i < ascii_len; i++)
 +                      ufshcd_remove_non_printable(&buff_ascii[i]);
 +
 +              memset(buf + QUERY_DESC_HDR_SIZE, 0,
 +                              size - QUERY_DESC_HDR_SIZE);
 +              memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
 +              buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
 +out_free_buff:
 +              kfree(buff_ascii);
 +      }
 +out:
 +      return err;
 +}
 +
  /**
   * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
   * @hba: Pointer to adapter instance
@@@ -3909,7 -1933,7 +3909,7 @@@ static inline int ufshcd_read_unit_desc
         * Unit descriptors are only available for general purpose LUs (LUN id
         * from 0 to 7) and RPMB Well known LU.
         */
 -      if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
 +      if (!ufs_is_valid_unit_desc_lun(lun))
                return -EOPNOTSUPP;
  
        return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@@ -4051,19 -2075,12 +4051,19 @@@ static void ufshcd_host_memory_configur
                                cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
  
                hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
 +              hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
 +                              (i * sizeof(struct utp_transfer_req_desc));
                hba->lrb[i].ucd_req_ptr =
                        (struct utp_upiu_req *)(cmd_descp + i);
 +              hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
                hba->lrb[i].ucd_rsp_ptr =
                        (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
 +              hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
 +                              response_offset;
                hba->lrb[i].ucd_prdt_ptr =
                        (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
 +              hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
 +                              prdt_offset;
        }
  }
  
@@@ -4087,7 -2104,7 +4087,7 @@@ static int ufshcd_dme_link_startup(stru
  
        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret)
 -              dev_err(hba->dev,
 +              dev_dbg(hba->dev,
                        "dme-link-startup: error code %d\n", ret);
        return ret;
  }
@@@ -4123,13 -2140,6 +4123,13 @@@ static inline void ufshcd_add_delay_bef
        usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
  }
  
 +static inline void ufshcd_save_tstamp_of_last_dme_cmd(
 +                      struct ufs_hba *hba)
 +{
 +      if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
 +              hba->last_dme_cmd_tstamp = ktime_get();
 +}
 +
  /**
   * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
   * @hba: per adapter instance
@@@ -4150,10 -2160,6 +4150,10 @@@ int ufshcd_dme_set_attr(struct ufs_hba 
        };
        const char *set = action[!!peer];
        int ret;
 +      int retries = UFS_UIC_COMMAND_RETRIES;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
  
        uic_cmd.command = peer ?
                UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
        uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
        uic_cmd.argument3 = mib_val;
  
 -      ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 +      do {
 +              /* for peer attributes we retry upon failure */
 +              ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 +              if (ret)
 +                      dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
 +                              set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
 +      } while (ret && peer && --retries);
 +
        if (ret)
 -              dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
 -                      set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
 +              dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
 +                      set, UIC_GET_ATTR_ID(attr_sel), mib_val,
 +                      UFS_UIC_COMMAND_RETRIES - retries);
  
        return ret;
  }
@@@ -4197,7 -2195,6 +4197,7 @@@ int ufshcd_dme_get_attr(struct ufs_hba 
        };
        const char *get = action[!!peer];
        int ret;
 +      int retries = UFS_UIC_COMMAND_RETRIES;
        struct ufs_pa_layer_attr orig_pwr_info;
        struct ufs_pa_layer_attr temp_pwr_info;
        bool pwr_mode_change = false;
  
        uic_cmd.command = peer ?
                UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
 +
        uic_cmd.argument1 = attr_sel;
  
 -      ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 -      if (ret) {
 -              dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
 -                      get, UIC_GET_ATTR_ID(attr_sel), ret);
 -              goto out;
 -      }
 +      do {
 +              /* for peer attributes we retry upon failure */
 +              ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 +              if (ret)
 +                      dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
 +                              get, UIC_GET_ATTR_ID(attr_sel), ret);
 +      } while (ret && peer && --retries);
  
 -      if (mib_val)
 +      if (ret)
 +              dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
 +                      get, UIC_GET_ATTR_ID(attr_sel),
 +                      UFS_UIC_COMMAND_RETRIES - retries);
 +
 +      if (mib_val && !ret)
                *mib_val = uic_cmd.argument3;
  
        if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
@@@ -4278,7 -2265,6 +4278,7 @@@ static int ufshcd_uic_pwr_ctrl(struct u
        unsigned long flags;
        u8 status;
        int ret;
 +      bool reenable_intr = false;
  
        mutex_lock(&hba->uic_cmd_mutex);
        init_completion(&uic_async_done);
  
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->uic_async_done = &uic_async_done;
 -      ret = __ufshcd_send_uic_cmd(hba, cmd);
 +      if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
 +              ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
 +              /*
 +               * Make sure UIC command completion interrupt is disabled before
 +               * issuing UIC command.
 +               */
 +              wmb();
 +              reenable_intr = true;
 +      }
 +      ret = __ufshcd_send_uic_cmd(hba, cmd, false);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (ret) {
                dev_err(hba->dev,
                        cmd->command, cmd->argument3, ret);
                goto out;
        }
 -      ret = ufshcd_wait_for_uic_cmd(hba, cmd);
 -      if (ret) {
 +
 +      if (!wait_for_completion_timeout(hba->uic_async_done,
 +                                       msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
                dev_err(hba->dev,
 -                      "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
 -                      cmd->command, cmd->argument3, ret);
 +                      "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
 +                      cmd->command, cmd->argument3);
 +              ret = -ETIMEDOUT;
                goto out;
        }
  
 -      if (!wait_for_completion_timeout(hba->uic_async_done,
 -                                       msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
 -              dev_err(hba->dev,
 -                      "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
 -                      cmd->command, cmd->argument3);
 -              ret = -ETIMEDOUT;
 -              goto out;
 -      }
 +      status = ufshcd_get_upmcrs(hba);
 +      if (status != PWR_LOCAL) {
 +              dev_err(hba->dev,
 +                      "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
 +                      cmd->command, status);
 +              ret = (status != PWR_OK) ? status : -1;
 +      }
 +      ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
 +
 +out:
 +      if (ret) {
 +              ufsdbg_set_err_state(hba);
 +              ufshcd_print_host_state(hba);
 +              ufshcd_print_pwr_info(hba);
 +              ufshcd_print_host_regs(hba);
 +              ufshcd_print_cmd_log(hba);
 +      }
 +
 +      ufshcd_save_tstamp_of_last_dme_cmd(hba);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->active_uic_cmd = NULL;
 +      hba->uic_async_done = NULL;
 +      if (reenable_intr)
 +              ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      mutex_unlock(&hba->uic_cmd_mutex);
 +      return ret;
 +}
 +
 +int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
 +{
 +      unsigned long flags;
 +      int ret = 0;
 +      u32 tm_doorbell;
 +      u32 tr_doorbell;
 +      bool timeout = false, do_last_check = false;
 +      ktime_t start;
 +
 +      ufshcd_hold_all(hba);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      /*
 +       * Wait for all the outstanding tasks/transfer requests.
 +       * Verify by checking the doorbell registers are clear.
 +       */
 +      start = ktime_get();
 +      do {
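 +              /* give up if the controller has left the operational state */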
 +              if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
 +                      ret = -EBUSY;
 +                      goto out;
 +              }
 +
 +              tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 +              tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +              if (!tm_doorbell && !tr_doorbell) {
 +                      timeout = false;
 +                      break;
 +              } else if (do_last_check) {
 +                      break;
 +              }
  
 -      status = ufshcd_get_upmcrs(hba);
 -      if (status != PWR_LOCAL) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              schedule();
 +              if (ktime_to_us(ktime_sub(ktime_get(), start)) >
 +                  wait_timeout_us) {
 +                      timeout = true;
 +                      /*
 +                       * We might have been scheduled out for a long time,
 +                       * so check one last time whether the doorbells have
 +                       * cleared by now.
 +                       */
 +                      do_last_check = true;
 +              }
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +      } while (tm_doorbell || tr_doorbell);
 +
 +      if (timeout) {
                dev_err(hba->dev,
 -                      "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
 -                      cmd->command, status);
 -              ret = (status != PWR_OK) ? status : -1;
 +                      "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
 +                      __func__, tm_doorbell, tr_doorbell);
 +              ret = -EBUSY;
        }
  out:
 -      spin_lock_irqsave(hba->host->host_lock, flags);
 -      hba->uic_async_done = NULL;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 -      mutex_unlock(&hba->uic_cmd_mutex);
 -
 +      ufshcd_release_all(hba);
        return ret;
  }
  
@@@ -4426,149 -2339,33 +4426,149 @@@ static int ufshcd_uic_change_pwr_mode(s
        uic_cmd.command = UIC_CMD_DME_SET;
        uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
        uic_cmd.argument3 = mode;
 -      ufshcd_hold(hba, false);
 +      hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
 +      ufshcd_hold_all(hba);
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 -      ufshcd_release(hba);
 -
 +      hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
 +      ufshcd_release_all(hba);
  out:
        return ret;
  }
  
 -static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 +static int ufshcd_link_recovery(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      unsigned long flags;
 +
 +      /*
 +       * Check if there is any race with fatal error handling.
 +       * If so, wait for it to complete. Even though fatal error
 +       * handling does reset and restore in some cases, don't assume
 +       * anything out of it. We are just avoiding race here.
 +       */
 +      do {
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              if (!(work_pending(&hba->eh_work) ||
 +                              hba->ufshcd_state == UFSHCD_STATE_RESET))
 +                      break;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
 +              flush_work(&hba->eh_work);
 +      } while (1);
 +
 +      /*
 +       * We don't know whether the previous reset actually reset the host
 +       * controller, so force a reset here to be sure.
 +       */
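 +      /* the host lock is still held here from the loop above */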
 +      hba->ufshcd_state = UFSHCD_STATE_ERROR;
 +      hba->force_host_reset = true;
 +      schedule_work(&hba->eh_work);
 +
 +      /* wait for the reset work to finish */
 +      do {
 +              if (!(work_pending(&hba->eh_work) ||
 +                              hba->ufshcd_state == UFSHCD_STATE_RESET))
 +                      break;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
 +              flush_work(&hba->eh_work);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +      } while (1);
 +
 +      if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
 +            ufshcd_is_link_active(hba)))
 +              ret = -ENOLINK;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      return ret;
 +}
 +
 +static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
  {
 +      int ret;
        struct uic_command uic_cmd = {0};
 +      ktime_t start = ktime_get();
  
        uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
 +      ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 +      trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
 +                           ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 +
 +      /*
 +       * Do full reinit if enter failed or if LINERESET was detected during
 +       * Hibern8 operation. After LINERESET, link moves to default PWM-G1
 +       * mode hence full reinit is required to move link to HS speeds.
 +       */
 +      if (ret || hba->full_init_linereset) {
 +              int err;
 +
 +              hba->full_init_linereset = false;
 +              ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
 +              dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
 +                      __func__, ret);
 +              /*
 +               * If link recovery fails, return the error code (-ENOLINK)
 +               * returned by ufshcd_link_recovery().
 +               * If link recovery succeeds, return -EAGAIN so that the
 +               * hibern8 enter is attempted again.
 +               */
 +              err = ufshcd_link_recovery(hba);
 +              if (err) {
 +                      dev_err(hba->dev, "%s: link recovery failed", __func__);
 +                      ret = err;
 +              } else {
 +                      ret = -EAGAIN;
 +              }
 +      } else {
 +              dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
 +                      ktime_to_us(ktime_get()));
 +      }
 +
 +      return ret;
 +}
 +
 +int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 +{
 +      int ret = 0, retries;
  
 -      return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 +      for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
 +              ret = __ufshcd_uic_hibern8_enter(hba);
 +              if (!ret)
 +                      goto out;
 +              else if (ret != -EAGAIN)
 +                      /* Unable to recover the link, so no point proceeding */
 +                      BUG();
 +      }
 +out:
 +      return ret;
  }
  
 -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 +int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
  {
        struct uic_command uic_cmd = {0};
        int ret;
 +      ktime_t start = ktime_get();
  
        uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 +      trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
 +                           ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 +
 +      /* Do full reinit if exit failed */
        if (ret) {
 -              ufshcd_set_link_off(hba);
 -              ret = ufshcd_host_reset_and_restore(hba);
 +              ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
 +              dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
 +                      __func__, ret);
 +              ret = ufshcd_link_recovery(hba);
 +              /* Unable to recover the link, so no point proceeding */
 +              if (ret)
 +                      BUG();
 +      } else {
 +              dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
 +                      ktime_to_us(ktime_get()));
 +              hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
 +              hba->ufs_stats.hibern8_exit_cnt++;
        }
  
        return ret;
@@@ -4601,8 -2398,8 +4601,8 @@@ static int ufshcd_get_max_pwr_mode(stru
        if (hba->max_pwr_info.is_valid)
                return 0;
  
 -      pwr_info->pwr_tx = FASTAUTO_MODE;
 -      pwr_info->pwr_rx = FASTAUTO_MODE;
 +      pwr_info->pwr_tx = FAST_MODE;
 +      pwr_info->pwr_rx = FAST_MODE;
        pwr_info->hs_rate = PA_HS_MODE_B;
  
        /* Get the connected lane count */
                                __func__, pwr_info->gear_rx);
                        return -EINVAL;
                }
 -              pwr_info->pwr_rx = SLOWAUTO_MODE;
 +              pwr_info->pwr_rx = SLOW_MODE;
        }
  
        ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
                                __func__, pwr_info->gear_tx);
                        return -EINVAL;
                }
 -              pwr_info->pwr_tx = SLOWAUTO_MODE;
 +              pwr_info->pwr_tx = SLOW_MODE;
        }
  
        hba->max_pwr_info.is_valid = true;
        return 0;
  }
  
 -static int ufshcd_change_power_mode(struct ufs_hba *hba,
 +int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode)
  {
 -      int ret;
 +      int ret = 0;
  
        /* if already configured to the requested pwr_mode */
 -      if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
 -          pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
 +      if (!hba->restore_needed &&
 +              pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
 +              pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
            pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
            pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
            pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
                return 0;
        }
  
 +      ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
 +      if (ret)
 +              return ret;
 +
        /*
         * Configure attributes for power mode change with below.
         * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
                                                pwr_mode->hs_rate);
  
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
 +                      DL_FC0ProtectionTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
 +                      DL_TC0ReplayTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
 +                      DL_AFC0ReqTimeOutVal_Default);
 +
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
 +                      DL_FC0ProtectionTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
 +                      DL_TC0ReplayTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
 +                      DL_AFC0ReqTimeOutVal_Default);
 +
        ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
                        | pwr_mode->pwr_tx);
  
        if (ret) {
 +              ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
                dev_err(hba->dev,
                        "%s: power mode change failed %d\n", __func__, ret);
        } else {
  
                memcpy(&hba->pwr_info, pwr_mode,
                        sizeof(struct ufs_pa_layer_attr));
 +              hba->ufs_stats.power_mode_change_cnt++;
        }
  
        return ret;
@@@ -4757,8 -2533,6 +4757,8 @@@ static int ufshcd_config_pwr_mode(struc
                memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
  
        ret = ufshcd_change_power_mode(hba, &final_params);
 +      if (!ret)
 +              ufshcd_print_pwr_info(hba);
  
        return ret;
  }
   */
  static int ufshcd_complete_dev_init(struct ufs_hba *hba)
  {
 -      int i, retries, err = 0;
 +      int i;
 +      int err;
        bool flag_res = 1;
  
 -      for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 -              /* Set the fDeviceInit flag */
 -              err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 -                                      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 -              if (!err || err == -ETIMEDOUT)
 -                      break;
 -              dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
 -      }
 +      err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 +              QUERY_FLAG_IDN_FDEVICEINIT, NULL);
        if (err) {
                dev_err(hba->dev,
                        "%s setting fDeviceInit flag failed with error %d\n",
                goto out;
        }
  
 -      /* poll for max. 100 iterations for fDeviceInit flag to clear */
 -      for (i = 0; i < 100 && !err && flag_res; i++) {
 -              for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 -                      err = ufshcd_query_flag(hba,
 -                                      UPIU_QUERY_OPCODE_READ_FLAG,
 -                                      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 -                      if (!err || err == -ETIMEDOUT)
 -                              break;
 -                      dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
 -                                      err);
 -              }
 -      }
 +      /* poll for max. 1000 iterations for fDeviceInit flag to clear */
 +      for (i = 0; i < 1000 && !err && flag_res; i++)
 +              err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 +                      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 +
        if (err)
                dev_err(hba->dev,
                        "%s reading fDeviceInit flag failed with error %d\n",
@@@ -4809,7 -2595,7 +4809,7 @@@ out
   * To bring UFS host controller to operational state,
   * 1. Enable required interrupts
   * 2. Configure interrupt aggregation
 - * 3. Program UTRL and UTMRL base addres
 + * 3. Program UTRL and UTMRL base address
   * 4. Configure run-stop-registers
   *
   * Returns 0 on success, non-zero value on failure
@@@ -4839,13 -2625,8 +4839,13 @@@ static int ufshcd_make_hba_operational(
                        REG_UTP_TASK_REQ_LIST_BASE_H);
  
        /*
 +       * Make sure base address and interrupt setup are updated before
 +       * enabling the run/stop registers below.
 +       */
 +      wmb();
 +
 +      /*
         * UCRDY, UTMRLDY and UTRLRDY bits must be 1
 -       * DEI, HEI bits must be 0
         */
        reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
        if (!(ufshcd_get_lists_status(reg))) {
  }
  
  /**
 + * ufshcd_hba_stop - Send controller to reset state
 + * @hba: per adapter instance
 + * @can_sleep: perform sleep or just spin
 + */
 +static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
 +{
 +      int err;
 +
 +      ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
 +      err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 +                                      CONTROLLER_ENABLE, CONTROLLER_DISABLE,
 +                                      10, 1, can_sleep);
 +      if (err)
 +              dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
 +}
 +
 +/**
   * ufshcd_hba_enable - initialize the controller
   * @hba: per adapter instance
   *
@@@ -4898,9 -2662,18 +4898,9 @@@ static int ufshcd_hba_enable(struct ufs
         * development and testing of this driver. msleep can be changed to
         * mdelay and retry count can be reduced based on the controller.
         */
 -      if (!ufshcd_is_hba_active(hba)) {
 -
 +      if (!ufshcd_is_hba_active(hba))
                /* change controller state to "reset state" */
 -              ufshcd_hba_stop(hba);
 -
 -              /*
 -               * This delay is based on the testing done with UFS host
 -               * controller FPGA. The delay can be changed based on the
 -               * host controller used.
 -               */
 -              msleep(5);
 -      }
 +              ufshcd_hba_stop(hba, true);
  
        /* UniPro link is disabled at this point */
        ufshcd_set_link_off(hba);
@@@ -4974,11 -2747,6 +4974,11 @@@ static int ufshcd_disable_tx_lcc(struc
        return err;
  }
  
 +static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
 +{
 +      return ufshcd_disable_tx_lcc(hba, false);
 +}
 +
  static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
  {
        return ufshcd_disable_tx_lcc(hba, true);
@@@ -4994,26 -2762,14 +4994,26 @@@ static int ufshcd_link_startup(struct u
  {
        int ret;
        int retries = DME_LINKSTARTUP_RETRIES;
 +      bool link_startup_again = false;
 +
 +      /*
 +       * If the UFS device isn't active then link startup has to be issued
 +       * twice to make sure the device state moves to active.
 +       */
 +      if (!ufshcd_is_ufs_dev_active(hba))
 +              link_startup_again = true;
  
 +link_startup:
        do {
                ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
  
                ret = ufshcd_dme_link_startup(hba);
 +              if (ret)
 +                      ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
  
                /* check if device is detected by inter-connect layer */
                if (!ret && !ufshcd_is_device_present(hba)) {
 +                      ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
                        dev_err(hba->dev, "%s: Device not present\n", __func__);
                        ret = -ENXIO;
                        goto out;
                /* failed to get the link up... retire */
                goto out;
  
 +      if (link_startup_again) {
 +              link_startup_again = false;
 +              retries = DME_LINKSTARTUP_RETRIES;
 +              goto link_startup;
 +      }
 +
 +      /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
 +      ufshcd_init_pwr_info(hba);
 +      ufshcd_print_pwr_info(hba);
 +
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
                ret = ufshcd_disable_device_tx_lcc(hba);
                if (ret)
                        goto out;
        }
  
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
 +              ret = ufshcd_disable_host_tx_lcc(hba);
 +              if (ret)
 +                      goto out;
 +      }
 +
        /* Include any host controller configuration via UIC commands */
        ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
        if (ret)
  
        ret = ufshcd_make_hba_operational(hba);
  out:
 -      if (ret)
 +      if (ret) {
                dev_err(hba->dev, "link startup failed %d\n", ret);
 +              ufshcd_print_host_state(hba);
 +              ufshcd_print_pwr_info(hba);
 +              ufshcd_print_host_regs(hba);
 +      }
        return ret;
  }
  
@@@ -5085,7 -2821,7 +5085,7 @@@ static int ufshcd_verify_dev_init(struc
        int err = 0;
        int retries;
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        mutex_lock(&hba->dev_cmd.lock);
        for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
                dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
        }
        mutex_unlock(&hba->dev_cmd.lock);
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
  
        if (err)
                dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@@ -5123,10 -2859,10 +5123,10 @@@ static void ufshcd_set_queue_depth(stru
  
        lun_qdepth = hba->nutrs;
        ret = ufshcd_read_unit_desc_param(hba,
 -                                        ufshcd_scsi_to_upiu_lun(sdev->lun),
 -                                        UNIT_DESC_PARAM_LU_Q_DEPTH,
 -                                        &lun_qdepth,
 -                                        sizeof(lun_qdepth));
 +                        ufshcd_scsi_to_upiu_lun(sdev->lun),
 +                        UNIT_DESC_PARAM_LU_Q_DEPTH,
 +                        &lun_qdepth,
 +                        sizeof(lun_qdepth));
  
        /* Some WLUN doesn't support unit descriptor */
        if (ret == -EOPNOTSUPP)
@@@ -5256,9 -2992,6 +5256,9 @@@ static int ufshcd_slave_configure(struc
        blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
        blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
  
 +      sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
 +      sdev->use_rpm_auto = 1;
 +
        return 0;
  }
  
@@@ -5368,7 -3101,6 +5368,7 @@@ ufshcd_transfer_rsp_status(struct ufs_h
        int result = 0;
        int scsi_status;
        int ocs;
 +      bool print_prdt;
  
        /* overall command status of utrd */
        ocs = ufshcd_get_tr_ocs(lrbp);
        switch (ocs) {
        case OCS_SUCCESS:
                result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
 -
 +              hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
                switch (result) {
                case UPIU_TRANSACTION_RESPONSE:
                        /*
                        scsi_status = result & MASK_SCSI_STATUS;
                        result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
  
 -                      if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
 +                      /*
 +                       * We currently support only BKOPs exception events,
 +                       * so BKOPs exception events can be ignored during
 +                       * power management callbacks. A BKOPs exception event
 +                       * is not expected to be raised in the runtime suspend
 +                       * callback as that path allows urgent bkops.
 +                       * During system suspend we forcefully disable bkops
 +                       * anyway, and if urgent bkops is needed it will be
 +                       * enabled on system resume. A long term solution could
 +                       * be to abort the system suspend if the UFS device
 +                       * needs urgent BKOPs.
 +                       */
 +                      if (!hba->pm_op_in_progress &&
 +                          ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
                                schedule_work(&hba->eeh_work);
                        break;
                case UPIU_TRANSACTION_REJECT_UPIU:
        case OCS_MISMATCH_RESP_UPIU_SIZE:
        case OCS_PEER_COMM_FAILURE:
        case OCS_FATAL_ERROR:
 +      case OCS_DEVICE_FATAL_ERROR:
 +      case OCS_INVALID_CRYPTO_CONFIG:
 +      case OCS_GENERAL_CRYPTO_ERROR:
        default:
                result |= DID_ERROR << 16;
                dev_err(hba->dev,
 -              "OCS error from controller = %x\n", ocs);
 +                              "OCS error from controller = %x for tag %d\n",
 +                              ocs, lrbp->task_tag);
 +              /*
 +               * This is called in interrupt context, hence avoid sleep
 +               * while printing debug registers. Also print only the minimum
 +               * debug registers needed to debug OCS failure.
 +               */
 +              __ufshcd_print_host_regs(hba, true);
 +              ufshcd_print_host_state(hba);
                break;
        } /* end of switch */
  
 +      if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
 +              print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
 +                      ocs == OCS_MISMATCH_DATA_BUF_SIZE);
 +              ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
 +      }
 +
 +      if ((host_byte(result) == DID_ERROR) ||
 +          (host_byte(result) == DID_ABORT))
 +              ufsdbg_set_err_state(hba);
 +
        return result;
  }
  
   * ufshcd_uic_cmd_compl - handle completion of uic command
   * @hba: per adapter instance
   * @intr_status: interrupt status generated by the controller
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 +static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
  {
 +      irqreturn_t retval = IRQ_NONE;
 +
        if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
                hba->active_uic_cmd->argument2 |=
                        ufshcd_get_uic_cmd_result(hba);
                hba->active_uic_cmd->argument3 =
                        ufshcd_get_dme_attr_val(hba);
                complete(&hba->active_uic_cmd->done);
 +              retval = IRQ_HANDLED;
        }
  
 -      if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
 -              complete(hba->uic_async_done);
 +      if (intr_status & UFSHCD_UIC_PWR_MASK) {
 +              if (hba->uic_async_done) {
 +                      complete(hba->uic_async_done);
 +                      retval = IRQ_HANDLED;
 +              } else if (ufshcd_is_auto_hibern8_supported(hba)) {
 +                      /*
 +                       * If uic_async_done flag is not set then this
 +                       * is an Auto hibern8 err interrupt.
 +                       * Perform a host reset followed by a full
 +                       * link recovery.
 +                       */
 +                      hba->ufshcd_state = UFSHCD_STATE_ERROR;
 +                      hba->force_host_reset = true;
 +                      dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
 +                              __func__, (intr_status & UIC_HIBERNATE_ENTER) ?
 +                              "Enter" : "Exit",
 +                              intr_status, ufshcd_get_upmcrs(hba));
 +                      __ufshcd_print_host_regs(hba, true);
 +                      ufshcd_print_host_state(hba);
 +                      schedule_work(&hba->eh_work);
 +                      retval = IRQ_HANDLED;
 +              }
 +      }
 +      return retval;
  }
  
  /**
 - * ufshcd_transfer_req_compl - handle SCSI and query command completion
 + * ufshcd_abort_outstanding_requests - abort all outstanding transfer requests.
   * @hba: per adapter instance
 + * @result: error result to inform scsi layer about
   */
 -static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 +void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
 +{
 +      u8 index;
 +      struct ufshcd_lrb *lrbp;
 +      struct scsi_cmnd *cmd;
 +
 +      if (!hba->outstanding_reqs)
 +              return;
 +
 +      for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
 +              lrbp = &hba->lrb[index];
 +              cmd = lrbp->cmd;
 +              if (cmd) {
 +                      ufshcd_cond_add_cmd_trace(hba, index, "failed");
 +                      ufshcd_update_error_stats(hba,
 +                                      UFS_ERR_INT_FATAL_ERRORS);
 +                      scsi_dma_unmap(cmd);
 +                      cmd->result = result;
 +                      /* Clear pending transfer requests */
 +                      ufshcd_clear_cmd(hba, index);
 +                      ufshcd_outstanding_req_clear(hba, index);
 +                      clear_bit_unlock(index, &hba->lrb_in_use);
 +                      lrbp->complete_time_stamp = ktime_get();
 +                      update_req_stats(hba, lrbp);
 +                      /* Mark completed command as NULL in LRB */
 +                      lrbp->cmd = NULL;
 +                      ufshcd_release_all(hba);
 +                      if (cmd->request) {
 +                              /*
 +                               * As we are accessing the "request" structure,
 +                               * this must be called before calling
 +                               * ->scsi_done() callback.
 +                               */
 +                              ufshcd_vops_pm_qos_req_end(hba, cmd->request,
 +                                      true);
 +                              ufshcd_vops_crypto_engine_cfg_end(hba,
 +                                              lrbp, cmd->request);
 +                      }
 +                      /* Do not touch lrbp after scsi done */
 +                      cmd->scsi_done(cmd);
 +              } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 +                      if (hba->dev_cmd.complete) {
 +                              ufshcd_cond_add_cmd_trace(hba, index,
 +                                                      "dev_failed");
 +                              ufshcd_outstanding_req_clear(hba, index);
 +                              complete(hba->dev_cmd.complete);
 +                      }
 +              }
 +              if (ufshcd_is_clkscaling_supported(hba))
 +                      hba->clk_scaling.active_reqs--;
 +      }
 +}
 +
 +/**
 + * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 + * @hba: per adapter instance
 + * @completed_reqs: requests to complete
 + */
 +static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 +                                      unsigned long completed_reqs)
  {
        struct ufshcd_lrb *lrbp;
        struct scsi_cmnd *cmd;
 -      unsigned long completed_reqs;
 -      u32 tr_doorbell;
        int result;
        int index;
        struct request *req;
  
 -      /* Resetting interrupt aggregation counters first and reading the
 -       * DOOR_BELL afterward allows us to handle all the completed requests.
 -       * In order to prevent other interrupts starvation the DB is read once
 -       * after reset. The down side of this solution is the possibility of
 -       * false interrupt if device completes another request after resetting
 -       * aggregation and before reading the DB.
 -       */
 -      if (ufshcd_is_intr_aggr_allowed(hba))
 -              ufshcd_reset_intr_aggr(hba);
 -
 -      tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 -      completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
 -
        for_each_set_bit(index, &completed_reqs, hba->nutrs) {
                lrbp = &hba->lrb[index];
                cmd = lrbp->cmd;
                if (cmd) {
 +                      ufshcd_cond_add_cmd_trace(hba, index, "complete");
 +                      ufshcd_update_tag_stats_completion(hba, cmd);
                        result = ufshcd_transfer_rsp_status(hba, lrbp);
                        scsi_dma_unmap(cmd);
                        cmd->result = result;
 +                      clear_bit_unlock(index, &hba->lrb_in_use);
 +                      lrbp->complete_time_stamp = ktime_get();
 +                      update_req_stats(hba, lrbp);
                        /* Mark completed command as NULL in LRB */
                        lrbp->cmd = NULL;
 -                      clear_bit_unlock(index, &hba->lrb_in_use);
 +                      hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
 +                      __ufshcd_release(hba, false);
 +                      __ufshcd_hibern8_release(hba, false);
 +                      if (cmd->request) {
 +                              /*
 +                               * As we are accessing the "request" structure,
 +                               * this must be called before calling
 +                               * ->scsi_done() callback.
 +                               */
 +                              ufshcd_vops_pm_qos_req_end(hba, cmd->request,
 +                                      false);
 +                              ufshcd_vops_crypto_engine_cfg_end(hba,
 +                                      lrbp, cmd->request);
 +                      }
 +
                        req = cmd->request;
                        if (req) {
                                /* Update IO svc time latency histogram */
                        }
                        /* Do not touch lrbp after scsi done */
                        cmd->scsi_done(cmd);
 -                      __ufshcd_release(hba);
                } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 -                      if (hba->dev_cmd.complete)
 +                      if (hba->dev_cmd.complete) {
 +                              ufshcd_cond_add_cmd_trace(hba, index,
 +                                              "dcmp");
                                complete(hba->dev_cmd.complete);
 +                      }
                }
 +              if (ufshcd_is_clkscaling_supported(hba))
 +                      hba->clk_scaling.active_reqs--;
        }
  
        /* clear corresponding bits of completed commands */
  }
  
  /**
 + * ufshcd_transfer_req_compl - handle SCSI and query command completion
 + * @hba: per adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
 + */
 +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 +{
 +      unsigned long completed_reqs;
 +      u32 tr_doorbell;
 +
 +      /* Resetting interrupt aggregation counters first and reading the
 +       * DOOR_BELL afterward allows us to handle all the completed requests.
 +       * In order to prevent starvation of other interrupts the DB is read
 +       * once after reset. The downside of this solution is the possibility
 +       * of a false interrupt if the device completes another request after
 +       * resetting aggregation and before reading the DB.
 +       */
 +      if (ufshcd_is_intr_aggr_allowed(hba))
 +              ufshcd_reset_intr_aggr(hba);
 +
 +      tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
 +
 +      if (completed_reqs) {
 +              __ufshcd_transfer_req_compl(hba, completed_reqs);
 +              return IRQ_HANDLED;
 +      } else {
 +              return IRQ_NONE;
 +      }
 +}
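
The doorbell XOR above is the whole completion bookkeeping: a bit that is still set in outstanding_reqs but no longer set in the hardware doorbell register belongs to a finished request. A minimal standalone sketch with made-up values (illustrative only, not driver code):

    #include <stdio.h>

    int main(void)
    {
            /* tags 0, 1 and 3 were issued ... */
            unsigned long outstanding_reqs = 0x0b;
            /* ... but the hardware doorbell now shows only tag 0 pending */
            unsigned long tr_doorbell = 0x01;
            /* XOR leaves exactly the finished tags: 0x0a -> tags 1 and 3 */
            unsigned long completed_reqs = tr_doorbell ^ outstanding_reqs;

            printf("completed bitmap = 0x%lx\n", completed_reqs);
            return 0;
    }
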
 +
 +/**
   * ufshcd_disable_ee - disable exception event
   * @hba: per-adapter instance
   * @mask: exception event to disable
@@@ -5706,7 -3273,7 +5706,7 @@@ static int ufshcd_disable_ee(struct ufs
  
        val = hba->ee_ctrl_mask & ~mask;
        val &= 0xFFFF; /* 2 bytes */
 -      err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 +      err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
        if (!err)
                hba->ee_ctrl_mask &= ~mask;
@@@ -5734,7 -3301,7 +5734,7 @@@ static int ufshcd_enable_ee(struct ufs_
  
        val = hba->ee_ctrl_mask | mask;
        val &= 0xFFFF; /* 2 bytes */
 -      err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 +      err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
        if (!err)
                hba->ee_ctrl_mask |= mask;
@@@ -5760,7 -3327,7 +5760,7 @@@ static int ufshcd_enable_auto_bkops(str
        if (hba->auto_bkops_enabled)
                goto out;
  
 -      err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 +      err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
                        QUERY_FLAG_IDN_BKOPS_EN, NULL);
        if (err) {
                dev_err(hba->dev, "%s: failed to enable bkops %d\n",
        }
  
        hba->auto_bkops_enabled = true;
 +      trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
  
        /* No need of URGENT_BKOPS exception from the device */
        err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@@ -5810,7 -3376,7 +5810,7 @@@ static int ufshcd_disable_auto_bkops(st
                goto out;
        }
  
 -      err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
 +      err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
                        QUERY_FLAG_IDN_BKOPS_EN, NULL);
        if (err) {
                dev_err(hba->dev, "%s: failed to disable bkops %d\n",
        }
  
        hba->auto_bkops_enabled = false;
 +      trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
  out:
        return err;
  }
@@@ -5849,7 -3414,7 +5849,7 @@@ static void ufshcd_force_reset_auto_bko
  
  static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
  {
 -      return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 +      return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
                        QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
  }
  
   */
  static int ufshcd_urgent_bkops(struct ufs_hba *hba)
  {
 -      return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 +      return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
 +}
 +
 +static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
 +{
 +      return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 +                      QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
  }
  
 -static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
 -{
 -      return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 -                      QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
 +static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
 +{
 +      int err;
 +      u32 curr_status = 0;
 +
 +      if (hba->is_urgent_bkops_lvl_checked)
 +              goto enable_auto_bkops;
 +
 +      err = ufshcd_get_bkops_status(hba, &curr_status);
 +      if (err) {
 +              dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
 +                              __func__, err);
 +              goto out;
 +      }
 +
 +      /*
 +       * We are seeing that some devices raise the urgent bkops
 +       * exception event even when the BKOPS status doesn't indicate
 +       * performance impacted or critical. Handle such devices by
 +       * determining their urgent bkops status at runtime.
 +       */
 +      if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
 +              dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
 +                              __func__, curr_status);
 +              /* update the current status as the urgent bkops level */
 +              hba->urgent_bkops_lvl = curr_status;
 +              hba->is_urgent_bkops_lvl_checked = true;
 +      }
 +
 +enable_auto_bkops:
 +      err = ufshcd_enable_auto_bkops(hba);
 +out:
 +      if (err < 0)
 +              dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
 +                              __func__, err);
  }
  
  /**
@@@ -5968,7 -3496,7 +5968,7 @@@ static void ufshcd_exception_event_hand
        hba = container_of(work, struct ufs_hba, eeh_work);
  
        pm_runtime_get_sync(hba->dev);
 -      scsi_block_requests(hba->host);
 +      ufshcd_scsi_block_requests(hba);
        err = ufshcd_get_ee_status(hba, &status);
        if (err) {
                dev_err(hba->dev, "%s: failed to get exception status %d\n",
        }
  
        status &= hba->ee_ctrl_mask;
 -      if (status & MASK_EE_URGENT_BKOPS) {
 -              err = ufshcd_urgent_bkops(hba);
 -              if (err < 0)
 -                      dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
 -                                      __func__, err);
 -      }
 +
 +      if (status & MASK_EE_URGENT_BKOPS)
 +              ufshcd_bkops_exception_event_handler(hba);
 +
  out:
 -      scsi_unblock_requests(hba->host);
 -      pm_runtime_put_sync(hba->dev);
 +      ufshcd_scsi_unblock_requests(hba);
 +      pm_runtime_put(hba->dev);
        return;
  }
  
 +/* Complete requests that have door-bell cleared */
 +static void ufshcd_complete_requests(struct ufs_hba *hba)
 +{
 +      ufshcd_transfer_req_compl(hba);
 +      ufshcd_tmc_handler(hba);
 +}
 +
 +/**
 + * ufshcd_quirk_dl_nac_errors - check whether error handling is required
 + *                            to recover from the DL NAC errors.
 + * @hba: per-adapter instance
 + *
 + * Returns true if error handling is required, false otherwise
 + */
 +static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +      bool err_handling = true;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      /*
 +       * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
 +       * device fatal error and/or DL NAC & REPLAY timeout errors.
 +       */
 +      if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
 +              goto out;
 +
 +      if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
 +          ((hba->saved_err & UIC_ERROR) &&
 +           (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
 +              /*
 +               * we have to do error recovery but at least silence the error
 +               * logs.
 +               */
 +              hba->silence_err_logs = true;
 +              goto out;
 +      }
 +
 +      if ((hba->saved_err & UIC_ERROR) &&
 +          (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
 +              int err;
 +              /*
 +               * wait for 50ms to see if we can get any other errors or not.
 +               */
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              msleep(50);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +              /*
 +               * now check if we have got any other severe errors besides
 +               * the DL NAC error.
 +               */
 +              if ((hba->saved_err & INT_FATAL_ERRORS) ||
 +                  ((hba->saved_err & UIC_ERROR) &&
 +                  (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
 +                      if (((hba->saved_err & INT_FATAL_ERRORS) ==
 +                              DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
 +                                      ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
 +                              hba->silence_err_logs = true;
 +                      goto out;
 +              }
 +
 +              /*
 +               * As DL NAC is the only error received so far, send out a NOP
 +               * command to confirm whether the link is still active.
 +               *   - If we don't get any response then do error recovery.
 +               *   - If we get response then clear the DL NAC error bit.
 +               */
 +
 +              /* silence the error logs from NOP command */
 +              hba->silence_err_logs = true;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              err = ufshcd_verify_dev_init(hba);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              hba->silence_err_logs = false;
 +
 +              if (err) {
 +                      hba->silence_err_logs = true;
 +                      goto out;
 +              }
 +
 +              /* Link seems to be alive hence ignore the DL NAC errors */
 +              if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
 +                      hba->saved_err &= ~UIC_ERROR;
 +              /* clear NAC error */
 +              hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
 +              if (!hba->saved_uic_err) {
 +                      err_handling = false;
 +                      goto out;
 +              }
 +              /*
 +               * there seem to be some errors other than NAC, so do error
 +               * recovery
 +               */
 +              hba->silence_err_logs = true;
 +      }
 +out:
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return err_handling;
 +}
 +
  /**
   * ufshcd_err_handler - handle UFS errors that require s/w attention
   * @work: pointer to work structure
@@@ -6096,149 -3525,51 +6096,149 @@@ static void ufshcd_err_handler(struct w
  {
        struct ufs_hba *hba;
        unsigned long flags;
 -      u32 err_xfer = 0;
 -      u32 err_tm = 0;
 +      bool err_xfer = false, err_tm = false;
        int err = 0;
        int tag;
 +      bool needs_reset = false;
 +      bool clks_enabled = false;
  
        hba = container_of(work, struct ufs_hba, eh_work);
  
 -      pm_runtime_get_sync(hba->dev);
 -      ufshcd_hold(hba, false);
 -
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
 -              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufsdbg_set_err_state(hba);
 +
 +      if (hba->ufshcd_state == UFSHCD_STATE_RESET)
                goto out;
 +
 +      /*
 +       * Make sure the clocks are ON before we proceed with err
 +       * handling. For the majority of cases err handler would be
 +       * run with clocks ON. There is a possibility that the err
 +       * handler was scheduled due to auto hibern8 error interrupt,
 +       * in which case the clocks could be gated or be in the
 +       * process of gating when the err handler runs.
 +       */
 +      if (unlikely((hba->clk_gating.state != CLKS_ON) &&
 +          ufshcd_is_auto_hibern8_supported(hba))) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
 +              ufshcd_hold(hba, false);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              clks_enabled = true;
        }
  
        hba->ufshcd_state = UFSHCD_STATE_RESET;
        ufshcd_set_eh_in_progress(hba);
  
        /* Complete requests that have door-bell cleared by h/w */
 -      ufshcd_transfer_req_compl(hba);
 -      ufshcd_tmc_handler(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufshcd_complete_requests(hba);
 +
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
 +              bool ret;
 +
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
 +              ret = ufshcd_quirk_dl_nac_errors(hba);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              if (!ret)
 +                      goto skip_err_handling;
 +      }
 +
 +      /*
 +       * Dump controller state before resetting. Transfer request state
 +       * will be dumped as part of the request completion.
 +       */
 +      if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
 +              dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
 +                      __func__, hba->saved_err, hba->saved_uic_err);
 +              if (!hba->silence_err_logs) {
 +                      /* release lock as print host regs sleeps */
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +                      ufshcd_print_host_regs(hba);
 +                      ufshcd_print_host_state(hba);
 +                      ufshcd_print_pwr_info(hba);
 +                      ufshcd_print_tmrs(hba, hba->outstanding_tasks);
 +                      ufshcd_print_cmd_log(hba);
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +              }
 +      }
 +
 +      if ((hba->saved_err & INT_FATAL_ERRORS)
 +          || hba->saved_ce_err || hba->force_host_reset ||
 +          ((hba->saved_err & UIC_ERROR) &&
 +          (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
 +                                 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
 +                                 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
 +              needs_reset = true;
 +
 +      /*
 +       * if host reset is required then skip clearing the pending
 +       * transfers forcefully because they will automatically get
 +       * cleared after link startup.
 +       */
 +      if (needs_reset)
 +              goto skip_pending_xfer_clear;
  
 +      /* release lock as clear command might sleep */
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
        /* Clear pending transfer requests */
 -      for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
 -              if (ufshcd_clear_cmd(hba, tag))
 -                      err_xfer |= 1 << tag;
 +      for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
 +              if (ufshcd_clear_cmd(hba, tag)) {
 +                      err_xfer = true;
 +                      goto lock_skip_pending_xfer_clear;
 +              }
 +      }
  
        /* Clear pending task management requests */
 -      for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
 -              if (ufshcd_clear_tm_cmd(hba, tag))
 -                      err_tm |= 1 << tag;
 +      for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
 +              if (ufshcd_clear_tm_cmd(hba, tag)) {
 +                      err_tm = true;
 +                      goto lock_skip_pending_xfer_clear;
 +              }
 +      }
  
 -      /* Complete the requests that are cleared by s/w */
 +lock_skip_pending_xfer_clear:
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_transfer_req_compl(hba);
 -      ufshcd_tmc_handler(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 +      /* Complete the requests that are cleared by s/w */
 +      ufshcd_complete_requests(hba);
 +
 +      if (err_xfer || err_tm)
 +              needs_reset = true;
 +
 +skip_pending_xfer_clear:
        /* Fatal errors need reset */
 -      if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
 -                      ((hba->saved_err & UIC_ERROR) &&
 -                       (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
 +      if (needs_reset) {
 +              unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
 +
 +              if (hba->saved_err & INT_FATAL_ERRORS)
 +                      ufshcd_update_error_stats(hba,
 +                                                UFS_ERR_INT_FATAL_ERRORS);
 +              if (hba->saved_ce_err)
 +                      ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
 +
 +              if (hba->saved_err & UIC_ERROR)
 +                      ufshcd_update_error_stats(hba,
 +                                                UFS_ERR_INT_UIC_ERROR);
 +
 +              if (err_xfer || err_tm)
 +                      ufshcd_update_error_stats(hba,
 +                                                UFS_ERR_CLEAR_PEND_XFER_TM);
 +
 +              /*
 +               * ufshcd_reset_and_restore() does the link reinitialization
 +               * which will need at least one empty doorbell slot to send the
 +               * device management commands (NOP and query commands).
 +               * If there is no empty slot at this moment then forcefully free
 +               * up the last slot.
 +               */
 +              if (hba->outstanding_reqs == max_doorbells)
 +                      __ufshcd_transfer_req_compl(hba,
 +                                                  (1UL << (hba->nutrs - 1)));
 +
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
                err = ufshcd_reset_and_restore(hba);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
                if (err) {
                        dev_err(hba->dev, "%s: reset and restore failed\n",
                                        __func__);
                scsi_report_bus_reset(hba->host, 0);
                hba->saved_err = 0;
                hba->saved_uic_err = 0;
 +              hba->saved_ce_err = 0;
 +              hba->force_host_reset = false;
 +      }
 +
 +skip_err_handling:
 +      if (!needs_reset) {
 +              hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 +              if (hba->saved_err || hba->saved_uic_err)
 +                      dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
 +                          __func__, hba->saved_err, hba->saved_uic_err);
        }
 +
 +      hba->silence_err_logs = false;
 +
 +      if (clks_enabled) {
 +              __ufshcd_release(hba, false);
 +              hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
 +      }
 +out:
        ufshcd_clear_eh_in_progress(hba);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +
 +static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
 +              u32 reg)
 +{
 +      reg_hist->reg[reg_hist->pos] = reg;
 +      reg_hist->tstamp[reg_hist->pos] = ktime_get();
 +      reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
 +}
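
ufshcd_update_uic_reg_hist() keeps a fixed-length ring of the most recent error register values; pos always names the slot that will be overwritten next, so the oldest surviving entry sits at pos and the newest just behind it. A minimal dump sketch built on the same layout (the helper name and the empty-slot check are assumptions for illustration, not part of this change):

    /* Hypothetical helper: print the history oldest-first, skipping unused slots. */
    static void ufshcd_print_uic_reg_hist(struct ufs_hba *hba,
                                          struct ufs_uic_err_reg_hist *h,
                                          const char *name)
    {
            int i;

            for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
                    int p = (h->pos + i) % UIC_ERR_REG_HIST_LENGTH;

                    /* a slot that was never written still has a zero timestamp */
                    if (!ktime_to_us(h->tstamp[p]))
                            continue;
                    dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", name, p,
                            h->reg[p], (long long)ktime_to_us(h->tstamp[p]));
            }
    }
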
 +
 +static void ufshcd_rls_handler(struct work_struct *work)
 +{
 +      struct ufs_hba *hba;
 +      int ret = 0;
 +      u32 mode;
 +
 +      hba = container_of(work, struct ufs_hba, rls_work);
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_scsi_block_requests(hba);
 +      ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 +      if (ret) {
 +              dev_err(hba->dev,
 +                      "Timed out (%d) waiting for DB to clear\n",
 +                      ret);
 +              goto out;
 +      }
 +
 +      ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
 +      if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
 +              hba->restore_needed = true;
 +
 +      if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
 +              hba->restore_needed = true;
 +
 +      ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
 +      if (hba->pwr_info.gear_rx != mode)
 +              hba->restore_needed = true;
 +
 +      ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
 +      if (hba->pwr_info.gear_tx != mode)
 +              hba->restore_needed = true;
 +
 +      if (hba->restore_needed)
 +              ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
 +
 +      if (ret)
 +              dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
 +                      __func__, ret);
 +      else
 +              hba->restore_needed = false;
  
  out:
 -      scsi_unblock_requests(hba->host);
 -      ufshcd_release(hba);
 +      ufshcd_scsi_unblock_requests(hba);
        pm_runtime_put_sync(hba->dev);
  }
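
The shifts in ufshcd_rls_handler() above follow the UniPro packing of PA_PWRMODE: the TX power mode occupies the low nibble and the RX power mode the nibble above it, which the driver expresses through PWR_RX_OFFSET and PWR_INFO_MASK. A small standalone illustration with an assumed readback of 0x11, i.e. FAST_MODE in both directions (toy values, not driver code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int mode = 0x11;                /* assumed PA_PWRMODE readback */
            unsigned int pwr_tx = mode & 0xf;        /* low nibble  -> TX mode (1 = FAST) */
            unsigned int pwr_rx = (mode >> 4) & 0xf; /* next nibble -> RX mode (1 = FAST) */

            printf("rx mode %u, tx mode %u\n", pwr_rx, pwr_tx);
            return 0;
    }
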
  
  /**
   * ufshcd_update_uic_error - check and set fatal UIC error flags.
   * @hba: per-adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_update_uic_error(struct ufs_hba *hba)
 +static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
  {
        u32 reg;
 +      irqreturn_t retval = IRQ_NONE;
 +
 +      /* PHY layer lane error */
 +      reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
 +      if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
 +          (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
 +              /*
 +               * To know whether this error is fatal or not, DB timeout
 +               * must be checked but this error is handled separately.
 +               */
 +              dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
 +                              __func__, reg);
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
 +
 +              /*
 +               * Don't ignore LINERESET indication during hibern8
 +               * enter operation.
 +               */
 +              if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
 +                      struct uic_command *cmd = hba->active_uic_cmd;
 +
 +                      if (cmd) {
 +                              if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
 +                                      dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
 +                                              __func__, reg);
 +                                      hba->full_init_linereset = true;
 +                              }
 +                      }
 +                      if (!hba->full_init_linereset)
 +                              schedule_work(&hba->rls_work);
 +              }
 +              retval |= IRQ_HANDLED;
 +      }
  
        /* PA_INIT_ERROR is fatal and needs UIC reset */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
 -      if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
 -              hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
 +      if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
 +          (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
 +
 +              if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
 +                      hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
 +              } else if (hba->dev_quirks &
 +                         UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
 +                      if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
 +                              hba->uic_error |=
 +                                      UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
 +                      else if (reg &
 +                               UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
 +                              hba->uic_error |=
 +                                      UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
 +              }
 +              retval |= IRQ_HANDLED;
 +      }
  
        /* UIC NL/TL/DME errors needs software retry */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
 -      if (reg)
 +      if ((reg & UIC_NETWORK_LAYER_ERROR) &&
 +          (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
                hba->uic_error |= UFSHCD_UIC_NL_ERROR;
 +              retval |= IRQ_HANDLED;
 +      }
  
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
 -      if (reg)
 +      if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
 +          (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
                hba->uic_error |= UFSHCD_UIC_TL_ERROR;
 +              retval |= IRQ_HANDLED;
 +      }
  
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
 -      if (reg)
 +      if ((reg & UIC_DME_ERROR) &&
 +          (reg & UIC_DME_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
                hba->uic_error |= UFSHCD_UIC_DME_ERROR;
 +              retval |= IRQ_HANDLED;
 +      }
  
        dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
                        __func__, hba->uic_error);
 +      return retval;
  }
  
  /**
   * ufshcd_check_errors - Check for errors that need s/w attention
   * @hba: per-adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_check_errors(struct ufs_hba *hba)
 +static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
  {
        bool queue_eh_work = false;
 +      irqreturn_t retval = IRQ_NONE;
  
 -      if (hba->errors & INT_FATAL_ERRORS)
 +      if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
                queue_eh_work = true;
  
        if (hba->errors & UIC_ERROR) {
                hba->uic_error = 0;
 -              ufshcd_update_uic_error(hba);
 +              retval = ufshcd_update_uic_error(hba);
                if (hba->uic_error)
                        queue_eh_work = true;
        }
  
        if (queue_eh_work) {
 +              /*
 +               * Update the transfer error masks to sticky bits; do this
 +               * irrespective of the current ufshcd_state.
 +               */
 +              hba->saved_err |= hba->errors;
 +              hba->saved_uic_err |= hba->uic_error;
 +              hba->saved_ce_err |= hba->ce_error;
 +
                /* handle fatal errors only when link is functional */
                if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
 -                      /* block commands from scsi mid-layer */
 -                      scsi_block_requests(hba->host);
 -
 -                      /* transfer error masks to sticky bits */
 -                      hba->saved_err |= hba->errors;
 -                      hba->saved_uic_err |= hba->uic_error;
 +                      /*
 +                       * Set error handling in progress flag early so that we
 +                       * don't issue new requests any more.
 +                       */
 +                      ufshcd_set_eh_in_progress(hba);
  
                        hba->ufshcd_state = UFSHCD_STATE_ERROR;
                        schedule_work(&hba->eh_work);
                }
 +              retval |= IRQ_HANDLED;
        }
        /*
         * if (!queue_eh_work) -
         * itself without s/w intervention or errors that will be
         * handled by the SCSI core layer.
         */
 +      return retval;
  }
  
  /**
   * ufshcd_tmc_handler - handle task management function completion
   * @hba: per adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_tmc_handler(struct ufs_hba *hba)
 +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
  {
        u32 tm_doorbell;
  
        tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
        hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
 -      wake_up(&hba->tm_wq);
 +      if (hba->tm_condition) {
 +              wake_up(&hba->tm_wq);
 +              return IRQ_HANDLED;
 +      } else {
 +              return IRQ_NONE;
 +      }
  }
  
  /**
   * ufshcd_sl_intr - Interrupt service routine
   * @hba: per adapter instance
   * @intr_status: contains interrupts generated by the controller
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 +static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  {
 +      irqreturn_t retval = IRQ_NONE;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_INTR, intr_status, &intr_status);
 +
 +      ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
 +
        hba->errors = UFSHCD_ERROR_MASK & intr_status;
 -      if (hba->errors)
 -              ufshcd_check_errors(hba);
 +      if (hba->errors || hba->ce_error)
 +              retval |= ufshcd_check_errors(hba);
  
        if (intr_status & UFSHCD_UIC_MASK)
 -              ufshcd_uic_cmd_compl(hba, intr_status);
 +              retval |= ufshcd_uic_cmd_compl(hba, intr_status);
  
        if (intr_status & UTP_TASK_REQ_COMPL)
 -              ufshcd_tmc_handler(hba);
 +              retval |= ufshcd_tmc_handler(hba);
  
        if (intr_status & UTP_TRANSFER_REQ_COMPL)
 -              ufshcd_transfer_req_compl(hba);
 +              retval |= ufshcd_transfer_req_compl(hba);
 +
 +      return retval;
  }
  
  /**
   * @irq: irq number
   * @__hba: pointer to adapter instance
   *
 - * Returns IRQ_HANDLED - If interrupt is valid
 - *            IRQ_NONE - If invalid interrupt
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
  static irqreturn_t ufshcd_intr(int irq, void *__hba)
  {
 -      u32 intr_status;
 +      u32 intr_status, enabled_intr_status;
        irqreturn_t retval = IRQ_NONE;
        struct ufs_hba *hba = __hba;
 +      int retries = hba->nutrs;
  
        spin_lock(hba->host->host_lock);
        intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 +      hba->ufs_stats.last_intr_status = intr_status;
 +      hba->ufs_stats.last_intr_ts = ktime_get();
 +      /*
 +       * There can be at most hba->nutrs requests in flight. In the worst
 +       * case, if the requests finish one by one after the interrupt status
 +       * is read, handle them by re-reading the interrupt status in a loop
 +       * until all of them are processed before returning.
 +       */
 +      do {
 +              enabled_intr_status =
 +                      intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 +              if (intr_status)
 +                      ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
 +              if (enabled_intr_status)
 +                      retval |= ufshcd_sl_intr(hba, enabled_intr_status);
  
 -      if (intr_status) {
 -              ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
 -              ufshcd_sl_intr(hba, intr_status);
 -              retval = IRQ_HANDLED;
 +              intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 +      } while (intr_status && --retries);
 +
 +      if (retval == IRQ_NONE) {
 +              dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
 +                                      __func__, intr_status);
 +              ufshcd_hex_dump("host regs: ", hba->mmio_base,
 +                                      UFSHCI_REG_SPACE_SIZE);
        }
 +
        spin_unlock(hba->host->host_lock);
        return retval;
  }
@@@ -6598,7 -3737,7 +6598,7 @@@ static int ufshcd_clear_tm_cmd(struct u
        /* poll for max. 1 sec to clear door bell register by h/w */
        err = ufshcd_wait_for_register(hba,
                        REG_UTP_TASK_REQ_DOOR_BELL,
 -                      mask, 0, 1000, 1000);
 +                      mask, 0, 1000, 1000, true);
  out:
        return err;
  }
@@@ -6632,8 -3771,7 +6632,8 @@@ static int ufshcd_issue_tm_cmd(struct u
         * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
         */
        wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 -      ufshcd_hold(hba, false);
 +      hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
 +      ufshcd_hold_all(hba);
  
        spin_lock_irqsave(host->host_lock, flags);
        task_req_descp = hba->utmrdl_base_addr;
  
        /* send command to the controller */
        __set_bit(free_slot, &hba->outstanding_tasks);
 +
 +      /* Make sure descriptors are ready before ringing the task doorbell */
 +      wmb();
 +
        ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
 +      /* Make sure that doorbell is committed immediately */
 +      wmb();
  
        spin_unlock_irqrestore(host->host_lock, flags);
  
        clear_bit(free_slot, &hba->tm_condition);
        ufshcd_put_tm_slot(hba, free_slot);
        wake_up(&hba->tm_tag_wq);
 +      hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
  
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
  
@@@ -6718,7 -3849,6 +6718,7 @@@ static int ufshcd_eh_device_reset_handl
        hba = shost_priv(host);
        tag = cmd->request->tag;
  
 +      ufshcd_print_cmd_log(hba);
        lrbp = &hba->lrb[tag];
        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
        spin_lock_irqsave(host->host_lock, flags);
        ufshcd_transfer_req_compl(hba);
        spin_unlock_irqrestore(host->host_lock, flags);
 +
  out:
 +      hba->req_abort_count = 0;
        if (!err) {
                err = SUCCESS;
        } else {
        return err;
  }
  
 +static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
 +{
 +      struct ufshcd_lrb *lrbp;
 +      int tag;
 +
 +      for_each_set_bit(tag, &bitmap, hba->nutrs) {
 +              lrbp = &hba->lrb[tag];
 +              lrbp->req_abort_skip = true;
 +      }
 +}
 +
  /**
   * ufshcd_abort - abort a specific command
   * @cmd: SCSI command pointer
@@@ -6788,87 -3905,31 +6788,87 @@@ static int ufshcd_abort(struct scsi_cmn
        host = cmd->device->host;
        hba = shost_priv(host);
        tag = cmd->request->tag;
 +      if (!ufshcd_valid_tag(hba, tag)) {
 +              dev_err(hba->dev,
 +                      "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
 +                      __func__, tag, cmd, cmd->request);
 +              BUG();
 +      }
  
 -      ufshcd_hold(hba, false);
 +      lrbp = &hba->lrb[tag];
 +
 +      ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
 +
 +      /*
 +       * Task abort to the device W-LUN is illegal. When this command
 +       * fails, due to spec violation, the next SCSI error handling step
 +       * will be to send a LU reset which, again, is a spec violation.
 +       * To avoid these unnecessary/illegal steps we skip to the last error
 +       * handling stage: reset and restore.
 +       */
 +      if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
 +              return ufshcd_eh_host_reset_handler(cmd);
 +
 +      ufshcd_hold_all(hba);
 +      reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        /* If command is already aborted/completed, return SUCCESS */
 -      if (!(test_bit(tag, &hba->outstanding_reqs)))
 +      if (!(test_bit(tag, &hba->outstanding_reqs))) {
 +              dev_err(hba->dev,
 +                      "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
 +                      __func__, tag, hba->outstanding_reqs, reg);
                goto out;
 +      }
  
 -      reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        if (!(reg & (1 << tag))) {
                dev_err(hba->dev,
                "%s: cmd was completed, but without a notifying intr, tag = %d",
                __func__, tag);
        }
  
 -      lrbp = &hba->lrb[tag];
 +      /* Print Transfer Request of aborted task */
 +      dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
 +
 +      /*
 +       * Print detailed info about aborted request.
 +       * As more than one request might get aborted at the same time,
 +       * print full information only for the first aborted request in order
 +       * to reduce repeated printouts. For other aborted requests only print
 +       * basic details.
 +       */
 +      scsi_print_command(cmd);
 +      if (!hba->req_abort_count) {
 +              ufshcd_print_fsm_state(hba);
 +              ufshcd_print_host_regs(hba);
 +              ufshcd_print_host_state(hba);
 +              ufshcd_print_pwr_info(hba);
 +              ufshcd_print_trs(hba, 1 << tag, true);
 +      } else {
 +              ufshcd_print_trs(hba, 1 << tag, false);
 +      }
 +      hba->req_abort_count++;
 +
 +
 +      /* Skip task abort in case previous aborts failed and report failure */
 +      if (lrbp->req_abort_skip) {
 +              err = -EIO;
 +              goto out;
 +      }
 +
        for (poll_cnt = 100; poll_cnt; poll_cnt--) {
                err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
                                UFS_QUERY_TASK, &resp);
                if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
                        /* cmd pending in the device */
 +                      dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
 +                              __func__, tag);
                        break;
                } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
                        /*
                         * cmd not pending in the device, check if it is
                         * in transition.
                         */
 +                      dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
 +                              __func__, tag);
                        reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
                        if (reg & (1 << tag)) {
                                /* sleep for max. 200us to stabilize */
                                continue;
                        }
                        /* command completed already */
 +                      dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
 +                              __func__, tag);
                        goto out;
                } else {
 +                      dev_err(hba->dev,
 +                              "%s: no response from device. tag = %d, err %d",
 +                              __func__, tag, err);
                        if (!err)
                                err = resp; /* service response error */
                        goto out;
        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
                        UFS_ABORT_TASK, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
 -              if (!err)
 +              if (!err) {
                        err = resp; /* service response error */
 +                      dev_err(hba->dev, "%s: issued. tag = %d, err %d",
 +                              __func__, tag, err);
 +              }
                goto out;
        }
  
        err = ufshcd_clear_cmd(hba, tag);
 -      if (err)
 +      if (err) {
 +              dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
 +                      __func__, tag, err);
                goto out;
 +      }
  
        scsi_dma_unmap(cmd);
  
        spin_lock_irqsave(host->host_lock, flags);
 -      __clear_bit(tag, &hba->outstanding_reqs);
 +      ufshcd_outstanding_req_clear(hba, tag);
        hba->lrb[tag].cmd = NULL;
        spin_unlock_irqrestore(host->host_lock, flags);
  
                err = SUCCESS;
        } else {
                dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
 +              ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
                err = FAILED;
        }
  
        /*
 -       * This ufshcd_release() corresponds to the original scsi cmd that got
 -       * aborted here (as we won't get any IRQ for it).
 +       * This ufshcd_release_all() corresponds to the original scsi cmd that
 +       * got aborted here (as we won't get any IRQ for it).
         */
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
  
@@@ -6956,12 -4005,9 +6956,12 @@@ static int ufshcd_host_reset_and_restor
  
        /* Reset the host controller */
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_hba_stop(hba);
 +      ufshcd_hba_stop(hba, false);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 +      /* scale up clocks to max frequency before full reinitialization */
 +      ufshcd_set_clk_freq(hba, true);
 +
        err = ufshcd_hba_enable(hba);
        if (err)
                goto out;
        /* Establish the link again and restore the device */
        err = ufshcd_probe_hba(hba);
  
 -      if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
 +      if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
                err = -EIO;
 +              goto out;
 +      }
 +
 +      if (!err) {
 +              err = ufshcd_vops_crypto_engine_reset(hba);
 +              if (err) {
 +                      dev_err(hba->dev,
 +                              "%s: failed to reset crypto engine %d\n",
 +                              __func__, err);
 +                      goto out;
 +              }
 +      }
 +
  out:
        if (err)
                dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@@ -7006,27 -4039,11 +7006,27 @@@ static int ufshcd_reset_and_restore(str
        unsigned long flags;
        int retries = MAX_HOST_RESET_RETRIES;
  
 -      do {
 +      do {
 +              err = ufshcd_vops_full_reset(hba);
 +              if (err)
 +                      dev_warn(hba->dev, "%s: full reset returned %d\n",
 +                               __func__, err);
 +
 +              err = ufshcd_reset_device(hba);
 +              if (err)
 +                      dev_warn(hba->dev, "%s: device reset failed. err %d\n",
 +                               __func__, err);
 +
                err = ufshcd_host_reset_and_restore(hba);
        } while (err && --retries);
  
        /*
 +       * There is no point in proceeding after failing
 +       * to recover even after multiple retries.
 +       */
 +      if (err)
 +              BUG();
 +      /*
         * After reset the door-bell might be cleared, complete
         * outstanding requests in s/w here.
         */
   */
  static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
  {
 -      int err;
 +      int err = SUCCESS;
        unsigned long flags;
        struct ufs_hba *hba;
  
        hba = shost_priv(cmd->device->host);
  
 -      ufshcd_hold(hba, false);
        /*
         * Check if there is any race with fatal error handling.
         * If so, wait for it to complete. Even though fatal error
                                hba->ufshcd_state == UFSHCD_STATE_RESET))
                        break;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
 -              dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
 +              dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
                flush_work(&hba->eh_work);
        } while (1);
  
 -      hba->ufshcd_state = UFSHCD_STATE_RESET;
 -      ufshcd_set_eh_in_progress(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      /*
 +       * we don't know if the previous reset really reset the host controller
 +       * or not, so force a reset here to be sure.
 +       */
 +      hba->ufshcd_state = UFSHCD_STATE_ERROR;
 +      hba->force_host_reset = true;
 +      schedule_work(&hba->eh_work);
  
 -      err = ufshcd_reset_and_restore(hba);
 +      /* wait for the reset work to finish */
 +      do {
 +              if (!(work_pending(&hba->eh_work) ||
 +                              hba->ufshcd_state == UFSHCD_STATE_RESET))
 +                      break;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
 +              flush_work(&hba->eh_work);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +      } while (1);
  
 -      spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (!err) {
 -              err = SUCCESS;
 -              hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 -      } else {
 +      if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
 +            ufshcd_is_link_active(hba))) {
                err = FAILED;
                hba->ufshcd_state = UFSHCD_STATE_ERROR;
        }
 -      ufshcd_clear_eh_in_progress(hba);
 +
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 -      ufshcd_release(hba);
        return err;
  }
  
@@@ -7206,9 -4215,9 +7206,9 @@@ static void ufshcd_init_icc_levels(stru
        dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
                        __func__, hba->init_prefetch_data.icc_level);
  
 -      ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 -                      QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 -                      &hba->init_prefetch_data.icc_level);
 +      ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 +              QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 +              &hba->init_prefetch_data.icc_level);
  
        if (ret)
                dev_err(hba->dev,
  }
  
  /**
 + * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 + * @hba: per-adapter instance
 + *
 + * PA_TActivate parameter can be tuned manually if UniPro version is less than
 + * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
 + * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 + * the hibern8 exit latency.
 + *
 + * Returns zero on success, non-zero error value on failure.
 + */
 +static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
 +
 +      if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
 +              return 0;
 +
 +      ret = ufshcd_dme_peer_get(hba,
 +                                UIC_ARG_MIB_SEL(
 +                                      RX_MIN_ACTIVATETIME_CAPABILITY,
 +                                      UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
 +                                &peer_rx_min_activatetime);
 +      if (ret)
 +              goto out;
 +
 +      /* make sure proper unit conversion is applied */
 +      tuned_pa_tactivate =
 +              ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
 +               / PA_TACTIVATE_TIME_UNIT_US);
 +      ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
 +                           tuned_pa_tactivate);
 +
 +out:
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 + * @hba: per-adapter instance
 + *
 + * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
 + * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
 + * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 + * This optimal value can help reduce the hibern8 exit latency.
 + *
 + * Returns zero on success, non-zero error value on failure.
 + */
 +static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
 +      u32 max_hibern8_time, tuned_pa_hibern8time;
 +
 +      ret = ufshcd_dme_get(hba,
 +                           UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
 +                                      UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
 +                                &local_tx_hibern8_time_cap);
 +      if (ret)
 +              goto out;
 +
 +      ret = ufshcd_dme_peer_get(hba,
 +                                UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
 +                                      UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
 +                                &peer_rx_hibern8_time_cap);
 +      if (ret)
 +              goto out;
 +
 +      max_hibern8_time = max(local_tx_hibern8_time_cap,
 +                             peer_rx_hibern8_time_cap);
 +      /* make sure proper unit conversion is applied */
 +      tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
 +                              / PA_HIBERN8_TIME_UNIT_US);
 +      ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
 +                           tuned_pa_hibern8time);
 +out:
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 + * less than device PA_TACTIVATE time.
 + * @hba: per-adapter instance
 + *
 + * Some UFS devices require host PA_TACTIVATE to be lower than device
 + * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
 + * enabled for such devices.
 + *
 + * Returns zero on success, non-zero error value on failure.
 + */
 +static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      u32 granularity, peer_granularity;
 +      u32 pa_tactivate, peer_pa_tactivate;
 +      u32 pa_tactivate_us, peer_pa_tactivate_us;
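 +      /* granularity values 1..6 index this table of time units, in us */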
 +      u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
 +
 +      ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
 +                                &granularity);
 +      if (ret)
 +              goto out;
 +
 +      ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
 +                                &peer_granularity);
 +      if (ret)
 +              goto out;
 +
 +      if ((granularity < PA_GRANULARITY_MIN_VAL) ||
 +          (granularity > PA_GRANULARITY_MAX_VAL)) {
 +              dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
 +                      __func__, granularity);
 +              return -EINVAL;
 +      }
 +
 +      if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
 +          (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
 +              dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
 +                      __func__, peer_granularity);
 +              return -EINVAL;
 +      }
 +
 +      ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
 +      if (ret)
 +              goto out;
 +
 +      ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
 +                                &peer_pa_tactivate);
 +      if (ret)
 +              goto out;
 +
 +      pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
 +      peer_pa_tactivate_us = peer_pa_tactivate *
 +                           gran_to_us_table[peer_granularity - 1];
 +
 +      if (pa_tactivate_us > peer_pa_tactivate_us) {
 +              u32 new_peer_pa_tactivate;
 +
 +              new_peer_pa_tactivate = pa_tactivate_us /
 +                                    gran_to_us_table[peer_granularity - 1];
 +              new_peer_pa_tactivate++;
 +              ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
 +                                        new_peer_pa_tactivate);
 +      }
 +
 +out:
 +      return ret;
 +}
 +
 +static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 +{
 +      if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
 +              ufshcd_tune_pa_tactivate(hba);
 +              ufshcd_tune_pa_hibern8time(hba);
 +      }
 +
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
 +              /* set 1ms timeout for PA_TACTIVATE */
 +              ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
 +
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
 +              ufshcd_quirk_tune_host_pa_tactivate(hba);
 +
 +      ufshcd_vops_apply_dev_quirks(hba);
 +}
 +
 +static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
 +{
 +      int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
 +
 +      memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
 +
 +      hba->req_abort_count = 0;
 +}
 +
 +static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
 +{
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
 +              if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
 +                  UIC_LINK_OFF_STATE) {
 +                      hba->rpm_lvl =
 +                              ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                              UFS_SLEEP_PWR_MODE,
 +                                              UIC_LINK_HIBERN8_STATE);
 +                      dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
 +                              hba->rpm_lvl);
 +              }
 +              if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
 +                  UIC_LINK_OFF_STATE) {
 +                      hba->spm_lvl =
 +                              ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                              UFS_SLEEP_PWR_MODE,
 +                                              UIC_LINK_HIBERN8_STATE);
 +                      dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
 +                              hba->spm_lvl);
 +              }
 +      }
 +}
 +
 +/**
   * ufshcd_probe_hba - probe hba to detect device and initialize
   * @hba: per-adapter instance
   *
  static int ufshcd_probe_hba(struct ufs_hba *hba)
  {
        int ret;
 +      ktime_t start = ktime_get();
  
        ret = ufshcd_link_startup(hba);
        if (ret)
                goto out;
  
 -      ufshcd_init_pwr_info(hba);
 +      /* Debug counters initialization */
 +      ufshcd_clear_dbg_ufs_stats(hba);
 +      /* set the default level for urgent bkops */
 +      hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
 +      hba->is_urgent_bkops_lvl_checked = false;
  
        /* UniPro link is active now */
        ufshcd_set_link_active(hba);
        if (ret)
                goto out;
  
 +      ufs_advertise_fixup_device(hba);
 +      ufshcd_tune_unipro_params(hba);
 +
 +      ufshcd_apply_pm_quirks(hba);
 +      ret = ufshcd_set_vccq_rail_unused(hba,
 +              (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
 +      if (ret)
 +              goto out;
 +
        /* UFS device is also active now */
        ufshcd_set_ufs_dev_active(hba);
        ufshcd_force_reset_auto_bkops(hba);
 -      hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
        hba->wlun_dev_clr_ua = true;
  
        if (ufshcd_get_max_pwr_mode(hba)) {
                        __func__);
        } else {
                ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 -              if (ret)
 +              if (ret) {
                        dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
                                        __func__, ret);
 +                      goto out;
 +              }
        }
  
 +      /* set the state as operational after switching to desired gear */
 +      hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
        /*
         * If we are in error handling context or in power management callbacks
         * context, no need to scan the host
  
                /* clear any previous UFS device information */
                memset(&hba->dev_info, 0, sizeof(hba->dev_info));
 -              if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 -                                     QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
 +              if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 +                              QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
                        hba->dev_info.f_power_on_wp_en = flag;
  
                if (!hba->is_init_prefetch)
                if (ufshcd_scsi_add_wlus(hba))
                        goto out;
  
 +              /* Initialize devfreq after UFS device is detected */
 +              if (ufshcd_is_clkscaling_supported(hba)) {
 +                      memcpy(&hba->clk_scaling.saved_pwr_info.info,
 +                          &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
 +                      hba->clk_scaling.saved_pwr_info.is_valid = true;
 +                      hba->clk_scaling.is_scaled_up = true;
 +                      if (!hba->devfreq) {
 +                              hba->devfreq = devfreq_add_device(hba->dev,
 +                                                      &ufs_devfreq_profile,
 +                                                      "simple_ondemand",
 +                                                      gov_data);
 +                              if (IS_ERR(hba->devfreq)) {
 +                                      ret = PTR_ERR(hba->devfreq);
 +                                      dev_err(hba->dev, "Unable to register with devfreq %d\n",
 +                                              ret);
 +                                      goto out;
 +                              }
 +                      }
 +                      hba->clk_scaling.is_allowed = true;
 +              }
 +
                scsi_scan_host(hba->host);
                pm_runtime_put_sync(hba->dev);
        }
        if (!hba->is_init_prefetch)
                hba->is_init_prefetch = true;
  
 -      /* Resume devfreq after UFS device is detected */
 -      if (ufshcd_is_clkscaling_enabled(hba))
 -              devfreq_resume_device(hba->devfreq);
 -
 +      /*
 +       * Enable auto hibern8 if supported, after full host and
 +       * device initialization.
 +       */
 +      if (ufshcd_is_auto_hibern8_supported(hba))
 +              ufshcd_set_auto_hibern8_timer(hba,
 +                                    hba->hibern8_on_idle.delay_ms);
  out:
        /*
         * If we failed to initialize the device or the device is not
                ufshcd_hba_exit(hba);
        }
  
 -      return ret;
 -}
 +      trace_ufshcd_init(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_async_scan - asynchronous execution for probing hba
 + * @data: data pointer to pass to this function
 + * @cookie: cookie data
 + */
 +static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 +{
 +      struct ufs_hba *hba = (struct ufs_hba *)data;
 +
 +      /*
 +       * Don't allow clock gating and hibern8 entry, for faster device
 +       * detection.
 +       */
 +      ufshcd_hold_all(hba);
 +      ufshcd_probe_hba(hba);
 +      ufshcd_release_all(hba);
 +}
 +
 +/**
 + * ufshcd_query_ioctl - perform user read queries
 + * @hba: per-adapter instance
 + * @lun: used for lun specific queries
 + * @buffer: user space buffer for reading and submitting query data and params
 + * @return: 0 for success, negative error code otherwise
 + *
 + * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
 + * It will read the opcode, idn and buf_length parameters, and put the
 + * response in the buffer field while updating the used size in buf_length.
 + */
 +static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
 +{
 +      struct ufs_ioctl_query_data *ioctl_data;
 +      int err = 0;
 +      int length = 0;
 +      void *data_ptr;
 +      bool flag;
 +      u32 att;
 +      u8 index;
 +      u8 *desc = NULL;
 +
 +      ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
 +      if (!ioctl_data) {
 +              dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
 +                              sizeof(struct ufs_ioctl_query_data));
 +              err = -ENOMEM;
 +              goto out;
 +      }
 +
 +      /* extract params from user buffer */
 +      err = copy_from_user(ioctl_data, buffer,
 +                      sizeof(struct ufs_ioctl_query_data));
 +      if (err) {
 +              dev_err(hba->dev,
 +                      "%s: Failed copying buffer from user, err %d\n",
 +                      __func__, err);
 +              goto out_release_mem;
 +      }
 +
 +      /* verify legal parameters & send query */
 +      switch (ioctl_data->opcode) {
 +      case UPIU_QUERY_OPCODE_READ_DESC:
 +              switch (ioctl_data->idn) {
 +              case QUERY_DESC_IDN_DEVICE:
 +              case QUERY_DESC_IDN_CONFIGURAION:
 +              case QUERY_DESC_IDN_INTERCONNECT:
 +              case QUERY_DESC_IDN_GEOMETRY:
 +              case QUERY_DESC_IDN_POWER:
 +                      index = 0;
 +                      break;
 +              case QUERY_DESC_IDN_UNIT:
 +                      if (!ufs_is_valid_unit_desc_lun(lun)) {
 +                              dev_err(hba->dev,
 +                                      "%s: No unit descriptor for lun 0x%x\n",
 +                                      __func__, lun);
 +                              err = -EINVAL;
 +                              goto out_release_mem;
 +                      }
 +                      index = lun;
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              length = min_t(int, QUERY_DESC_MAX_SIZE,
 +                              ioctl_data->buf_size);
 +              desc = kzalloc(length, GFP_KERNEL);
 +              if (!desc) {
 +                      dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
 +                                      __func__, length);
 +                      err = -ENOMEM;
 +                      goto out_release_mem;
 +              }
 +              err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
 +                              ioctl_data->idn, index, 0, desc, &length);
 +              break;
 +      case UPIU_QUERY_OPCODE_READ_ATTR:
 +              switch (ioctl_data->idn) {
 +              case QUERY_ATTR_IDN_BOOT_LU_EN:
 +              case QUERY_ATTR_IDN_POWER_MODE:
 +              case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
 +              case QUERY_ATTR_IDN_OOO_DATA_EN:
 +              case QUERY_ATTR_IDN_BKOPS_STATUS:
 +              case QUERY_ATTR_IDN_PURGE_STATUS:
 +              case QUERY_ATTR_IDN_MAX_DATA_IN:
 +              case QUERY_ATTR_IDN_MAX_DATA_OUT:
 +              case QUERY_ATTR_IDN_REF_CLK_FREQ:
 +              case QUERY_ATTR_IDN_CONF_DESC_LOCK:
 +              case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
 +              case QUERY_ATTR_IDN_EE_CONTROL:
 +              case QUERY_ATTR_IDN_EE_STATUS:
 +              case QUERY_ATTR_IDN_SECONDS_PASSED:
 +                      index = 0;
 +                      break;
 +              case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
 +              case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
 +                      index = lun;
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
 +                                      index, 0, &att);
 +              break;
 +
 +      case UPIU_QUERY_OPCODE_WRITE_ATTR:
 +              err = copy_from_user(&att,
 +                              buffer + sizeof(struct ufs_ioctl_query_data),
 +                              sizeof(u32));
 +              if (err) {
 +                      dev_err(hba->dev,
 +                              "%s: Failed copying buffer from user, err %d\n",
 +                              __func__, err);
 +                      goto out_release_mem;
 +              }
 +
 +              switch (ioctl_data->idn) {
 +              case QUERY_ATTR_IDN_BOOT_LU_EN:
 +                      index = 0;
 +                      if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
 +                              dev_err(hba->dev,
 +                                      "%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
 +                                      __func__, ioctl_data->opcode,
 +                                      (unsigned int)ioctl_data->idn, att);
 +                              err = -EINVAL;
 +                              goto out_release_mem;
 +                      }
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              err = ufshcd_query_attr(hba, ioctl_data->opcode,
 +                                      ioctl_data->idn, index, 0, &att);
 +              break;
 +
 +      case UPIU_QUERY_OPCODE_READ_FLAG:
 +              switch (ioctl_data->idn) {
 +              case QUERY_FLAG_IDN_FDEVICEINIT:
 +              case QUERY_FLAG_IDN_PERMANENT_WPE:
 +              case QUERY_FLAG_IDN_PWR_ON_WPE:
 +              case QUERY_FLAG_IDN_BKOPS_EN:
 +              case QUERY_FLAG_IDN_PURGE_ENABLE:
 +              case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
 +              case QUERY_FLAG_IDN_BUSY_RTC:
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
 +                              ioctl_data->idn, &flag);
 +              break;
 +      default:
 +              goto out_einval;
 +      }
 +
 +      if (err) {
 +              dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
 +                              ioctl_data->idn);
 +              goto out_release_mem;
 +      }
 +
 +      /*
 +       * Copy the response data.
 +       * We might end up reading less data than what is specified in
 +       * "ioctl_data->buf_size", so update "ioctl_data->buf_size" to the
 +       * amount of data actually read.
 +       */
 +      switch (ioctl_data->opcode) {
 +      case UPIU_QUERY_OPCODE_READ_DESC:
 +              ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
 +              data_ptr = desc;
 +              break;
 +      case UPIU_QUERY_OPCODE_READ_ATTR:
 +              ioctl_data->buf_size = sizeof(u32);
 +              data_ptr = &att;
 +              break;
 +      case UPIU_QUERY_OPCODE_READ_FLAG:
 +              ioctl_data->buf_size = 1;
 +              data_ptr = &flag;
 +              break;
 +      case UPIU_QUERY_OPCODE_WRITE_ATTR:
 +              goto out_release_mem;
 +      default:
 +              goto out_einval;
 +      }
 +
 +      /* copy to user */
 +      err = copy_to_user(buffer, ioctl_data,
 +                      sizeof(struct ufs_ioctl_query_data));
 +      if (err)
 +              dev_err(hba->dev, "%s: Failed copying back to user.\n",
 +                      __func__);
 +      err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
 +                      data_ptr, ioctl_data->buf_size);
 +      if (err)
 +              dev_err(hba->dev, "%s: err %d copying back to user.\n",
 +                              __func__, err);
 +      goto out_release_mem;
 +
 +out_einval:
 +      dev_err(hba->dev,
 +              "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
 +              __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
 +      err = -EINVAL;
 +out_release_mem:
 +      kfree(ioctl_data);
 +      kfree(desc);
 +out:
 +      return err;
 +}
 +
 +/**
 + * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
 + * @dev: scsi device required for per LUN queries
 + * @cmd: command opcode
 + * @buffer: user space buffer for transferring data
 + *
 + * Supported commands:
 + * UFS_IOCTL_QUERY
 + */
 +static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
 +{
 +      struct ufs_hba *hba = shost_priv(dev->host);
 +      int err = 0;
 +
 +      BUG_ON(!hba);
 +      if (!buffer) {
 +              dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
 +              return -EINVAL;
 +      }
 +
 +      switch (cmd) {
 +      case UFS_IOCTL_QUERY:
 +              pm_runtime_get_sync(hba->dev);
 +              err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
 +                              buffer);
 +              pm_runtime_put_sync(hba->dev);
 +              break;
 +      default:
 +              err = -ENOIOCTLCMD;
 +              dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
 +                      cmd);
 +              break;
 +      }
 +
 +      return err;
 +}
 +
 +static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
 +{
 +      unsigned long flags;
 +      struct Scsi_Host *host;
 +      struct ufs_hba *hba;
 +      int index;
 +      bool found = false;
 +
 +      if (!scmd || !scmd->device || !scmd->device->host)
 +              return BLK_EH_NOT_HANDLED;
 +
 +      host = scmd->device->host;
 +      hba = shost_priv(host);
 +      if (!hba)
 +              return BLK_EH_NOT_HANDLED;
 +
 +      spin_lock_irqsave(host->host_lock, flags);
  
 -/**
 - * ufshcd_async_scan - asynchronous execution for probing hba
 - * @data: data pointer to pass to this function
 - * @cookie: cookie data
 - */
 -static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 -{
 -      struct ufs_hba *hba = (struct ufs_hba *)data;
 +      for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
 +              if (hba->lrb[index].cmd == scmd) {
 +                      found = true;
 +                      break;
 +              }
 +      }
  
 -      ufshcd_probe_hba(hba);
 +      spin_unlock_irqrestore(host->host_lock, flags);
 +
 +      /*
 +       * Bypass SCSI error handling and reset the block layer timer if this
 +       * SCSI command was not actually dispatched to the UFS driver; otherwise
 +       * let the SCSI layer handle the error as usual.
 +       */
 +      return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
  }
  
  static struct scsi_host_template ufshcd_driver_template = {
        .eh_abort_handler       = ufshcd_abort,
        .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
        .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
 +      .eh_timed_out           = ufshcd_eh_timed_out,
 +      .ioctl                  = ufshcd_ioctl,
 +#ifdef CONFIG_COMPAT
 +      .compat_ioctl           = ufshcd_ioctl,
 +#endif
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
@@@ -7974,13 -4441,7 +7974,13 @@@ static int ufshcd_config_vreg_load(stru
  static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
  {
 -      return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
 +      if (!vreg)
 +              return 0;
 +      else if (vreg->unused)
 +              return 0;
 +      else
 +              return ufshcd_config_vreg_load(hba->dev, vreg,
 +                                             UFS_VREG_LPM_LOAD_UA);
  }
  
  static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
  {
        if (!vreg)
                return 0;
 -
 -      return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 +      else if (vreg->unused)
 +              return 0;
 +      else
 +              return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
  }
  
  static int ufshcd_config_vreg(struct device *dev,
        name = vreg->name;
  
        if (regulator_count_voltages(reg) > 0) {
 +              uA_load = on ? vreg->max_uA : 0;
 +              ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
 +              if (ret)
 +                      goto out;
 +
                if (vreg->min_uV && vreg->max_uV) {
                        min_uV = on ? vreg->min_uV : 0;
                        ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
                                goto out;
                        }
                }
 -
 -              uA_load = on ? vreg->max_uA : 0;
 -              ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
 -              if (ret)
 -                      goto out;
        }
  out:
        return ret;
@@@ -8032,9 -4491,7 +8032,9 @@@ static int ufshcd_enable_vreg(struct de
  {
        int ret = 0;
  
 -      if (!vreg || vreg->enabled)
 +      if (!vreg)
 +              goto out;
 +      else if (vreg->enabled || vreg->unused)
                goto out;
  
        ret = ufshcd_config_vreg(dev, vreg, true);
@@@ -8054,9 -4511,7 +8054,9 @@@ static int ufshcd_disable_vreg(struct d
  {
        int ret = 0;
  
 -      if (!vreg || !vreg->enabled)
 +      if (!vreg)
 +              goto out;
 +      else if (!vreg->enabled || vreg->unused)
                goto out;
  
        ret = regulator_disable(vreg->reg);
  static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
  {
        struct ufs_vreg_info *info = &hba->vreg_info;
 +      int ret = 0;
  
 -      if (info)
 -              return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 +      if (info->vdd_hba) {
 +              ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
  
 -      return 0;
 +              if (!ret)
 +                      ufshcd_vops_update_sec_cfg(hba, on);
 +      }
 +
 +      return ret;
  }
  
  static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
@@@ -8167,73 -4617,22 +8167,73 @@@ static int ufshcd_init_hba_vreg(struct 
        return 0;
  }
  
 -static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 -                                      bool skip_ref_clk)
 +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
 +{
 +      int ret = 0;
 +      struct ufs_vreg_info *info = &hba->vreg_info;
 +
 +      if (!info)
 +              goto out;
 +      else if (!info->vccq)
 +              goto out;
 +
 +      if (unused) {
 +              /* shut off the rail here */
 +              ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
 +              /*
 +               * Mark this rail as no longer used, so it doesn't get enabled
 +               * later by mistake
 +               */
 +              if (!ret)
 +                      info->vccq->unused = true;
 +      } else {
 +              /*
 +               * The rail should already be enabled, so just make sure that
 +               * the unused flag is cleared.
 +               */
 +              info->vccq->unused = false;
 +      }
 +out:
 +      return ret;
 +}
 +
 +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 +                             bool skip_ref_clk, bool is_gating_context)
  {
        int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
        unsigned long flags;
 +      ktime_t start = ktime_get();
 +      bool clk_state_changed = false;
  
        if (!head || list_empty(head))
                goto out;
  
 +      /* call vendor specific bus vote before enabling the clocks */
 +      if (on) {
 +              ret = ufshcd_vops_set_bus_vote(hba, on);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      /*
 +       * Vendor-specific setup_clocks ops may depend on clocks managed by
 +       * this standard driver, hence call the vendor-specific setup_clocks
 +       * before disabling the clocks managed here.
 +       */
 +      if (!on) {
 +              ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
 +              if (ret)
 +                      return ret;
 +      }
 +
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
                                continue;
  
 +                      clk_state_changed = on ^ clki->enabled;
                        if (on && !clki->enabled) {
                                ret = clk_prepare_enable(clki->clk);
                                if (ret) {
                }
        }
  
 -      ret = ufshcd_vops_setup_clocks(hba, on);
 +      /*
 +       * Vendor-specific setup_clocks ops may depend on clocks managed by
 +       * this standard driver, hence call the vendor-specific setup_clocks
 +       * after enabling the clocks managed here.
 +       */
 +      if (on) {
 +              ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
 +              if (ret)
 +                      goto out;
 +      }
 +
 +      /*
 +       * Call the vendor-specific bus vote op to remove the vote after
 +       * disabling the clocks.
 +       */
 +      if (!on)
 +              ret = ufshcd_vops_set_bus_vote(hba, on);
 +
  out:
        if (ret) {
 +              if (on)
 +                      /* Can't do much if this fails */
 +                      (void) ufshcd_vops_set_bus_vote(hba, false);
                list_for_each_entry(clki, head, list) {
                        if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
                                clk_disable_unprepare(clki->clk);
                }
 -      } else if (on) {
 +      } else if (!ret && on) {
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.state = CLKS_ON;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
                spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              /* restore the secure configuration as clocks are enabled */
 +              ufshcd_vops_update_sec_cfg(hba, true);
        }
 +
 +      if (clk_state_changed)
 +              trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
 +                      (on ? "on" : "off"),
 +                      ktime_to_us(ktime_sub(ktime_get(), start)), ret);
        return ret;
  }
  
 -static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 +static int ufshcd_enable_clocks(struct ufs_hba *hba)
 +{
 +      return  ufshcd_setup_clocks(hba, true, false, false);
 +}
 +
 +static int ufshcd_disable_clocks(struct ufs_hba *hba,
 +                               bool is_gating_context)
 +{
 +      return  ufshcd_setup_clocks(hba, false, false, is_gating_context);
 +}
 +
 +static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
 +                                            bool is_gating_context)
  {
 -      return  __ufshcd_setup_clocks(hba, on, false);
 +      return  ufshcd_setup_clocks(hba, false, true, is_gating_context);
  }
  
  static int ufshcd_init_clocks(struct ufs_hba *hba)
@@@ -8354,7 -4712,7 +8354,7 @@@ static int ufshcd_variant_hba_init(stru
  {
        int err = 0;
  
 -      if (!hba->vops)
 +      if (!hba->var || !hba->var->vops)
                goto out;
  
        err = ufshcd_vops_init(hba);
@@@ -8378,9 -4736,11 +8378,9 @@@ out
  
  static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
  {
 -      if (!hba->vops)
 +      if (!hba->var || !hba->var->vops)
                return;
  
 -      ufshcd_vops_setup_clocks(hba, false);
 -
        ufshcd_vops_setup_regulators(hba, false);
  
        ufshcd_vops_exit(hba);
@@@ -8409,7 -4769,7 +8409,7 @@@ static int ufshcd_hba_init(struct ufs_h
        if (err)
                goto out_disable_hba_vreg;
  
 -      err = ufshcd_setup_clocks(hba, true);
 +      err = ufshcd_enable_clocks(hba);
        if (err)
                goto out_disable_hba_vreg;
  
  out_disable_vreg:
        ufshcd_setup_vreg(hba, false);
  out_disable_clks:
 -      ufshcd_setup_clocks(hba, false);
 +      ufshcd_disable_clocks(hba, false);
  out_disable_hba_vreg:
        ufshcd_setup_hba_vreg(hba, false);
  out:
@@@ -8443,13 -4803,7 +8443,13 @@@ static void ufshcd_hba_exit(struct ufs_
        if (hba->is_powered) {
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
 -              ufshcd_setup_clocks(hba, false);
 +              if (ufshcd_is_clkscaling_supported(hba)) {
 +                      if (hba->devfreq)
 +                              ufshcd_suspend_clkscaling(hba);
 +                      if (hba->clk_scaling.workq)
 +                              destroy_workqueue(hba->clk_scaling.workq);
 +              }
 +              ufshcd_disable_clocks(hba, false);
                ufshcd_setup_hba_vreg(hba, false);
                hba->is_powered = false;
        }
@@@ -8462,19 -4816,19 +8462,19 @@@ ufshcd_send_request_sense(struct ufs_hb
                                0,
                                0,
                                0,
 -                              SCSI_SENSE_BUFFERSIZE,
 +                              UFSHCD_REQ_SENSE_SIZE,
                                0};
        char *buffer;
        int ret;
  
 -      buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
 +      buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                goto out;
        }
  
        ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
 -                              SCSI_SENSE_BUFFERSIZE, NULL,
 +                              UFSHCD_REQ_SENSE_SIZE, NULL,
                                msecs_to_jiffies(1000), 3, NULL, REQ_PM);
        if (ret)
                pr_err("%s: failed with err %d\n", __func__, ret);
@@@ -8582,20 -4936,10 +8582,20 @@@ static int ufshcd_link_state_transition
                   (!check_for_bkops || (check_for_bkops &&
                    !hba->auto_bkops_enabled))) {
                /*
 +               * Let's make sure the link is in low power mode; we currently
 +               * do this by putting the link in Hibern8. Another way to put
 +               * the link in low power mode is to send the DME end point reset
 +               * to the device and then send the DME reset command to the
 +               * local UniPro, but putting the link in Hibern8 is much faster.
 +               */
 +              ret = ufshcd_uic_hibern8_enter(hba);
 +              if (ret)
 +                      goto out;
 +              /*
                 * Change controller state to "reset state" which
                 * should also put the link in off/reset state
                 */
 -              ufshcd_hba_stop(hba);
 +              ufshcd_hba_stop(hba, true);
                /*
                 * TODO: Check if we need any delay to make sure that
                 * controller is reset
  static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
  {
        /*
 +       * It seems some UFS devices may keep drawing more than sleep current
 +       * (at least for 500us) from UFS rails (especially from the VCCQ rail).
 +       * To avoid this situation, add a 2ms delay before putting these UFS
 +       * rails in LPM mode.
 +       */
 +      if (!ufshcd_is_link_active(hba))
 +              usleep_range(2000, 2100);
 +
 +      /*
         * If UFS device is either in UFS_Sleep turn off VCC rail to save some
         * power.
         *
@@@ -8650,6 -4985,7 +8650,6 @@@ static int ufshcd_vreg_set_hpm(struct u
            !hba->dev_info.is_lu_power_on_wp) {
                ret = ufshcd_setup_vreg(hba, true);
        } else if (!ufshcd_is_ufs_dev_active(hba)) {
 -              ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
                if (!ret && !ufshcd_is_link_active(hba)) {
                        ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
                        if (ret)
                        if (ret)
                                goto vccq_lpm;
                }
 +              ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
        }
        goto out;
  
  
  static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
  {
 -      if (ufshcd_is_link_off(hba))
 +      if (ufshcd_is_link_off(hba) ||
 +          (ufshcd_is_link_hibern8(hba)
 +           && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
                ufshcd_setup_hba_vreg(hba, false);
  }
  
  static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
  {
 -      if (ufshcd_is_link_off(hba))
 +      if (ufshcd_is_link_off(hba) ||
 +          (ufshcd_is_link_hibern8(hba)
 +           && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
                ufshcd_setup_hba_vreg(hba, true);
  }
  
@@@ -8724,17 -5055,8 +8724,17 @@@ static int ufshcd_suspend(struct ufs_hb
         * If we can't transition into any of the low power modes
         * just gate the clocks.
         */
 -      ufshcd_hold(hba, false);
 +      WARN_ON(hba->hibern8_on_idle.is_enabled &&
 +              hba->hibern8_on_idle.active_reqs);
 +      ufshcd_hold_all(hba);
        hba->clk_gating.is_suspended = true;
 +      hba->hibern8_on_idle.is_suspended = true;
 +
 +      if (hba->clk_scaling.is_allowed) {
 +              cancel_work_sync(&hba->clk_scaling.suspend_work);
 +              cancel_work_sync(&hba->clk_scaling.resume_work);
 +              ufshcd_suspend_clkscaling(hba);
 +      }
  
        if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
                        req_link_state == UIC_LINK_ACTIVE_STATE) {
  
        if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
            (req_link_state == hba->uic_link_state))
 -              goto out;
 +              goto enable_gating;
  
        /* UFS device & link must be active before we enter in this function */
        if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
                ret = -EINVAL;
 -              goto out;
 +              goto enable_gating;
        }
  
        if (ufshcd_is_runtime_pm(pm_op)) {
        if (ret)
                goto set_dev_active;
  
 +      if (ufshcd_is_link_hibern8(hba) &&
 +          ufshcd_is_hibern8_on_idle_allowed(hba))
 +              hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 +
        ufshcd_vreg_set_lpm(hba);
  
  disable_clks:
        /*
 -       * The clock scaling needs access to controller registers. Hence, Wait
 -       * for pending clock scaling work to be done before clocks are
 -       * turned off.
 -       */
 -      if (ufshcd_is_clkscaling_enabled(hba)) {
 -              devfreq_suspend_device(hba->devfreq);
 -              hba->clk_scaling.window_start_t = 0;
 -      }
 -      /*
         * Call vendor specific suspend callback. As these callbacks may access
         * vendor specific host controller register space call them before the
         * host clocks are ON.
        if (ret)
                goto set_link_active;
  
 -      ret = ufshcd_vops_setup_clocks(hba, false);
 -      if (ret)
 -              goto vops_resume;
 -
        if (!ufshcd_is_link_active(hba))
 -              ufshcd_setup_clocks(hba, false);
 +              ret = ufshcd_disable_clocks(hba, false);
        else
                /* If link is active, device ref_clk can't be switched off */
 -              __ufshcd_setup_clocks(hba, false, true);
 +              ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
 +      if (ret)
 +              goto set_link_active;
  
 -      hba->clk_gating.state = CLKS_OFF;
 +      if (ufshcd_is_clkgating_allowed(hba)) {
 +              hba->clk_gating.state = CLKS_OFF;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                                      hba->clk_gating.state);
 +      }
        /*
         * Disable the host irq as host controller as there won't be any
         * host controller transaction expected till resume.
        ufshcd_hba_vreg_set_lpm(hba);
        goto out;
  
 -vops_resume:
 -      ufshcd_vops_resume(hba, pm_op);
  set_link_active:
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_resume_clkscaling(hba);
        ufshcd_vreg_set_hpm(hba);
 -      if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
 +      if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
                ufshcd_set_link_active(hba);
 -      else if (ufshcd_is_link_off(hba))
 +      } else if (ufshcd_is_link_off(hba)) {
 +              ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
                ufshcd_host_reset_and_restore(hba);
 +      }
  set_dev_active:
        if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
                ufshcd_disable_auto_bkops(hba);
  enable_gating:
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_resume_clkscaling(hba);
 +      hba->hibern8_on_idle.is_suspended = false;
        hba->clk_gating.is_suspended = false;
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
  out:
        hba->pm_op_in_progress = 0;
 +
 +      if (ret)
 +              ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
 +
        return ret;
  }
  
@@@ -8867,12 -5183,14 +8867,12 @@@ static int ufshcd_resume(struct ufs_hb
  
        ufshcd_hba_vreg_set_hpm(hba);
        /* Make sure clocks are enabled before accessing controller */
 -      ret = ufshcd_setup_clocks(hba, true);
 +      ret = ufshcd_enable_clocks(hba);
        if (ret)
                goto out;
  
        /* enable the host irq as host controller would be active soon */
 -      ret = ufshcd_enable_irq(hba);
 -      if (ret)
 -              goto disable_irq_and_vops_clks;
 +      ufshcd_enable_irq(hba);
  
        ret = ufshcd_vreg_set_hpm(hba);
        if (ret)
  
        if (ufshcd_is_link_hibern8(hba)) {
                ret = ufshcd_uic_hibern8_exit(hba);
 -              if (!ret)
 +              if (!ret) {
                        ufshcd_set_link_active(hba);
 -              else
 +                      if (ufshcd_is_hibern8_on_idle_allowed(hba))
 +                              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              } else {
                        goto vendor_suspend;
 +              }
        } else if (ufshcd_is_link_off(hba)) {
 -              ret = ufshcd_host_reset_and_restore(hba);
                /*
 -               * ufshcd_host_reset_and_restore() should have already
 +               * A full initialization of the host and the device is required
 +               * since the link was put to off during suspend.
 +               */
 +              ret = ufshcd_reset_and_restore(hba);
 +              /*
 +               * ufshcd_reset_and_restore() should have already
                 * set the link state as active
                 */
                if (ret || !ufshcd_is_link_active(hba))
                        goto vendor_suspend;
 +              /* mark link state as hibern8 exited */
 +              if (ufshcd_is_hibern8_on_idle_allowed(hba))
 +                      hba->hibern8_on_idle.state = HIBERN8_EXITED;
        }
  
        if (!ufshcd_is_ufs_dev_active(hba)) {
                ufshcd_urgent_bkops(hba);
  
        hba->clk_gating.is_suspended = false;
 +      hba->hibern8_on_idle.is_suspended = false;
  
 -      if (ufshcd_is_clkscaling_enabled(hba))
 -              devfreq_resume_device(hba->devfreq);
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_resume_clkscaling(hba);
  
        /* Schedule clock gating in case of no access to UFS device yet */
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        goto out;
  
  set_old_link_state:
        ufshcd_link_state_transition(hba, old_link_state, 0);
 +      if (ufshcd_is_link_hibern8(hba) &&
 +          ufshcd_is_hibern8_on_idle_allowed(hba))
 +              hba->hibern8_on_idle.state = HIBERN8_ENTERED;
  vendor_suspend:
        ufshcd_vops_suspend(hba, pm_op);
  disable_vreg:
        ufshcd_vreg_set_lpm(hba);
  disable_irq_and_vops_clks:
        ufshcd_disable_irq(hba);
 -      ufshcd_setup_clocks(hba, false);
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_suspend_clkscaling(hba);
 +      ufshcd_disable_clocks(hba, false);
 +      if (ufshcd_is_clkgating_allowed(hba))
 +              hba->clk_gating.state = CLKS_OFF;
  out:
        hba->pm_op_in_progress = 0;
 +
 +      if (ret)
 +              ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
 +
        return ret;
  }
  
  int ufshcd_system_suspend(struct ufs_hba *hba)
  {
        int ret = 0;
 +      ktime_t start = ktime_get();
  
        if (!hba || !hba->is_powered)
                return 0;
  
 -      if (pm_runtime_suspended(hba->dev)) {
 -              if (hba->rpm_lvl == hba->spm_lvl)
 -                      /*
 -                       * There is possibility that device may still be in
 -                       * active state during the runtime suspend.
 -                       */
 -                      if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
 -                          hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
 -                              goto out;
 +      if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
 +           hba->curr_dev_pwr_mode) &&
 +          (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
 +           hba->uic_link_state))
 +              goto out;
  
 +      if (pm_runtime_suspended(hba->dev)) {
                /*
                 * UFS device and/or UFS link low power states during runtime
                 * suspend seems to be different than what is expected during
  
        ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
  out:
 +      trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
        if (!ret)
                hba->is_sys_suspended = true;
        return ret;
@@@ -9020,9 -5315,6 +9020,9 @@@ EXPORT_SYMBOL(ufshcd_system_suspend)
  
  int ufshcd_system_resume(struct ufs_hba *hba)
  {
 +      int ret = 0;
 +      ktime_t start = ktime_get();
 +
        if (!hba)
                return -EINVAL;
  
                 * Let the runtime resume take care of resuming
                 * if runtime suspended.
                 */
 -              return 0;
 -
 -      return ufshcd_resume(hba, UFS_SYSTEM_PM);
 +              goto out;
 +      else
 +              ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
 +out:
 +      trace_ufshcd_system_resume(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
 +      return ret;
  }
  EXPORT_SYMBOL(ufshcd_system_resume);
  
   */
  int ufshcd_runtime_suspend(struct ufs_hba *hba)
  {
 +      int ret = 0;
 +      ktime_t start = ktime_get();
 +
        if (!hba)
                return -EINVAL;
  
        if (!hba->is_powered)
 -              return 0;
 +              goto out;
 +      else
 +              ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
 +out:
 +      trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode,
 +              hba->uic_link_state);
 +      return ret;
  
 -      return ufshcd_suspend(hba, UFS_RUNTIME_PM);
  }
  EXPORT_SYMBOL(ufshcd_runtime_suspend);
  
   */
  int ufshcd_runtime_resume(struct ufs_hba *hba)
  {
 +      int ret = 0;
 +      ktime_t start = ktime_get();
 +
        if (!hba)
                return -EINVAL;
  
        if (!hba->is_powered)
 -              return 0;
 -
 -      return ufshcd_resume(hba, UFS_RUNTIME_PM);
 +              goto out;
 +      else
 +              ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
 +out:
 +      trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode,
 +              hba->uic_link_state);
 +      return ret;
  }
  EXPORT_SYMBOL(ufshcd_runtime_resume);
  
@@@ -9120,246 -5388,6 +9120,246 @@@ int ufshcd_runtime_idle(struct ufs_hba 
  }
  EXPORT_SYMBOL(ufshcd_runtime_idle);
  
 +static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
 +                                         struct device_attribute *attr,
 +                                         const char *buf, size_t count,
 +                                         bool rpm)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      if (value >= UFS_PM_LVL_MAX)
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (rpm)
 +              hba->rpm_lvl = value;
 +      else
 +              hba->spm_lvl = value;
 +      ufshcd_apply_pm_quirks(hba);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
 +}
 +
 +static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      int curr_len;
 +      u8 lvl;
 +
 +      curr_len = snprintf(buf, PAGE_SIZE,
 +                          "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                          hba->rpm_lvl,
 +                          ufschd_ufs_dev_pwr_mode_to_string(
 +                              ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
 +                          ufschd_uic_link_state_to_string(
 +                              ufs_pm_lvl_states[hba->rpm_lvl].link_state));
 +
 +      curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                           "\nAll available Runtime PM levels info:\n");
 +      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
 +              curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                                   "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                                  lvl,
 +                                  ufschd_ufs_dev_pwr_mode_to_string(
 +                                      ufs_pm_lvl_states[lvl].dev_state),
 +                                  ufschd_uic_link_state_to_string(
 +                                      ufs_pm_lvl_states[lvl].link_state));
 +
 +      return curr_len;
 +}
 +
 +static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
 +}
 +
 +static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
 +{
 +      hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
 +      hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
 +      sysfs_attr_init(&hba->rpm_lvl_attr.attr);
 +      hba->rpm_lvl_attr.attr.name = "rpm_lvl";
 +      hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
 +}
 +
 +static ssize_t ufshcd_spm_lvl_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      int curr_len;
 +      u8 lvl;
 +
 +      curr_len = snprintf(buf, PAGE_SIZE,
 +                          "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                          hba->spm_lvl,
 +                          ufschd_ufs_dev_pwr_mode_to_string(
 +                              ufs_pm_lvl_states[hba->spm_lvl].dev_state),
 +                          ufschd_uic_link_state_to_string(
 +                              ufs_pm_lvl_states[hba->spm_lvl].link_state));
 +
 +      curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                           "\nAll available System PM levels info:\n");
 +      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
 +              curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                                   "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                                  lvl,
 +                                  ufschd_ufs_dev_pwr_mode_to_string(
 +                                      ufs_pm_lvl_states[lvl].dev_state),
 +                                  ufschd_uic_link_state_to_string(
 +                                      ufs_pm_lvl_states[lvl].link_state));
 +
 +      return curr_len;
 +}
 +
 +static ssize_t ufshcd_spm_lvl_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
 +}
 +
 +static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
 +{
 +      hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
 +      hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
 +      sysfs_attr_init(&hba->spm_lvl_attr.attr);
 +      hba->spm_lvl_attr.attr.name = "spm_lvl";
 +      hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->spm_lvl_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
 +}
 +
 +static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
 +                                enum desc_idn desc_id,
 +                                u8 desc_index,
 +                                u8 param_offset,
 +                                u8 *sysfs_buf,
 +                                u8 param_size)
 +{
 +      u8 desc_buf[8] = {0};
 +      int ret;
 +
 +      if (param_size > 8)
 +              return -EINVAL;
 +
 +      pm_runtime_get_sync(hba->dev);
 +      ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
 +                              param_offset, desc_buf, param_size);
 +      pm_runtime_put_sync(hba->dev);
 +
 +      if (ret)
 +              return -EINVAL;
 +      switch (param_size) {
 +      case 1:
 +              ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%02X\n", *desc_buf);
 +              break;
 +      case 2:
 +              ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%04X\n",
 +                      get_unaligned_be16(desc_buf));
 +              break;
 +      case 4:
 +              ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%08X\n",
 +                      get_unaligned_be32(desc_buf));
 +              break;
 +      case 8:
 +              ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%016llX\n",
 +                      get_unaligned_be64(desc_buf));
 +              break;
 +      }
 +
 +      return ret;
 +}
 +
 +
 +#define UFS_DESC_PARAM(_name, _puname, _duname, _size)                        \
 +      static ssize_t _name##_show(struct device *dev,                 \
 +              struct device_attribute *attr, char *buf)                       \
 +{                                                                     \
 +      struct ufs_hba *hba = dev_get_drvdata(dev);             \
 +      return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
 +              0, _duname##_DESC_PARAM##_puname, buf, _size);          \
 +}                                                                     \
 +static DEVICE_ATTR_RO(_name)
 +
 +#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size)                   \
 +              UFS_DESC_PARAM(_name, _uname, HEALTH, _size)
 +
 +UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
 +UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
 +UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);
 +
 +static struct attribute *ufs_sysfs_health_descriptor[] = {
 +      &dev_attr_eol_info.attr,
 +      &dev_attr_life_time_estimation_a.attr,
 +      &dev_attr_life_time_estimation_b.attr,
 +      NULL,
 +};
 +
 +static const struct attribute_group ufs_sysfs_health_descriptor_group = {
 +      .name = "health_descriptor",
 +      .attrs = ufs_sysfs_health_descriptor,
 +};
 +
 +static const struct attribute_group *ufs_sysfs_groups[] = {
 +      &ufs_sysfs_health_descriptor_group,
 +      NULL,
 +};
 +
 +
 +static void ufshcd_add_desc_sysfs_nodes(struct device *dev)
 +{
 +      int ret;
 +
 +      ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
 +      if (ret)
 +              dev_err(dev,
 +                      "%s: sysfs groups creation failed (err = %d)\n",
 +                      __func__, ret);
 +}
 +
 +static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
 +{
 +      ufshcd_add_rpm_lvl_sysfs_nodes(hba);
 +      ufshcd_add_spm_lvl_sysfs_nodes(hba);
 +      ufshcd_add_desc_sysfs_nodes(hba->dev);
 +}
 +
 +static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
 +{
 +      bool suspend = false;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_scaling.is_allowed) {
 +              hba->clk_scaling.is_allowed = false;
 +              suspend = true;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      /*
 +       * Clock scaling work may already have been scheduled, so make
 +       * sure it doesn't race with shutdown.
 +       */
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
 +              cancel_work_sync(&hba->clk_scaling.suspend_work);
 +              cancel_work_sync(&hba->clk_scaling.resume_work);
 +              if (suspend)
 +                      ufshcd_suspend_clkscaling(hba);
 +      }
 +
 +      /* Unregister so that devfreq_monitor can't race with shutdown */
 +      if (hba->devfreq)
 +              devfreq_remove_device(hba->devfreq);
 +}
 +
  /**
   * ufshcd_shutdown - shutdown routine
   * @hba: per adapter instance
@@@ -9372,28 -5400,18 +9372,31 @@@ int ufshcd_shutdown(struct ufs_hba *hba
  {
        int ret = 0;
  
+       if (!hba->is_powered)
+               goto out;
        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
                goto out;
  
 -      if (pm_runtime_suspended(hba->dev)) {
 -              ret = ufshcd_runtime_resume(hba);
 -              if (ret)
 -                      goto out;
 -      }
 -
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_hold_all(hba);
 +      ufshcd_mark_shutdown_ongoing(hba);
 +      ufshcd_shutdown_clkscaling(hba);
 +      /**
 +       * (1) Acquire the lock to stop any more requests
 +       * (2) Wait for all issued requests to complete
 +       */
 +      ufshcd_get_write_lock(hba);
 +      ufshcd_scsi_block_requests(hba);
 +      ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 +      if (ret)
 +              dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
 +                      __func__, ret);
 +      /* Requests may have errored out above; let them be handled */
 +      flush_work(&hba->eh_work);
 +      /* reqs issued from contexts other than shutdown will fail from now on */
 +      ufshcd_scsi_unblock_requests(hba);
 +      ufshcd_release_all(hba);
        ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
  out:
        if (ret)
@@@ -9468,17 -5486,13 +9471,17 @@@ void ufshcd_remove(struct ufs_hba *hba
        scsi_remove_host(hba->host);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
 -      ufshcd_hba_stop(hba);
 +      ufshcd_hba_stop(hba, true);
  
        ufshcd_exit_clk_gating(hba);
 -      ufshcd_exit_latency_hist(hba);
 -      if (ufshcd_is_clkscaling_enabled(hba))
 +      ufshcd_exit_hibern8_on_idle(hba);
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
 +              ufshcd_exit_latency_hist(hba);
                devfreq_remove_device(hba->devfreq);
 +      }
        ufshcd_hba_exit(hba);
 +      ufsdbg_remove_debugfs(hba);
  }
  EXPORT_SYMBOL_GPL(ufshcd_remove);
  
@@@ -9544,370 -5558,66 +9547,370 @@@ out_error
  }
  EXPORT_SYMBOL(ufshcd_alloc_host);
  
 -static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 +/**
 + * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 + * @hba: per adapter instance
 + * @scale_up: True if scaling up and false if scaling down
 + *
 + * Returns true if scaling is required, false otherwise.
 + */
 +static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
 +                                             bool scale_up)
  {
 -      int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
  
        if (!head || list_empty(head))
 -              goto out;
 -
 -      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
 -      if (ret)
 -              return ret;
 +              return false;
  
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (scale_up && clki->max_freq) {
                                if (clki->curr_freq == clki->max_freq)
                                        continue;
 -                              ret = clk_set_rate(clki->clk, clki->max_freq);
 -                              if (ret) {
 -                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 -                                              __func__, clki->name,
 -                                              clki->max_freq, ret);
 -                                      break;
 -                              }
 -                              clki->curr_freq = clki->max_freq;
 -
 +                              return true;
                        } else if (!scale_up && clki->min_freq) {
                                if (clki->curr_freq == clki->min_freq)
                                        continue;
 -                              ret = clk_set_rate(clki->clk, clki->min_freq);
 -                              if (ret) {
 -                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 -                                              __func__, clki->name,
 -                                              clki->min_freq, ret);
 -                                      break;
 -                              }
 -                              clki->curr_freq = clki->min_freq;
 +                              return true;
                        }
                }
 -              dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
 -                              clki->name, clk_get_rate(clki->clk));
        }
  
 -      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
 +      return false;
 +}
 +
 +/**
 + * ufshcd_scale_gear - scale up/down UFS gear
 + * @hba: per adapter instance
 + * @scale_up: True for scaling up gear and false for scaling down
 + *
 + * Returns 0 for success,
 + * Returns -EBUSY if scaling can't happen at this time
 + * Returns non-zero for any other errors
 + */
 +static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +      struct ufs_pa_layer_attr new_pwr_info;
 +      u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
 +
 +      BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
 +
 +      if (scale_up) {
 +              memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
 +                     sizeof(struct ufs_pa_layer_attr));
 +              /*
 +               * Some UFS devices may stop responding after switching from
 +               * HS-G1 directly to HS-G3. These devices are found to work
 +               * fine if the switch is done in two steps: HS-G1 to HS-G2,
 +               * followed by HS-G2 to HS-G3. If the
 +               * UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH quirk is enabled
 +               * for such devices, this two-step gear switch workaround is
 +               * applied.
 +               */
 +              if ((hba->dev_quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
 +                  && (hba->pwr_info.gear_tx == UFS_HS_G1)
 +                  && (new_pwr_info.gear_tx == UFS_HS_G3)) {
 +                      /* scale up to G2 first */
 +                      new_pwr_info.gear_tx = UFS_HS_G2;
 +                      new_pwr_info.gear_rx = UFS_HS_G2;
 +                      ret = ufshcd_change_power_mode(hba, &new_pwr_info);
 +                      if (ret)
 +                              goto out;
 +
 +                      /* scale up to G3 now */
 +                      new_pwr_info.gear_tx = UFS_HS_G3;
 +                      new_pwr_info.gear_rx = UFS_HS_G3;
 +                      /* now, fall through to set the HS-G3 */
 +              }
 +              ret = ufshcd_change_power_mode(hba, &new_pwr_info);
 +              if (ret)
 +                      goto out;
 +      } else {
 +              memcpy(&new_pwr_info, &hba->pwr_info,
 +                     sizeof(struct ufs_pa_layer_attr));
 +
 +              if (hba->pwr_info.gear_tx > scale_down_gear
 +                  || hba->pwr_info.gear_rx > scale_down_gear) {
 +                      /* save the current power mode */
 +                      memcpy(&hba->clk_scaling.saved_pwr_info.info,
 +                              &hba->pwr_info,
 +                              sizeof(struct ufs_pa_layer_attr));
 +
 +                      /* scale down gear */
 +                      new_pwr_info.gear_tx = scale_down_gear;
 +                      new_pwr_info.gear_rx = scale_down_gear;
 +                      if (!(hba->dev_quirks & UFS_DEVICE_NO_FASTAUTO)) {
 +                              new_pwr_info.pwr_tx = FASTAUTO_MODE;
 +                              new_pwr_info.pwr_rx = FASTAUTO_MODE;
 +                      }
 +              }
 +              ret = ufshcd_change_power_mode(hba, &new_pwr_info);
 +      }
 +
 +out:
 +      if (ret)
 +              dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
 +                      __func__, ret,
 +                      hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
 +                      new_pwr_info.gear_tx, new_pwr_info.gear_rx,
 +                      scale_up);
 +
 +      return ret;
 +}
 +
 +static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 +{
 +      #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
 +      int ret = 0;
 +      /*
 +       * make sure that there are no outstanding requests when
 +       * clock scaling is in progress
 +       */
 +      ufshcd_scsi_block_requests(hba);
 +      down_write(&hba->lock);
 +      if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 +              ret = -EBUSY;
 +              up_write(&hba->lock);
 +              ufshcd_scsi_unblock_requests(hba);
 +      }
 +
 +      return ret;
 +}
 +
 +static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
 +{
 +      up_write(&hba->lock);
 +      ufshcd_scsi_unblock_requests(hba);
 +}
 +
 +/**
 + * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 + * @hba: per adapter instance
 + * @scale_up: True for scaling up and false for scaling down
 + *
 + * Returns 0 for success,
 + * Returns -EBUSY if scaling can't happen at this time
 + * Returns non-zero for any other errors
 + */
 +static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +
 +      /* let's not get into low power until clock scaling is completed */
 +      hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
 +      ufshcd_hold_all(hba);
 +
 +      ret = ufshcd_clock_scaling_prepare(hba);
 +      if (ret)
 +              goto out;
 +
 +      /* scale down the gear before scaling down clocks */
 +      if (!scale_up) {
 +              ret = ufshcd_scale_gear(hba, false);
 +              if (ret)
 +                      goto clk_scaling_unprepare;
 +      }
 +
 +      /*
 +       * If auto hibern8 is supported, put the link into hibern8
 +       * manually to avoid auto hibern8 racing with the clock
 +       * frequency scaling sequence.
 +       */
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              ret = ufshcd_uic_hibern8_enter(hba);
 +              if (ret)
 +                      /* link will be in a bad state; no need to scale_up_gear */
 +                      return ret;
 +      }
 +
 +      ret = ufshcd_scale_clks(hba, scale_up);
 +      if (ret)
 +              goto scale_up_gear;
 +
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              ret = ufshcd_uic_hibern8_exit(hba);
 +              if (ret)
 +                      /* link will be in a bad state; no need to scale_up_gear */
 +                      return ret;
 +      }
 +
 +      /* scale up the gear after scaling up clocks */
 +      if (scale_up) {
 +              ret = ufshcd_scale_gear(hba, true);
 +              if (ret) {
 +                      ufshcd_scale_clks(hba, false);
 +                      goto clk_scaling_unprepare;
 +              }
 +      }
 +
 +      if (!ret) {
 +              hba->clk_scaling.is_scaled_up = scale_up;
 +              if (scale_up)
 +                      hba->clk_gating.delay_ms =
 +                              hba->clk_gating.delay_ms_perf;
 +              else
 +                      hba->clk_gating.delay_ms =
 +                              hba->clk_gating.delay_ms_pwr_save;
 +      }
 +
 +      goto clk_scaling_unprepare;
  
 +scale_up_gear:
 +      if (!scale_up)
 +              ufshcd_scale_gear(hba, true);
 +clk_scaling_unprepare:
 +      ufshcd_clock_scaling_unprepare(hba);
  out:
 +      hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
 +      ufshcd_release_all(hba);
        return ret;
  }
  
 +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +
 +      devfreq_suspend_device(hba->devfreq);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->clk_scaling.window_start_t = 0;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +
 +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +      bool suspend = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (!hba->clk_scaling.is_suspended) {
 +              suspend = true;
 +              hba->clk_scaling.is_suspended = true;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      if (suspend)
 +              __ufshcd_suspend_clkscaling(hba);
 +}
 +
 +static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +      bool resume = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_scaling.is_suspended) {
 +              resume = true;
 +              hba->clk_scaling.is_suspended = false;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      if (resume)
 +              devfreq_resume_device(hba->devfreq);
 +}
 +
 +static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
 +}
 +
 +static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      u32 value;
 +      int err;
 +
 +      if (kstrtou32(buf, 0, &value))
 +              return -EINVAL;
 +
 +      value = !!value;
 +      if (value == hba->clk_scaling.is_allowed)
 +              goto out;
 +
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_hold(hba, false);
 +
 +      cancel_work_sync(&hba->clk_scaling.suspend_work);
 +      cancel_work_sync(&hba->clk_scaling.resume_work);
 +
 +      hba->clk_scaling.is_allowed = value;
 +
 +      if (value) {
 +              ufshcd_resume_clkscaling(hba);
 +      } else {
 +              ufshcd_suspend_clkscaling(hba);
 +              err = ufshcd_devfreq_scale(hba, true);
 +              if (err)
 +                      dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
 +                                      __func__, err);
 +      }
 +
 +      ufshcd_release(hba, false);
 +      pm_runtime_put_sync(hba->dev);
 +out:
 +      return count;
 +}
 +
 +static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
 +{
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                         clk_scaling.suspend_work);
 +      unsigned long irq_flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, irq_flags);
 +      if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
 +              spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +              return;
 +      }
 +      hba->clk_scaling.is_suspended = true;
 +      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +
 +      __ufshcd_suspend_clkscaling(hba);
 +}
 +
 +static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
 +{
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                         clk_scaling.resume_work);
 +      unsigned long irq_flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, irq_flags);
 +      if (!hba->clk_scaling.is_suspended) {
 +              spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +              return;
 +      }
 +      hba->clk_scaling.is_suspended = false;
 +      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +
 +      devfreq_resume_device(hba->devfreq);
 +}
 +
  static int ufshcd_devfreq_target(struct device *dev,
                                unsigned long *freq, u32 flags)
  {
 -      int err = 0;
 +      int ret = 0;
        struct ufs_hba *hba = dev_get_drvdata(dev);
 -      bool release_clk_hold = false;
        unsigned long irq_flags;
 +      ktime_t start;
 +      bool scale_up, sched_clk_scaling_suspend_work = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return -EINVAL;
  
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      if ((*freq > 0) && (*freq < UINT_MAX)) {
 +              dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
                return -EINVAL;
 +      }
  
        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (ufshcd_eh_in_progress(hba)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                return 0;
        }
  
 -      if (ufshcd_is_clkgating_allowed(hba) &&
 -          (hba->clk_gating.state != CLKS_ON)) {
 -              if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
 -                      /* hold the vote until the scaling work is completed */
 -                      hba->clk_gating.active_reqs++;
 -                      release_clk_hold = true;
 -                      hba->clk_gating.state = CLKS_ON;
 -              } else {
 -                      /*
 -                       * Clock gating work seems to be running in parallel
 -                       * hence skip scaling work to avoid deadlock between
 -                       * current scaling work and gating work.
 -                       */
 -                      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 -                      return 0;
 -              }
 +      if (!hba->clk_scaling.active_reqs)
 +              sched_clk_scaling_suspend_work = true;
 +
 +      scale_up = (*freq == UINT_MAX) ? true : false;
 +      if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
 +              spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +              ret = 0;
 +              goto out; /* no state change required */
        }
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
  
 -      if (*freq == UINT_MAX)
 -              err = ufshcd_scale_clks(hba, true);
 -      else if (*freq == 0)
 -              err = ufshcd_scale_clks(hba, false);
 +      start = ktime_get();
 +      ret = ufshcd_devfreq_scale(hba, scale_up);
 +      trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
 +              (scale_up ? "up" : "down"),
 +              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
  
 -      spin_lock_irqsave(hba->host->host_lock, irq_flags);
 -      if (release_clk_hold)
 -              __ufshcd_release(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +out:
 +      if (sched_clk_scaling_suspend_work)
 +              queue_work(hba->clk_scaling.workq,
 +                         &hba->clk_scaling.suspend_work);
  
 -      return err;
 +      return ret;
  }
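(For context: the ufs_devfreq_profile removed a little further down shows how this callback was wired up before this patch. Assuming the replacement keeps equivalent wiring elsewhere in the file, not visible in this hunk, the simple_ondemand governor reaches ufshcd_devfreq_target() through a profile along these lines, with *freq passed as either 0 for scale down or UINT_MAX for scale up, which is what the range check at the top of the function enforces.)

	/* sketch mirroring the profile and registration removed later in this diff */
	static struct devfreq_dev_profile ufs_devfreq_profile = {
		.polling_ms	= 100,
		.target		= ufshcd_devfreq_target,
		.get_dev_status	= ufshcd_devfreq_get_dev_status,
	};

	hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
					  "simple_ondemand", NULL);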
  
  static int ufshcd_devfreq_get_dev_status(struct device *dev,
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
        unsigned long flags;
  
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;
  
        memset(stat, 0, sizeof(*stat));
@@@ -9978,31 -5695,12 +9981,31 @@@ start_window
        return 0;
  }
  
 -static struct devfreq_dev_profile ufs_devfreq_profile = {
 -      .polling_ms     = 100,
 -      .target         = ufshcd_devfreq_target,
 -      .get_dev_status = ufshcd_devfreq_get_dev_status,
 -};
 +static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
 +{
 +      hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
 +      hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
 +      sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
 +      hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
 +      hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
 +}
 +
 +static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
 +{
 +      struct device *dev = hba->dev;
 +      int ret;
  
 +      ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
 +              &hba->lanes_per_direction);
 +      if (ret) {
 +              dev_dbg(hba->dev,
 +                      "%s: failed to read lanes-per-direction, ret=%d\n",
 +                      __func__, ret);
 +              hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
 +      }
 +}
  /**
   * ufshcd_init - Driver initialization routine
   * @hba: per-adapter instance
@@@ -10026,8 -5724,6 +10029,8 @@@ int ufshcd_init(struct ufs_hba *hba, vo
        hba->mmio_base = mmio_base;
        hba->irq = irq;
  
 +      ufshcd_init_lanes_per_dir(hba);
 +
        err = ufshcd_hba_init(hba);
        if (err)
                goto out_error;
        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);
  
 +      /* print error message if ufs_version is not valid */
 +      if ((hba->ufs_version != UFSHCI_VERSION_10) &&
 +          (hba->ufs_version != UFSHCI_VERSION_11) &&
 +          (hba->ufs_version != UFSHCI_VERSION_20) &&
 +          (hba->ufs_version != UFSHCI_VERSION_21))
 +              dev_err(hba->dev, "invalid UFS version 0x%x\n",
 +                      hba->ufs_version);
 +
        /* Get Interrupt bit mask per version */
        hba->intr_mask = ufshcd_get_intr_mask(hba);
  
 +      /* Enable debug prints */
 +      hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
 +
        err = ufshcd_set_dma_mask(hba);
        if (err) {
                dev_err(hba->dev, "set dma mask failed\n");
        host->max_channel = UFSHCD_MAX_CHANNEL;
        host->unique_id = host->host_no;
        host->max_cmd_len = MAX_CDB_SIZE;
 +      host->set_dbd_for_caching = 1;
  
        hba->max_pwr_info.is_valid = false;
  
        /* Initialize work queues */
        INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 +      INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
  
        /* Initialize UIC command mutex */
        mutex_init(&hba->uic_cmd_mutex);
        /* Initialize mutex for device management commands */
        mutex_init(&hba->dev_cmd.lock);
  
 +      init_rwsem(&hba->lock);
 +
        /* Initialize device management tag acquire wait queue */
        init_waitqueue_head(&hba->dev_cmd.tag_wq);
  
        ufshcd_init_clk_gating(hba);
 +      ufshcd_init_hibern8_on_idle(hba);
 +
 +      /*
 +       * In order to avoid any spurious interrupt immediately after
 +       * registering UFS controller interrupt handler, clear any pending UFS
 +       * interrupt status and disable all the UFS interrupts.
 +       */
 +      ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
 +                    REG_INTERRUPT_STATUS);
 +      ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
 +      /*
 +       * Make sure that UFS interrupts are disabled and any pending interrupt
 +       * status is cleared before registering UFS interrupt handler.
 +       */
 +      mb();
 +
        /* IRQ registration */
        err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
        if (err) {
                goto exit_gating;
        }
  
 +      /* Reset controller to power on reset (POR) state */
 +      ufshcd_vops_full_reset(hba);
 +
 +      /* reset connected UFS device */
 +      err = ufshcd_reset_device(hba);
 +      if (err)
 +              dev_warn(hba->dev, "%s: device reset failed. err %d\n",
 +                       __func__, err);
 +
        /* Host controller enable */
        err = ufshcd_hba_enable(hba);
        if (err) {
                dev_err(hba->dev, "Host controller enable failed\n");
 +              ufshcd_print_host_regs(hba);
 +              ufshcd_print_host_state(hba);
                goto out_remove_scsi_host;
        }
  
 -      if (ufshcd_is_clkscaling_enabled(hba)) {
 -              hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
 -                                                 "simple_ondemand", NULL);
 -              if (IS_ERR(hba->devfreq)) {
 -                      dev_err(hba->dev, "Unable to register with devfreq %ld\n",
 -                                      PTR_ERR(hba->devfreq));
 -                      goto out_remove_scsi_host;
 -              }
 -              /* Suspend devfreq until the UFS device is detected */
 -              devfreq_suspend_device(hba->devfreq);
 -              hba->clk_scaling.window_start_t = 0;
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              char wq_name[sizeof("ufs_clkscaling_00")];
 +
 +              INIT_WORK(&hba->clk_scaling.suspend_work,
 +                        ufshcd_clk_scaling_suspend_work);
 +              INIT_WORK(&hba->clk_scaling.resume_work,
 +                        ufshcd_clk_scaling_resume_work);
 +
 +              snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
 +                       host->host_no);
 +              hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
 +
 +              ufshcd_clkscaling_init_sysfs(hba);
        }
  
 +      /*
 +       * If rpm_lvl and spm_lvl are not already set to valid levels, set
 +       * the default power management level for UFS runtime and system
 +       * suspend. The default power saving mode keeps the UFS link in
 +       * Hibern8 state and the UFS device in sleep.
 +       */
 +      if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
 +              hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                                      UFS_SLEEP_PWR_MODE,
 +                                                      UIC_LINK_HIBERN8_STATE);
 +      if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
 +              hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                                      UFS_SLEEP_PWR_MODE,
 +                                                      UIC_LINK_HIBERN8_STATE);
 +
        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
  
        ufshcd_init_latency_hist(hba);
  
        /*
 -       * The device-initialize-sequence hasn't been invoked yet.
 -       * Set the device to power-off state
 +       * We assume that the device was not put into a sleep/power-down
 +       * state during the boot stage before the kernel took over.
 +       * This assumption helps avoid doing link startup twice during
 +       * ufshcd_probe_hba().
         */
 -      ufshcd_set_ufs_dev_poweroff(hba);
 +      ufshcd_set_ufs_dev_active(hba);
 +
 +      ufshcd_cmd_log_init(hba);
  
        async_schedule(ufshcd_async_scan, hba);
  
 +      ufsdbg_add_debugfs(hba);
 +
 +      ufshcd_add_sysfs_nodes(hba);
 +
        return 0;
  
  out_remove_scsi_host:
diff --combined mm/shmem.c
@@@ -1003,7 -1003,7 +1003,7 @@@ static int shmem_replace_page(struct pa
        copy_highpage(newpage, oldpage);
        flush_dcache_page(newpage);
  
 -      __set_page_locked(newpage);
 +      __SetPageLocked(newpage);
        SetPageUptodate(newpage);
        SetPageSwapBacked(newpage);
        set_page_private(newpage, swap_index);
@@@ -1195,7 -1195,7 +1195,7 @@@ repeat
                }
  
                __SetPageSwapBacked(page);
 -              __set_page_locked(page);
 +              __SetPageLocked(page);
                if (sgp == SGP_WRITE)
                        __SetPageReferenced(page);
  
@@@ -1854,11 -1854,12 +1854,12 @@@ static void shmem_tag_pins(struct addre
        void **slot;
        pgoff_t start;
        struct page *page;
+       unsigned int tagged = 0;
  
        lru_add_drain();
        start = 0;
-       rcu_read_lock();
  
+       spin_lock_irq(&mapping->tree_lock);
  restart:
        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                page = radix_tree_deref_slot(slot);
                        if (radix_tree_deref_retry(page))
                                goto restart;
                } else if (page_count(page) - page_mapcount(page) > 1) {
-                       spin_lock_irq(&mapping->tree_lock);
                        radix_tree_tag_set(&mapping->page_tree, iter.index,
                                           SHMEM_TAG_PINNED);
-                       spin_unlock_irq(&mapping->tree_lock);
                }
  
-               if (need_resched()) {
-                       cond_resched_rcu();
-                       start = iter.index + 1;
-                       goto restart;
-               }
+               if (++tagged % 1024)
+                       continue;
+               spin_unlock_irq(&mapping->tree_lock);
+               cond_resched();
+               start = iter.index + 1;
+               spin_lock_irq(&mapping->tree_lock);
+               goto restart;
        }
-       rcu_read_unlock();
+       spin_unlock_irq(&mapping->tree_lock);
  }
  
  /*
diff --combined mm/slub.c
+++ b/mm/slub.c
@@@ -333,13 -333,11 +333,13 @@@ static inline int oo_objects(struct kme
   */
  static __always_inline void slab_lock(struct page *page)
  {
 +      VM_BUG_ON_PAGE(PageTail(page), page);
        bit_spin_lock(PG_locked, &page->flags);
  }
  
  static __always_inline void slab_unlock(struct page *page)
  {
 +      VM_BUG_ON_PAGE(PageTail(page), page);
        __bit_spin_unlock(PG_locked, &page->flags);
  }
  
@@@ -687,21 -685,11 +687,21 @@@ static void print_trailer(struct kmem_c
        dump_stack();
  }
  
 +#ifdef CONFIG_SLUB_DEBUG_PANIC_ON
 +static void slab_panic(const char *cause)
 +{
 +      panic("%s\n", cause);
 +}
 +#else
 +static inline void slab_panic(const char *cause) {}
 +#endif
 +
  void object_err(struct kmem_cache *s, struct page *page,
                        u8 *object, char *reason)
  {
        slab_bug(s, "%s", reason);
        print_trailer(s, page, object);
 +      slab_panic(reason);
  }
  
  static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
        slab_bug(s, "%s", buf);
        print_page_info(page);
        dump_stack();
 +      slab_panic("slab error");
  }
  
  static void init_object(struct kmem_cache *s, void *object, u8 val)
  static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
                                                void *from, void *to)
  {
 +      slab_panic("object poison overwritten");
        slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
        memset(from, data, to - from);
  }
@@@ -1591,7 -1577,6 +1591,7 @@@ static void __free_slab(struct kmem_cac
        page_mapcount_reset(page);
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
 +      kasan_alloc_pages(page, order);
        __free_kmem_pages(page, order);
  }
  
@@@ -3758,7 -3743,6 +3758,7 @@@ void kfree(const void *x
        if (unlikely(!PageSlab(page))) {
                BUG_ON(!PageCompound(page));
                kfree_hook(x);
 +              kasan_alloc_pages(page, compound_order(page));
                __free_kmem_pages(page, compound_order(page));
                return;
        }
@@@ -4655,7 -4639,17 +4655,17 @@@ static ssize_t show_slab_objects(struc
                }
        }
  
-       get_online_mems();
+       /*
+        * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
+        * already held which will conflict with an existing lock order:
+        *
+        * mem_hotplug_lock->slab_mutex->kernfs_mutex
+        *
+        * We don't really need mem_hotplug_lock (to hold off
+        * slab_mem_going_offline_callback) here because slab's memory hot
+        * unplug code doesn't destroy the kmem_cache->node[] data.
+        */
  #ifdef CONFIG_SLUB_DEBUG
        if (flags & SO_ALL) {
                struct kmem_cache_node *n;
                        x += sprintf(buf + x, " N%d=%lu",
                                        node, nodes[node]);
  #endif
-       put_online_mems();
        kfree(nodes);
        return x + sprintf(buf + x, "\n");
  }
diff --combined net/mac80211/mlme.c
@@@ -2431,7 -2431,8 +2431,8 @@@ struct sk_buff *ieee80211_ap_probereq_g
  
        rcu_read_lock();
        ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
-       if (WARN_ON_ONCE(ssid == NULL))
+       if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
+                     "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
                ssid_len = 0;
        else
                ssid_len = ssid[1];
@@@ -4566,20 -4567,20 +4567,20 @@@ int ieee80211_mgd_auth(struct ieee80211
                return -EOPNOTSUPP;
        }
  
 -      auth_data = kzalloc(sizeof(*auth_data) + req->sae_data_len +
 +      auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
                            req->ie_len, GFP_KERNEL);
        if (!auth_data)
                return -ENOMEM;
  
        auth_data->bss = req->bss;
  
 -      if (req->sae_data_len >= 4) {
 -              __le16 *pos = (__le16 *) req->sae_data;
 +      if (req->auth_data_len >= 4) {
 +              __le16 *pos = (__le16 *) req->auth_data;
                auth_data->sae_trans = le16_to_cpu(pos[0]);
                auth_data->sae_status = le16_to_cpu(pos[1]);
 -              memcpy(auth_data->data, req->sae_data + 4,
 -                     req->sae_data_len - 4);
 -              auth_data->data_len += req->sae_data_len - 4;
 +              memcpy(auth_data->data, req->auth_data + 4,
 +                     req->auth_data_len - 4);
 +              auth_data->data_len += req->auth_data_len - 4;
        }
  
        if (req->ie && req->ie_len) {
@@@ -4669,7 -4670,7 +4670,7 @@@ int ieee80211_mgd_assoc(struct ieee8021
  
        rcu_read_lock();
        ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
-       if (!ssidie) {
+       if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
                rcu_read_unlock();
                kfree(assoc_data);
                return -EINVAL;
  #include <linux/proc_fs.h>
  #include <linux/skbuff.h>
  #include <linux/spinlock.h>
 +#include <linux/workqueue.h>
  #include <asm/atomic.h>
  #include <net/netlink.h>
  
  #include <linux/netfilter/x_tables.h>
  #include <linux/netfilter/xt_quota2.h>
  
 +#define QUOTA2_SYSFS_WORK_MAX_SIZE 64
 +#define QUOTA2_SYSFS_NUM_ENVP 3
 +
  #ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
  /* For compatibility, these definitions are copied from the
   * deprecated header file <linux/netfilter_ipv4/ipt_ULOG.h> */
@@@ -58,16 -54,17 +58,16 @@@ struct xt_quota_counter 
        atomic_t ref;
        char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
        struct proc_dir_entry *procfs_entry;
 +      char last_iface[QUOTA2_SYSFS_WORK_MAX_SIZE];
 +      char last_prefix[QUOTA2_SYSFS_WORK_MAX_SIZE];
 +      struct work_struct work;
  };
  
 -#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
 -/* Harald's favorite number +1 :D From ipt_ULOG.C */
 -static int qlog_nl_event = 112;
 -module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
 -MODULE_PARM_DESC(event_num,
 -               "Event number for NETLINK_NFLOG message. 0 disables log."
 -               "111 is what ipt_ULOG uses.");
 -static struct sock *nflognl;
 -#endif
 +#define to_quota_counter(x) container_of(x, struct xt_quota_counter, work)
 +
 +static struct class *quota_class;
 +static struct device *quota_device;
 +static struct kobject *quota_kobj;
  
  static LIST_HEAD(counter_list);
  static DEFINE_SPINLOCK(counter_list_lock);
@@@ -78,39 -75,68 +78,39 @@@ static kuid_t quota_list_uid = KUIDT_IN
  static kgid_t quota_list_gid = KGIDT_INIT(0);
  module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
  
 -#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
 -static void quota2_log(unsigned int hooknum,
 -                     const struct sk_buff *skb,
 -                     const struct net_device *in,
 +static void quota2_work(struct work_struct *work)
 +{
 +      char alert_msg[QUOTA2_SYSFS_WORK_MAX_SIZE];
 +      char iface_name[QUOTA2_SYSFS_WORK_MAX_SIZE];
 +      char *envp[QUOTA2_SYSFS_NUM_ENVP] = {alert_msg, iface_name,  NULL};
 +      struct xt_quota_counter *counter = to_quota_counter(work);
 +
 +      snprintf(alert_msg, sizeof(alert_msg), "ALERT_NAME=%s", counter->name);
 +      snprintf(iface_name, sizeof(iface_name), "INTERFACE=%s",
 +               counter->last_iface);
 +
 +      kobject_uevent_env(quota_kobj, KOBJ_CHANGE, envp);
 +}
 +
 +static void quota2_log(const struct net_device *in,
                       const struct net_device *out,
 +                     struct  xt_quota_counter *q,
                       const char *prefix)
  {
 -      ulog_packet_msg_t *pm;
 -      struct sk_buff *log_skb;
 -      size_t size;
 -      struct nlmsghdr *nlh;
 -
 -      if (!qlog_nl_event)
 +      if (!prefix)
                return;
  
 -      size = NLMSG_SPACE(sizeof(*pm));
 -      size = max(size, (size_t)NLMSG_GOODSIZE);
 -      log_skb = alloc_skb(size, GFP_ATOMIC);
 -      if (!log_skb) {
 -              pr_err("xt_quota2: cannot alloc skb for logging\n");
 -              return;
 -      }
 +      strlcpy(q->last_prefix, prefix, QUOTA2_SYSFS_WORK_MAX_SIZE);
  
 -      nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
 -                      sizeof(*pm), 0);
 -      if (!nlh) {
 -              pr_err("xt_quota2: nlmsg_put failed\n");
 -              kfree_skb(log_skb);
 -              return;
 -      }
 -      pm = nlmsg_data(nlh);
 -      if (skb->tstamp.tv64 == 0)
 -              __net_timestamp((struct sk_buff *)skb);
 -      pm->data_len = 0;
 -      pm->hook = hooknum;
 -      if (prefix != NULL)
 -              strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
 -      else
 -              *(pm->prefix) = '\0';
        if (in)
 -              strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
 -      else
 -              pm->indev_name[0] = '\0';
 -
 -      if (out)
 -              strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
 +              strlcpy(q->last_iface, in->name, QUOTA2_SYSFS_WORK_MAX_SIZE);
 +      else if (out)
 +              strlcpy(q->last_iface, out->name, QUOTA2_SYSFS_WORK_MAX_SIZE);
        else
 -              pm->outdev_name[0] = '\0';
 +              strlcpy(q->last_iface, "UNKNOWN", QUOTA2_SYSFS_WORK_MAX_SIZE);
  
 -      NETLINK_CB(log_skb).dst_group = 1;
 -      pr_debug("throwing 1 packets to netlink group 1\n");
 -      netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
 -}
 -#else
 -static void quota2_log(unsigned int hooknum,
 -                     const struct sk_buff *skb,
 -                     const struct net_device *in,
 -                     const struct net_device *out,
 -                     const char *prefix)
 -{
 +      schedule_work(&q->work);
  }
 -#endif  /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
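(With the netlink NFLOG path removed, a quota breach is now reported as a KOBJ_CHANGE uevent on the "counters" device created in quota_mt2_init() below. Roughly, and assuming the standard uevent layout, the exact DEVPATH depending on where device_create() places the virtual device, a userspace uevent listener would see something like:)

	ACTION=change
	DEVPATH=/devices/virtual/xt_quota2/counters
	SUBSYSTEM=xt_quota2
	ALERT_NAME=<counter name>
	INTERFACE=<last interface recorded by quota2_log()>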
  
  static ssize_t quota_proc_read(struct file *file, char __user *buf,
                           size_t size, loff_t *ppos)
@@@ -167,9 -193,6 +167,9 @@@ q2_new_counter(const struct xt_quota_mt
                INIT_LIST_HEAD(&e->list);
                atomic_set(&e->ref, 1);
                strlcpy(e->name, q->name, sizeof(e->name));
 +              strlcpy(e->last_prefix, "UNSET", sizeof(e->last_prefix));
 +              strlcpy(e->last_iface, "UNSET", sizeof(e->last_iface));
 +              INIT_WORK(&e->work, quota2_work);
        }
        return e;
  }
@@@ -273,8 -296,8 +273,8 @@@ static void quota_mt2_destroy(const str
        }
  
        list_del(&e->list);
-       remove_proc_entry(e->name, proc_xt_quota);
        spin_unlock_bh(&counter_list_lock);
+       remove_proc_entry(e->name, proc_xt_quota);
        kfree(e);
  }
  
@@@ -303,7 -326,11 +303,7 @@@ quota_mt2(const struct sk_buff *skb, st
                } else {
                        /* We are transitioning, log that fact. */
                        if (e->quota) {
 -                              quota2_log(par->hooknum,
 -                                         skb,
 -                                         par->in,
 -                                         par->out,
 -                                         q->name);
 +                              quota2_log(par->in, par->out, e, q->name);
                        }
                        /* we do not allow even small packets from now on */
                        e->quota = 0;
@@@ -341,25 -368,11 +341,25 @@@ static int __init quota_mt2_init(void
        int ret;
        pr_debug("xt_quota2: init()");
  
 -#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
 -      nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, NULL);
 -      if (!nflognl)
 -              return -ENOMEM;
 -#endif
 +      quota_class = class_create(THIS_MODULE, "xt_quota2");
 +      ret = PTR_ERR(quota_class);
 +      if (IS_ERR(quota_class)) {
 +              pr_err("xt_quota2: couldn't create class");
 +              class_destroy(quota_class);
 +              return ret;
 +      }
 +
 +      quota_device = device_create(quota_class, NULL, MKDEV(0, 0), NULL,
 +                                   "counters");
 +      ret = PTR_ERR(quota_device);
 +      if (IS_ERR(quota_device)) {
 +              pr_err("xt_quota2: couldn't create device");
 +              device_destroy(quota_class, MKDEV(0, 0));
 +              class_destroy(quota_class);
 +              return ret;
 +      }
 +
 +      quota_kobj = &quota_device->kobj;
  
        proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
        if (proc_xt_quota == NULL)
@@@ -376,8 -389,6 +376,8 @@@ static void __exit quota_mt2_exit(void
  {
        xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
        remove_proc_entry("xt_quota", init_net.proc_net);
 +      device_destroy(quota_class, MKDEV(0, 0));
 +      class_destroy(quota_class);
  }
  
  module_init(quota_mt2_init);
diff --combined net/wireless/nl80211.c
@@@ -104,7 -104,7 +104,7 @@@ __cfg80211_wdev_from_attrs(struct net *
                if (have_wdev_id && rdev->wiphy_idx != wiphy_idx)
                        continue;
  
 -              list_for_each_entry(wdev, &rdev->wdev_list, list) {
 +              list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
                        if (have_ifidx && wdev->netdev &&
                            wdev->netdev->ifindex == ifidx) {
                                result = wdev;
@@@ -150,7 -150,7 +150,7 @@@ __cfg80211_rdev_from_attrs(struct net *
                tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32);
                if (tmp) {
                        /* make sure wdev exists */
 -                      list_for_each_entry(wdev, &tmp->wdev_list, list) {
 +                      list_for_each_entry(wdev, &tmp->wiphy.wdev_list, list) {
                                if (wdev->identifier != (u32)wdev_id)
                                        continue;
                                found = true;
@@@ -384,7 -384,7 +384,7 @@@ static const struct nla_policy nl80211_
        [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
        [NL80211_ATTR_WDEV] = { .type = NLA_U64 },
        [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
 -      [NL80211_ATTR_SAE_DATA] = { .type = NLA_BINARY, },
 +      [NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, },
        [NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN },
        [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
        [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
        [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
        [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
        [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
 +      [NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
 +      [NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED },
 +      [NL80211_ATTR_BSSID] = { .len = ETH_ALEN },
 +      [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 },
 +      [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = {
 +              .len = sizeof(struct nl80211_bss_select_rssi_adjust)
 +      },
 +      [NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY,
 +                                  .len = FILS_MAX_KEK_LEN },
 +      [NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN },
 +      [NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 },
 +      [NL80211_ATTR_FILS_ERP_USERNAME] = { .type = NLA_BINARY,
 +                                           .len = FILS_ERP_MAX_USERNAME_LEN },
 +      [NL80211_ATTR_FILS_ERP_REALM] = { .type = NLA_BINARY,
 +                                        .len = FILS_ERP_MAX_REALM_LEN },
 +      [NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 },
 +      [NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY,
 +                                      .len = FILS_ERP_MAX_RRK_LEN },
 +      [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 },
 +      [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN },
 +      [NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG },
  };
  
  /* policy for the key attributes */
@@@ -519,8 -498,7 +519,8 @@@ nl80211_coalesce_policy[NUM_NL80211_ATT
  /* policy for GTK rekey offload attributes */
  static const struct nla_policy
  nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
 -      [NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN },
 +      [NL80211_REKEY_DATA_KEK] = { .type = NLA_BINARY,
 +                                   .len = FILS_MAX_KEK_LEN },
        [NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN },
        [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN },
  };
@@@ -538,15 -516,6 +538,15 @@@ nl80211_plan_policy[NL80211_SCHED_SCAN_
        [NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 },
  };
  
 +static const struct nla_policy
 +nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
 +      [NL80211_BSS_SELECT_ATTR_RSSI] = { .type = NLA_FLAG },
 +      [NL80211_BSS_SELECT_ATTR_BAND_PREF] = { .type = NLA_U32 },
 +      [NL80211_BSS_SELECT_ATTR_RSSI_ADJUST] = {
 +              .len = sizeof(struct nl80211_bss_select_rssi_adjust)
 +      },
 +};
 +
  /* policy for packet pattern attributes */
  static const struct nla_policy
  nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
@@@ -587,7 -556,7 +587,7 @@@ static int nl80211_prepare_wdev_dump(st
                *rdev = wiphy_to_rdev(wiphy);
                *wdev = NULL;
  
 -              list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
 +              list_for_each_entry(tmp, &(*rdev)->wiphy.wdev_list, list) {
                        if (tmp->identifier == cb->args[1]) {
                                *wdev = tmp;
                                break;
@@@ -1056,10 -1025,6 +1056,10 @@@ static int nl80211_put_iface_combinatio
                     nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
                                c->radar_detect_regions)))
                        goto nla_put_failure;
 +              if (c->beacon_int_min_gcd &&
 +                  nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD,
 +                              c->beacon_int_min_gcd))
 +                      goto nla_put_failure;
  
                nla_nest_end(msg, nl_combi);
        }
@@@ -1310,7 -1275,7 +1310,7 @@@ nl80211_send_mgmt_stypes(struct sk_buf
  struct nl80211_dump_wiphy_state {
        s64 filter_wiphy;
        long start;
 -      long split_start, band_start, chan_start;
 +      long split_start, band_start, chan_start, capa_start;
        bool split;
  };
  
@@@ -1604,7 -1569,6 +1604,7 @@@ static int nl80211_send_wiphy(struct cf
                        if (rdev->wiphy.features &
                                        NL80211_FEATURE_SUPPORTS_WMM_ADMISSION)
                                CMD(add_tx_ts, ADD_TX_TS);
 +                      CMD(update_connect_params, UPDATE_CONNECT_PARAMS);
                }
                /* add into the if now */
  #undef CMD
                            rdev->wiphy.ext_features))
                        goto nla_put_failure;
  
 +              state->split_start++;
 +              break;
 +      case 13:
 +              if (rdev->wiphy.num_iftype_ext_capab &&
 +                  rdev->wiphy.iftype_ext_capab) {
 +                      struct nlattr *nested_ext_capab, *nested;
 +
 +                      nested = nla_nest_start(msg,
 +                                              NL80211_ATTR_IFTYPE_EXT_CAPA);
 +                      if (!nested)
 +                              goto nla_put_failure;
 +
 +                      for (i = state->capa_start;
 +                           i < rdev->wiphy.num_iftype_ext_capab; i++) {
 +                              const struct wiphy_iftype_ext_capab *capab;
 +
 +                              capab = &rdev->wiphy.iftype_ext_capab[i];
 +
 +                              nested_ext_capab = nla_nest_start(msg, i);
 +                              if (!nested_ext_capab ||
 +                                  nla_put_u32(msg, NL80211_ATTR_IFTYPE,
 +                                              capab->iftype) ||
 +                                  nla_put(msg, NL80211_ATTR_EXT_CAPA,
 +                                          capab->extended_capabilities_len,
 +                                          capab->extended_capabilities) ||
 +                                  nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
 +                                          capab->extended_capabilities_len,
 +                                          capab->extended_capabilities_mask))
 +                                      goto nla_put_failure;
 +
 +                              nla_nest_end(msg, nested_ext_capab);
 +                              if (state->split)
 +                                      break;
 +                      }
 +                      nla_nest_end(msg, nested);
 +                      if (i < rdev->wiphy.num_iftype_ext_capab) {
 +                              state->capa_start = i + 1;
 +                              break;
 +                      }
 +              }
 +
 +              if (rdev->wiphy.bss_select_support) {
 +                      struct nlattr *nested;
 +                      u32 bss_select_support = rdev->wiphy.bss_select_support;
 +
 +                      nested = nla_nest_start(msg, NL80211_ATTR_BSS_SELECT);
 +                      if (!nested)
 +                              goto nla_put_failure;
 +
 +                      i = 0;
 +                      while (bss_select_support) {
 +                              if ((bss_select_support & 1) &&
 +                                  nla_put_flag(msg, i))
 +                                      goto nla_put_failure;
 +                              i++;
 +                              bss_select_support >>= 1;
 +                      }
 +                      nla_nest_end(msg, nested);
 +              }
 +
                /* done */
                state->split_start = 0;
                break;
@@@ -2581,7 -2485,7 +2581,7 @@@ static int nl80211_dump_interface(struc
                }
                if_idx = 0;
  
 -              list_for_each_entry(wdev, &rdev->wdev_list, list) {
 +              list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
                        if (if_idx < if_start) {
                                if_idx++;
                                continue;
@@@ -2853,7 -2757,7 +2853,7 @@@ static int nl80211_new_interface(struc
                spin_lock_init(&wdev->mgmt_registrations_lock);
  
                wdev->identifier = ++rdev->wdev_id;
 -              list_add_rcu(&wdev->list, &rdev->wdev_list);
 +              list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
                rdev->devlist_generation++;
                break;
        default:
@@@ -3326,462 -3230,152 +3326,462 @@@ static int nl80211_set_mac_acl(struct s
        return err;
  }
  
 -static int nl80211_parse_beacon(struct nlattr *attrs[],
 -                              struct cfg80211_beacon_data *bcn)
 +static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
 +                         u8 *rates, u8 rates_len)
  {
 -      bool haveinfo = false;
 +      u8 i;
 +      u32 mask = 0;
  
 -      if (!is_valid_ie_attr(attrs[NL80211_ATTR_BEACON_TAIL]) ||
 -          !is_valid_ie_attr(attrs[NL80211_ATTR_IE]) ||
 -          !is_valid_ie_attr(attrs[NL80211_ATTR_IE_PROBE_RESP]) ||
 -          !is_valid_ie_attr(attrs[NL80211_ATTR_IE_ASSOC_RESP]))
 -              return -EINVAL;
 +      for (i = 0; i < rates_len; i++) {
 +              int rate = (rates[i] & 0x7f) * 5;
 +              int ridx;
  
 -      memset(bcn, 0, sizeof(*bcn));
 +              for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
 +                      struct ieee80211_rate *srate =
 +                              &sband->bitrates[ridx];
 +                      if (rate == srate->bitrate) {
 +                              mask |= 1 << ridx;
 +                              break;
 +                      }
 +              }
 +              if (ridx == sband->n_bitrates)
 +                      return 0; /* rate not found */
 +      }
  
 -      if (attrs[NL80211_ATTR_BEACON_HEAD]) {
 -              int ret = validate_beacon_head(attrs[NL80211_ATTR_BEACON_HEAD]);
 +      return mask;
 +}
  
 -              if (ret)
 -                      return ret;
 +static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
 +                             u8 *rates, u8 rates_len,
 +                             u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
 +{
 +      u8 i;
  
 -              bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
 -              bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
 -              if (!bcn->head_len)
 -                      return -EINVAL;
 -              haveinfo = true;
 -      }
 +      memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
  
 -      if (attrs[NL80211_ATTR_BEACON_TAIL]) {
 -              bcn->tail = nla_data(attrs[NL80211_ATTR_BEACON_TAIL]);
 -              bcn->tail_len = nla_len(attrs[NL80211_ATTR_BEACON_TAIL]);
 -              haveinfo = true;
 -      }
 +      for (i = 0; i < rates_len; i++) {
 +              int ridx, rbit;
  
 -      if (!haveinfo)
 -              return -EINVAL;
 +              ridx = rates[i] / 8;
 +              rbit = BIT(rates[i] % 8);
  
 -      if (attrs[NL80211_ATTR_IE]) {
 -              bcn->beacon_ies = nla_data(attrs[NL80211_ATTR_IE]);
 -              bcn->beacon_ies_len = nla_len(attrs[NL80211_ATTR_IE]);
 -      }
 +              /* check validity */
 +              if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
 +                      return false;
  
 -      if (attrs[NL80211_ATTR_IE_PROBE_RESP]) {
 -              bcn->proberesp_ies =
 -                      nla_data(attrs[NL80211_ATTR_IE_PROBE_RESP]);
 -              bcn->proberesp_ies_len =
 -                      nla_len(attrs[NL80211_ATTR_IE_PROBE_RESP]);
 +              /* check availability */
 +              if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
 +                      mcs[ridx] |= rbit;
 +              else
 +                      return false;
        }
  
 -      if (attrs[NL80211_ATTR_IE_ASSOC_RESP]) {
 -              bcn->assocresp_ies =
 -                      nla_data(attrs[NL80211_ATTR_IE_ASSOC_RESP]);
 -              bcn->assocresp_ies_len =
 -                      nla_len(attrs[NL80211_ATTR_IE_ASSOC_RESP]);
 -      }
 +      return true;
 +}
  
 -      if (attrs[NL80211_ATTR_PROBE_RESP]) {
 -              bcn->probe_resp = nla_data(attrs[NL80211_ATTR_PROBE_RESP]);
 -              bcn->probe_resp_len = nla_len(attrs[NL80211_ATTR_PROBE_RESP]);
 +static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map)
 +{
 +      u16 mcs_mask = 0;
 +
 +      switch (vht_mcs_map) {
 +      case IEEE80211_VHT_MCS_NOT_SUPPORTED:
 +              break;
 +      case IEEE80211_VHT_MCS_SUPPORT_0_7:
 +              mcs_mask = 0x00FF;
 +              break;
 +      case IEEE80211_VHT_MCS_SUPPORT_0_8:
 +              mcs_mask = 0x01FF;
 +              break;
 +      case IEEE80211_VHT_MCS_SUPPORT_0_9:
 +              mcs_mask = 0x03FF;
 +              break;
 +      default:
 +              break;
        }
  
 -      return 0;
 +      return mcs_mask;
  }
  
 -static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
 -                                 struct cfg80211_ap_settings *params)
 +static void vht_build_mcs_mask(u16 vht_mcs_map,
 +                             u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
  {
 -      struct wireless_dev *wdev;
 -      bool ret = false;
 -
 -      list_for_each_entry(wdev, &rdev->wdev_list, list) {
 -              if (wdev->iftype != NL80211_IFTYPE_AP &&
 -                  wdev->iftype != NL80211_IFTYPE_P2P_GO)
 -                      continue;
 -
 -              if (!wdev->preset_chandef.chan)
 -                      continue;
 +      u8 nss;
  
 -              params->chandef = wdev->preset_chandef;
 -              ret = true;
 -              break;
 +      for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
 +              vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03);
 +              vht_mcs_map >>= 2;
        }
 -
 -      return ret;
  }
  
 -static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev,
 -                                  enum nl80211_auth_type auth_type,
 -                                  enum nl80211_commands cmd)
 +static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
 +                           struct nl80211_txrate_vht *txrate,
 +                           u16 mcs[NL80211_VHT_NSS_MAX])
  {
 -      if (auth_type > NL80211_AUTHTYPE_MAX)
 +      u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
 +      u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {};
 +      u8 i;
 +
 +      if (!sband->vht_cap.vht_supported)
                return false;
  
 -      switch (cmd) {
 -      case NL80211_CMD_AUTHENTICATE:
 -              if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) &&
 -                  auth_type == NL80211_AUTHTYPE_SAE)
 -                      return false;
 -              return true;
 -      case NL80211_CMD_CONNECT:
 -      case NL80211_CMD_START_AP:
 -              /* SAE not supported yet */
 -              if (auth_type == NL80211_AUTHTYPE_SAE)
 +      memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX);
 +
 +      /* Build vht_mcs_mask from VHT capabilities */
 +      vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
 +
 +      for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
 +              if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
 +                      mcs[i] = txrate->mcs[i];
 +              else
                        return false;
 -              return true;
 -      default:
 -              return false;
        }
 +
 +      return true;
  }
  
 -static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
 +static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
 +      [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
 +                                  .len = NL80211_MAX_SUPP_RATES },
 +      [NL80211_TXRATE_HT] = { .type = NLA_BINARY,
 +                              .len = NL80211_MAX_SUPP_HT_RATES },
 +      [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
 +      [NL80211_TXRATE_GI] = { .type = NLA_U8 },
 +};
 +
 +static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
 +                                       struct cfg80211_bitrate_mask *mask)
  {
 +      struct nlattr *tb[NL80211_TXRATE_MAX + 1];
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
 -      struct net_device *dev = info->user_ptr[1];
 -      struct wireless_dev *wdev = dev->ieee80211_ptr;
 -      struct cfg80211_ap_settings params;
 -      int err;
 -
 -      if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
 -          dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
 -              return -EOPNOTSUPP;
 +      int rem, i;
 +      struct nlattr *tx_rates;
 +      struct ieee80211_supported_band *sband;
 +      u16 vht_tx_mcs_map;
  
 -      if (!rdev->ops->start_ap)
 -              return -EOPNOTSUPP;
 +      memset(mask, 0, sizeof(*mask));
 +      /* Default to all rates enabled */
 +      for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
 +              sband = rdev->wiphy.bands[i];
  
 -      if (wdev->beacon_interval)
 -              return -EALREADY;
 +              if (!sband)
 +                      continue;
  
 -      memset(&params, 0, sizeof(params));
 +              mask->control[i].legacy = (1 << sband->n_bitrates) - 1;
 +              memcpy(mask->control[i].ht_mcs,
 +                     sband->ht_cap.mcs.rx_mask,
 +                     sizeof(mask->control[i].ht_mcs));
  
 -      /* these are required for START_AP */
 -      if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] ||
 -          !info->attrs[NL80211_ATTR_DTIM_PERIOD] ||
 -          !info->attrs[NL80211_ATTR_BEACON_HEAD])
 -              return -EINVAL;
 +              if (!sband->vht_cap.vht_supported)
 +                      continue;
  
 -      err = nl80211_parse_beacon(info->attrs, &params.beacon);
 -      if (err)
 -              return err;
 +              vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
 +              vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
 +      }
  
 -      params.beacon_interval =
 -              nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
 -      params.dtim_period =
 -              nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
 +      /* if no rates are given set it back to the defaults */
 +      if (!info->attrs[NL80211_ATTR_TX_RATES])
 +              goto out;
  
 -      err = cfg80211_validate_beacon_int(rdev, params.beacon_interval);
 -      if (err)
 -              return err;
 +      /* The nested attribute uses enum nl80211_band as the index. This maps
 +       * directly to the enum nl80211_band values used in cfg80211.
 +       */
 +      BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
 +      nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
 +              enum ieee80211_band band = nla_type(tx_rates);
 +              int err;
 +
 +              if (band < 0 || band >= IEEE80211_NUM_BANDS)
 +                      return -EINVAL;
 +              sband = rdev->wiphy.bands[band];
 +              if (sband == NULL)
 +                      return -EINVAL;
 +              err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
 +                              nla_len(tx_rates), nl80211_txattr_policy);
 +              if (err)
 +                      return err;
 +              if (tb[NL80211_TXRATE_LEGACY]) {
 +                      mask->control[band].legacy = rateset_to_mask(
 +                              sband,
 +                              nla_data(tb[NL80211_TXRATE_LEGACY]),
 +                              nla_len(tb[NL80211_TXRATE_LEGACY]));
 +                      if ((mask->control[band].legacy == 0) &&
 +                          nla_len(tb[NL80211_TXRATE_LEGACY]))
 +                              return -EINVAL;
 +              }
 +              if (tb[NL80211_TXRATE_HT]) {
 +                      if (!ht_rateset_to_mask(
 +                                      sband,
 +                                      nla_data(tb[NL80211_TXRATE_HT]),
 +                                      nla_len(tb[NL80211_TXRATE_HT]),
 +                                      mask->control[band].ht_mcs))
 +                              return -EINVAL;
 +              }
 +              if (tb[NL80211_TXRATE_VHT]) {
 +                      if (!vht_set_mcs_mask(
 +                                      sband,
 +                                      nla_data(tb[NL80211_TXRATE_VHT]),
 +                                      mask->control[band].vht_mcs))
 +                              return -EINVAL;
 +              }
 +              if (tb[NL80211_TXRATE_GI]) {
 +                      mask->control[band].gi =
 +                              nla_get_u8(tb[NL80211_TXRATE_GI]);
 +                      if (mask->control[band].gi > NL80211_TXRATE_FORCE_LGI)
 +                              return -EINVAL;
 +              }
 +
 +              if (mask->control[band].legacy == 0) {
 +                      /* don't allow empty legacy rates if HT or VHT
 +                       * are not even supported.
 +                       */
 +                      if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
 +                            rdev->wiphy.bands[band]->vht_cap.vht_supported))
 +                              return -EINVAL;
 +
 +                      for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
 +                              if (mask->control[band].ht_mcs[i])
 +                                      goto out;
 +
 +                      for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
 +                              if (mask->control[band].vht_mcs[i])
 +                                      goto out;
 +
 +                      /* legacy and mcs rates may not be both empty */
 +                      return -EINVAL;
 +              }
 +      }
 +
 +out:
 +      return 0;
 +}
 +
 +static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
 +                                 enum nl80211_band band,
 +                                 struct cfg80211_bitrate_mask *beacon_rate)
 +{
 +      u32 count_ht, count_vht, i;
 +      u32 rate = beacon_rate->control[band].legacy;
 +
 +      /* Allow only one rate */
 +      if (hweight32(rate) > 1)
 +              return -EINVAL;
 +
 +      count_ht = 0;
 +      for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
 +              if (hweight8(beacon_rate->control[band].ht_mcs[i]) > 1) {
 +                      return -EINVAL;
 +              } else if (beacon_rate->control[band].ht_mcs[i]) {
 +                      count_ht++;
 +                      if (count_ht > 1)
 +                              return -EINVAL;
 +              }
 +              if (count_ht && rate)
 +                      return -EINVAL;
 +      }
 +
 +      count_vht = 0;
 +      for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
 +              if (hweight16(beacon_rate->control[band].vht_mcs[i]) > 1) {
 +                      return -EINVAL;
 +              } else if (beacon_rate->control[band].vht_mcs[i]) {
 +                      count_vht++;
 +                      if (count_vht > 1)
 +                              return -EINVAL;
 +              }
 +              if (count_vht && rate)
 +                      return -EINVAL;
 +      }
 +
 +      if ((count_ht && count_vht) || (!rate && !count_ht && !count_vht))
 +              return -EINVAL;
 +
 +      if (rate &&
 +          !wiphy_ext_feature_isset(&rdev->wiphy,
 +                                   NL80211_EXT_FEATURE_BEACON_RATE_LEGACY))
 +              return -EINVAL;
 +      if (count_ht &&
 +          !wiphy_ext_feature_isset(&rdev->wiphy,
 +                                   NL80211_EXT_FEATURE_BEACON_RATE_HT))
 +              return -EINVAL;
 +      if (count_vht &&
 +          !wiphy_ext_feature_isset(&rdev->wiphy,
 +                                   NL80211_EXT_FEATURE_BEACON_RATE_VHT))
 +              return -EINVAL;
 +
 +      return 0;
 +}
 +
 +static int nl80211_parse_beacon(struct nlattr *attrs[],
 +                              struct cfg80211_beacon_data *bcn)
 +{
 +      bool haveinfo = false;
 +
 +      if (!is_valid_ie_attr(attrs[NL80211_ATTR_BEACON_TAIL]) ||
 +          !is_valid_ie_attr(attrs[NL80211_ATTR_IE]) ||
 +          !is_valid_ie_attr(attrs[NL80211_ATTR_IE_PROBE_RESP]) ||
 +          !is_valid_ie_attr(attrs[NL80211_ATTR_IE_ASSOC_RESP]))
 +              return -EINVAL;
 +
 +      memset(bcn, 0, sizeof(*bcn));
 +
 +      if (attrs[NL80211_ATTR_BEACON_HEAD]) {
 +              int ret = validate_beacon_head(attrs[NL80211_ATTR_BEACON_HEAD]);
 +
 +              if (ret)
 +                      return ret;
 +
 +              bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
 +              bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
 +              if (!bcn->head_len)
 +                      return -EINVAL;
 +              haveinfo = true;
 +      }
 +
 +      if (attrs[NL80211_ATTR_BEACON_TAIL]) {
 +              bcn->tail = nla_data(attrs[NL80211_ATTR_BEACON_TAIL]);
 +              bcn->tail_len = nla_len(attrs[NL80211_ATTR_BEACON_TAIL]);
 +              haveinfo = true;
 +      }
 +
 +      if (!haveinfo)
 +              return -EINVAL;
 +
 +      if (attrs[NL80211_ATTR_IE]) {
 +              bcn->beacon_ies = nla_data(attrs[NL80211_ATTR_IE]);
 +              bcn->beacon_ies_len = nla_len(attrs[NL80211_ATTR_IE]);
 +      }
 +
 +      if (attrs[NL80211_ATTR_IE_PROBE_RESP]) {
 +              bcn->proberesp_ies =
 +                      nla_data(attrs[NL80211_ATTR_IE_PROBE_RESP]);
 +              bcn->proberesp_ies_len =
 +                      nla_len(attrs[NL80211_ATTR_IE_PROBE_RESP]);
 +      }
 +
 +      if (attrs[NL80211_ATTR_IE_ASSOC_RESP]) {
 +              bcn->assocresp_ies =
 +                      nla_data(attrs[NL80211_ATTR_IE_ASSOC_RESP]);
 +              bcn->assocresp_ies_len =
 +                      nla_len(attrs[NL80211_ATTR_IE_ASSOC_RESP]);
 +      }
 +
 +      if (attrs[NL80211_ATTR_PROBE_RESP]) {
 +              bcn->probe_resp = nla_data(attrs[NL80211_ATTR_PROBE_RESP]);
 +              bcn->probe_resp_len = nla_len(attrs[NL80211_ATTR_PROBE_RESP]);
 +      }
 +
 +      return 0;
 +}
 +
 +static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
 +                                 struct cfg80211_ap_settings *params)
 +{
 +      struct wireless_dev *wdev;
 +      bool ret = false;
 +
 +      list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 +              if (wdev->iftype != NL80211_IFTYPE_AP &&
 +                  wdev->iftype != NL80211_IFTYPE_P2P_GO)
 +                      continue;
 +
 +              if (!wdev->preset_chandef.chan)
 +                      continue;
 +
 +              params->chandef = wdev->preset_chandef;
 +              ret = true;
 +              break;
 +      }
 +
 +      return ret;
 +}
 +
 +static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev,
 +                                  enum nl80211_auth_type auth_type,
 +                                  enum nl80211_commands cmd)
 +{
 +      if (auth_type > NL80211_AUTHTYPE_MAX)
 +              return false;
 +
 +      switch (cmd) {
 +      case NL80211_CMD_AUTHENTICATE:
 +              if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) &&
 +                  auth_type == NL80211_AUTHTYPE_SAE)
 +                      return false;
 +              if (!wiphy_ext_feature_isset(&rdev->wiphy,
 +                                           NL80211_EXT_FEATURE_FILS_STA) &&
 +                  (auth_type == NL80211_AUTHTYPE_FILS_SK ||
 +                   auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
 +                   auth_type == NL80211_AUTHTYPE_FILS_PK))
 +                      return false;
 +              return true;
 +      case NL80211_CMD_CONNECT:
 +              if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) &&
 +                  auth_type == NL80211_AUTHTYPE_SAE)
 +                      return false;
 +              /* FILS with SK PFS or PK not supported yet */
 +              if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
 +                  auth_type == NL80211_AUTHTYPE_FILS_PK)
 +                      return false;
 +              if (!wiphy_ext_feature_isset(
 +                          &rdev->wiphy,
 +                          NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
 +                  auth_type == NL80211_AUTHTYPE_FILS_SK)
 +                      return false;
 +              return true;
 +      case NL80211_CMD_START_AP:
 +              /* SAE not supported yet */
 +              if (auth_type == NL80211_AUTHTYPE_SAE)
 +                      return false;
 +              /* FILS not supported yet */
 +              if (auth_type == NL80211_AUTHTYPE_FILS_SK ||
 +                  auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
 +                  auth_type == NL80211_AUTHTYPE_FILS_PK)
 +                      return false;
 +              return true;
 +      default:
 +              return false;
 +      }
 +}
 +
 +static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
 +{
 +      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 +      struct net_device *dev = info->user_ptr[1];
 +      struct wireless_dev *wdev = dev->ieee80211_ptr;
 +      struct cfg80211_ap_settings params;
 +      int err;
 +
 +      if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
 +          dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
 +              return -EOPNOTSUPP;
 +
 +      if (!rdev->ops->start_ap)
 +              return -EOPNOTSUPP;
 +
 +      if (wdev->beacon_interval)
 +              return -EALREADY;
 +
 +      memset(&params, 0, sizeof(params));
 +
 +      /* these are required for START_AP */
 +      if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] ||
 +          !info->attrs[NL80211_ATTR_DTIM_PERIOD] ||
 +          !info->attrs[NL80211_ATTR_BEACON_HEAD])
 +              return -EINVAL;
 +
 +      err = nl80211_parse_beacon(info->attrs, &params.beacon);
 +      if (err)
 +              return err;
 +
 +      params.beacon_interval =
 +              nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
 +      params.dtim_period =
 +              nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
 +
 +      err = cfg80211_validate_beacon_int(rdev, dev->ieee80211_ptr->iftype,
 +                                         params.beacon_interval);
 +      if (err)
 +              return err;
  
        /*
         * In theory, some of these attributes should be required here
                                           wdev->iftype))
                return -EINVAL;
  
 +      if (info->attrs[NL80211_ATTR_TX_RATES]) {
 +              err = nl80211_parse_tx_bitrate_mask(info, &params.beacon_rate);
 +              if (err)
 +                      return err;
 +
 +              err = validate_beacon_tx_rate(rdev, params.chandef.chan->band,
 +                                            &params.beacon_rate);
 +              if (err)
 +                      return err;
 +      }
 +
        if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
                params.smps_mode =
                        nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
                params.smps_mode = NL80211_SMPS_OFF;
        }
  
 +      params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
 +      if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ])
 +              return -EOPNOTSUPP;
 +
        if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
                params.acl = parse_acl_data(&rdev->wiphy, info);
                if (IS_ERR(params.acl))
                        return PTR_ERR(params.acl);
        }
  
 +      if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
 +              params.flags |= AP_SETTINGS_EXTERNAL_AUTH_SUPPORT;
 +
        wdev_lock(wdev);
        err = rdev_start_ap(rdev, dev, &params);
        if (!err) {
@@@ -5240,6 -4816,9 +5240,9 @@@ static int nl80211_del_mpath(struct sk_
        if (!rdev->ops->del_mpath)
                return -EOPNOTSUPP;
  
+       if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+               return -EOPNOTSUPP;
+
        return rdev_del_mpath(rdev, dev, dst);
  }
  
@@@ -6202,80 -5781,13 +6205,80 @@@ static int validate_scan_freqs(struct n
        return n_channels;
  }
  
 -static int nl80211_parse_random_mac(struct nlattr **attrs,
 -                                  u8 *mac_addr, u8 *mac_addr_mask)
 +static bool is_band_valid(struct wiphy *wiphy, enum ieee80211_band b)
  {
 +      return b < IEEE80211_NUM_BANDS && wiphy->bands[b];
 +}
 +
 +static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy,
 +                          struct cfg80211_bss_selection *bss_select)
 +{
 +      struct nlattr *attr[NL80211_BSS_SELECT_ATTR_MAX + 1];
 +      struct nlattr *nest;
 +      int err;
 +      bool found = false;
        int i;
  
 -      if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) {
 -              eth_zero_addr(mac_addr);
 +      /* only process one nested attribute */
 +      nest = nla_data(nla);
 +      if (!nla_ok(nest, nla_len(nest)))
 +              return -EINVAL;
 +
 +      err = nla_parse(attr, NL80211_BSS_SELECT_ATTR_MAX, nla_data(nest),
 +                      nla_len(nest), nl80211_bss_select_policy);
 +      if (err)
 +              return err;
 +
 +      /* only one attribute may be given */
 +      for (i = 0; i <= NL80211_BSS_SELECT_ATTR_MAX; i++) {
 +              if (attr[i]) {
 +                      if (found)
 +                              return -EINVAL;
 +                      found = true;
 +              }
 +      }
 +
 +      bss_select->behaviour = __NL80211_BSS_SELECT_ATTR_INVALID;
 +
 +      if (attr[NL80211_BSS_SELECT_ATTR_RSSI])
 +              bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI;
 +
 +      if (attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]) {
 +              bss_select->behaviour = NL80211_BSS_SELECT_ATTR_BAND_PREF;
 +              bss_select->param.band_pref =
 +                      nla_get_u32(attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]);
 +              if (!is_band_valid(wiphy, bss_select->param.band_pref))
 +                      return -EINVAL;
 +      }
 +
 +      if (attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]) {
 +              struct nl80211_bss_select_rssi_adjust *adj_param;
 +
 +              adj_param = nla_data(attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]);
 +              bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI_ADJUST;
 +              bss_select->param.adjust.band = adj_param->band;
 +              bss_select->param.adjust.delta = adj_param->delta;
 +              if (!is_band_valid(wiphy, bss_select->param.adjust.band))
 +                      return -EINVAL;
 +      }
 +
 +      /* user-space did not provide behaviour attribute */
 +      if (bss_select->behaviour == __NL80211_BSS_SELECT_ATTR_INVALID)
 +              return -EINVAL;
 +
 +      if (!(wiphy->bss_select_support & BIT(bss_select->behaviour)))
 +              return -EINVAL;
 +
 +      return 0;
 +}
 +
 +static int nl80211_parse_random_mac(struct nlattr **attrs,
 +                                  u8 *mac_addr, u8 *mac_addr_mask)
 +{
 +      int i;
 +
 +      if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) {
 +              eth_zero_addr(mac_addr);
                eth_zero_addr(mac_addr_mask);
                mac_addr[0] = 0x2;
                mac_addr_mask[0] = 0x3;
@@@ -6507,25 -6019,6 +6510,25 @@@ static int nl80211_trigger_scan(struct 
        request->no_cck =
                nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
  
 +      /* Initial implementation used NL80211_ATTR_MAC to set the specific
 +       * BSSID to scan for. This was problematic because that same attribute
 +       * was already used for another purpose (local random MAC address). The
 +       * NL80211_ATTR_BSSID attribute was added to fix this. For backwards
 +       * compatibility with older userspace components, also use the
 +       * NL80211_ATTR_MAC value here if it can be determined to be used for
 +       * the specific BSSID use case instead of the random MAC address
 +       * (NL80211_ATTR_SCAN_FLAGS is used to enable random MAC address use).
 +       */
 +      if (info->attrs[NL80211_ATTR_BSSID])
 +              memcpy(request->bssid,
 +                     nla_data(info->attrs[NL80211_ATTR_BSSID]), ETH_ALEN);
 +      else if (!(request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) &&
 +               info->attrs[NL80211_ATTR_MAC])
 +              memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]),
 +                     ETH_ALEN);
 +      else
 +              eth_broadcast_addr(request->bssid);
 +
        request->wdev = wdev;
        request->wiphy = &rdev->wiphy;
        request->scan_start = jiffies;
@@@ -6739,12 -6232,6 +6742,12 @@@ nl80211_parse_sched_scan(struct wiphy *
        if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
                return ERR_PTR(-EINVAL);
  
 +      if (!wiphy_ext_feature_isset(
 +                  wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) &&
 +          (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] ||
 +           attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]))
 +              return ERR_PTR(-EINVAL);
 +
        request = kzalloc(sizeof(*request)
                        + sizeof(*request->ssids) * n_ssids
                        + sizeof(*request->match_sets) * n_match_sets
                request->delay =
                        nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
  
 +      if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) {
 +              request->relative_rssi = nla_get_s8(
 +                      attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]);
 +              request->relative_rssi_set = true;
 +      }
 +
 +      if (request->relative_rssi_set &&
 +          attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) {
 +              struct nl80211_bss_select_rssi_adjust *rssi_adjust;
 +
 +              rssi_adjust = nla_data(
 +                      attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]);
 +              request->rssi_adjust.band = rssi_adjust->band;
 +              request->rssi_adjust.delta = rssi_adjust->delta;
 +              if (!is_band_valid(wiphy, request->rssi_adjust.band)) {
 +                      err = -EINVAL;
 +                      goto out_free;
 +              }
 +      }
 +
        err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
        if (err)
                goto out_free;
@@@ -6983,24 -6450,6 +6986,24 @@@ out_free
        return ERR_PTR(err);
  }
  
 +static int nl80211_abort_scan(struct sk_buff *skb, struct genl_info *info)
 +{
 +      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 +      struct wireless_dev *wdev = info->user_ptr[1];
 +
 +      if (!rdev->ops->abort_scan)
 +              return -EOPNOTSUPP;
 +
 +      if (rdev->scan_msg)
 +              return 0;
 +
 +      if (!rdev->scan_req)
 +              return -ENOENT;
 +
 +      rdev_abort_scan(rdev, wdev);
 +      return 0;
 +}
 +
  static int nl80211_start_sched_scan(struct sk_buff *skb,
                                    struct genl_info *info)
  {
@@@ -7077,9 -6526,6 +7080,9 @@@ static int nl80211_start_radar_detectio
        if (err)
                return err;
  
 +      if (rdev->wiphy.flags & WIPHY_FLAG_DFS_OFFLOAD)
 +              return -EOPNOTSUPP;
 +
        if (netif_carrier_ok(dev))
                return -EBUSY;
  
@@@ -7574,8 -7020,8 +7577,8 @@@ static int nl80211_authenticate(struct 
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net_device *dev = info->user_ptr[1];
        struct ieee80211_channel *chan;
 -      const u8 *bssid, *ssid, *ie = NULL, *sae_data = NULL;
 -      int err, ssid_len, ie_len = 0, sae_data_len = 0;
 +      const u8 *bssid, *ssid, *ie = NULL, *auth_data = NULL;
 +      int err, ssid_len, ie_len = 0, auth_data_len = 0;
        enum nl80211_auth_type auth_type;
        struct key_parse key;
        bool local_state_change;
        if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE))
                return -EINVAL;
  
 -      if (auth_type == NL80211_AUTHTYPE_SAE &&
 -          !info->attrs[NL80211_ATTR_SAE_DATA])
 +      if ((auth_type == NL80211_AUTHTYPE_SAE ||
 +           auth_type == NL80211_AUTHTYPE_FILS_SK ||
 +           auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
 +           auth_type == NL80211_AUTHTYPE_FILS_PK) &&
 +          !info->attrs[NL80211_ATTR_AUTH_DATA])
                return -EINVAL;
  
 -      if (info->attrs[NL80211_ATTR_SAE_DATA]) {
 -              if (auth_type != NL80211_AUTHTYPE_SAE)
 +      if (info->attrs[NL80211_ATTR_AUTH_DATA]) {
 +              if (auth_type != NL80211_AUTHTYPE_SAE &&
 +                  auth_type != NL80211_AUTHTYPE_FILS_SK &&
 +                  auth_type != NL80211_AUTHTYPE_FILS_SK_PFS &&
 +                  auth_type != NL80211_AUTHTYPE_FILS_PK)
                        return -EINVAL;
 -              sae_data = nla_data(info->attrs[NL80211_ATTR_SAE_DATA]);
 -              sae_data_len = nla_len(info->attrs[NL80211_ATTR_SAE_DATA]);
 +              auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]);
 +              auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]);
                /* need to include at least Auth Transaction and Status Code */
 -              if (sae_data_len < 4)
 +              if (auth_data_len < 4)
                        return -EINVAL;
        }
  
        err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
                                 ssid, ssid_len, ie, ie_len,
                                 key.p.key, key.p.key_len, key.idx,
 -                               sae_data, sae_data_len);
 +                               auth_data, auth_data_len);
        wdev_unlock(dev->ieee80211_ptr);
        return err;
  }
@@@ -7728,9 -7168,6 +7731,9 @@@ static int nl80211_crypto_settings(stru
                if (settings->n_ciphers_pairwise > cipher_limit)
                        return -EINVAL;
  
 +              if (len > sizeof(u32) * NL80211_MAX_NR_CIPHER_SUITES)
 +                      return -EINVAL;
 +
                memcpy(settings->ciphers_pairwise, data, len);
  
                for (i = 0; i < settings->n_ciphers_pairwise; i++)
@@@ -7866,29 -7303,11 +7869,29 @@@ static int nl80211_associate(struct sk_
                req.flags |= ASSOC_REQ_USE_RRM;
        }
  
 +      if (info->attrs[NL80211_ATTR_FILS_KEK]) {
 +              req.fils_kek = nla_data(info->attrs[NL80211_ATTR_FILS_KEK]);
 +              req.fils_kek_len = nla_len(info->attrs[NL80211_ATTR_FILS_KEK]);
 +              if (!info->attrs[NL80211_ATTR_FILS_NONCES])
 +                      return -EINVAL;
 +              req.fils_nonces =
 +                      nla_data(info->attrs[NL80211_ATTR_FILS_NONCES]);
 +      }
 +
        err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
        if (!err) {
                wdev_lock(dev->ieee80211_ptr);
 +
                err = cfg80211_mlme_assoc(rdev, dev, chan, bssid,
                                          ssid, ssid_len, &req);
 +
 +              if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
 +                      dev->ieee80211_ptr->conn_owner_nlportid =
 +                              info->snd_portid;
 +                      memcpy(dev->ieee80211_ptr->disconnect_bssid,
 +                             bssid, ETH_ALEN);
 +              }
 +
                wdev_unlock(dev->ieee80211_ptr);
        }
  
@@@ -8037,14 -7456,12 +8040,14 @@@ static int nl80211_join_ibss(struct sk_
  
        ibss.beacon_interval = 100;
  
 -      if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
 +      if (info->attrs[NL80211_ATTR_BEACON_INTERVAL])
                ibss.beacon_interval =
                        nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
 -              if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000)
 -                      return -EINVAL;
 -      }
 +
 +      err = cfg80211_validate_beacon_int(rdev, NL80211_IFTYPE_ADHOC,
 +                                         ibss.beacon_interval);
 +      if (err)
 +              return err;
  
        if (!rdev->ops->join_ibss)
                return -EOPNOTSUPP;
@@@ -8513,10 -7930,6 +8516,10 @@@ static int nl80211_connect(struct sk_bu
                connect.mfp = NL80211_MFP_NO;
        }
  
 +      if (info->attrs[NL80211_ATTR_PREV_BSSID])
 +              connect.prev_bssid =
 +                      nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
 +
        if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
                connect.channel = nl80211_get_valid_chan(
                        wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]);
                connect.flags |= ASSOC_REQ_USE_RRM;
        }
  
 +      connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
 +      if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) {
 +              kzfree(connkeys);
 +              return -EOPNOTSUPP;
 +      }
 +
 +      if (info->attrs[NL80211_ATTR_BSS_SELECT]) {
 +              /* bss selection makes no sense if bssid is set */
 +              if (connect.bssid) {
 +                      kzfree(connkeys);
 +                      return -EINVAL;
 +              }
 +
 +              err = parse_bss_select(info->attrs[NL80211_ATTR_BSS_SELECT],
 +                                     wiphy, &connect.bss_select);
 +              if (err) {
 +                      kzfree(connkeys);
 +                      return err;
 +              }
 +      }
 +
 +      if (wiphy_ext_feature_isset(&rdev->wiphy,
 +                                  NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
 +          info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] &&
 +          info->attrs[NL80211_ATTR_FILS_ERP_REALM] &&
 +          info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] &&
 +          info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
 +              connect.fils_erp_username =
 +                      nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
 +              connect.fils_erp_username_len =
 +                      nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
 +              connect.fils_erp_realm =
 +                      nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
 +              connect.fils_erp_realm_len =
 +                      nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
 +              connect.fils_erp_next_seq_num =
 +                      nla_get_u16(
 +                         info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]);
 +              connect.fils_erp_rrk =
 +                      nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
 +              connect.fils_erp_rrk_len =
 +                      nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
 +      } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] ||
 +                 info->attrs[NL80211_ATTR_FILS_ERP_REALM] ||
 +                 info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] ||
 +                 info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
 +              kzfree(connkeys);
 +              return -EINVAL;
 +      }
 +
 +      if (nla_get_flag(info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])) {
 +              if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
 +                      kzfree(connkeys);
 +                      return -EINVAL;
 +              }
 +              connect.flags |= CONNECT_REQ_EXTERNAL_AUTH_SUPPORT;
 +      }
 +
        wdev_lock(dev->ieee80211_ptr);
        err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
 -      wdev_unlock(dev->ieee80211_ptr);
        if (err)
                kzfree(connkeys);
 +
 +      if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
 +              dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid;
 +              if (connect.bssid)
 +                      memcpy(dev->ieee80211_ptr->disconnect_bssid,
 +                             connect.bssid, ETH_ALEN);
 +              else
 +                      memset(dev->ieee80211_ptr->disconnect_bssid,
 +                             0, ETH_ALEN);
 +      }
 +
 +      wdev_unlock(dev->ieee80211_ptr);
 +
        return err;
  }
  
 +static int nl80211_update_connect_params(struct sk_buff *skb,
 +                                       struct genl_info *info)
 +{
 +      struct cfg80211_connect_params connect = {};
 +      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 +      struct net_device *dev = info->user_ptr[1];
 +      struct wireless_dev *wdev = dev->ieee80211_ptr;
 +      u32 changed = 0;
 +      int ret;
 +
 +      if (!rdev->ops->update_connect_params)
 +              return -EOPNOTSUPP;
 +
 +      if (info->attrs[NL80211_ATTR_IE]) {
 +              if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 +                      return -EINVAL;
 +              connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
 +              connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 +              changed |= UPDATE_ASSOC_IES;
 +      }
 +
 +      if (wiphy_ext_feature_isset(&rdev->wiphy,
 +                                  NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
 +          info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] &&
 +          info->attrs[NL80211_ATTR_FILS_ERP_REALM] &&
 +          info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] &&
 +          info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
 +              connect.fils_erp_username =
 +                      nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
 +              connect.fils_erp_username_len =
 +                      nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
 +              connect.fils_erp_realm =
 +                      nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
 +              connect.fils_erp_realm_len =
 +                      nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
 +              connect.fils_erp_next_seq_num =
 +                      nla_get_u16(
 +                         info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]);
 +              connect.fils_erp_rrk =
 +                      nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
 +              connect.fils_erp_rrk_len =
 +                      nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
 +              changed |= UPDATE_FILS_ERP_INFO;
 +      } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] ||
 +                 info->attrs[NL80211_ATTR_FILS_ERP_REALM] ||
 +                 info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] ||
 +                 info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
 +              return -EINVAL;
 +      }
 +
 +      if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
 +              u32 auth_type =
 +                      nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
 +              if (!nl80211_valid_auth_type(rdev, auth_type,
 +                                           NL80211_CMD_CONNECT))
 +                      return -EINVAL;
 +              connect.auth_type = auth_type;
 +              changed |= UPDATE_AUTH_TYPE;
 +      }
 +
 +      wdev_lock(dev->ieee80211_ptr);
 +      if (!wdev->current_bss)
 +              ret = -ENOLINK;
 +      else
 +              ret = rdev_update_connect_params(rdev, dev, &connect, changed);
 +      wdev_unlock(dev->ieee80211_ptr);
 +
 +      return ret;
 +}
 +
  static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
  {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@@ -8796,34 -8069,17 +8799,34 @@@ static int nl80211_setdel_pmksa(struct 
  
        memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
  
 -      if (!info->attrs[NL80211_ATTR_MAC])
 -              return -EINVAL;
 -
        if (!info->attrs[NL80211_ATTR_PMKID])
                return -EINVAL;
  
        pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
 -      pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
 +
 +      if (info->attrs[NL80211_ATTR_MAC]) {
 +              pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
 +      } else if (info->attrs[NL80211_ATTR_SSID] &&
 +                 info->attrs[NL80211_ATTR_FILS_CACHE_ID] &&
 +                 (info->genlhdr->cmd == NL80211_CMD_DEL_PMKSA ||
 +                  info->attrs[NL80211_ATTR_PMK])) {
 +              pmksa.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
 +              pmksa.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
 +              pmksa.cache_id =
 +                      nla_data(info->attrs[NL80211_ATTR_FILS_CACHE_ID]);
 +      } else {
 +              return -EINVAL;
 +      }
 +      if (info->attrs[NL80211_ATTR_PMK]) {
 +              pmksa.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]);
 +              pmksa.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]);
 +      }
  
        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
 -          dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
 +          dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
 +          !(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP &&
 +            wiphy_ext_feature_isset(&rdev->wiphy,
 +                                    NL80211_EXT_FEATURE_AP_PMKSA_CACHING)))
                return -EOPNOTSUPP;
  
        switch (info->genlhdr->cmd) {
@@@ -8963,58 -8219,274 +8966,58 @@@ static int nl80211_remain_on_channel(st
        }
  
        err = rdev_remain_on_channel(rdev, wdev, chandef.chan,
 -                                   duration, &cookie);
 -
 -      if (err)
 -              goto free_msg;
 -
 -      if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
 -              goto nla_put_failure;
 -
 -      genlmsg_end(msg, hdr);
 -
 -      return genlmsg_reply(msg, info);
 -
 - nla_put_failure:
 -      err = -ENOBUFS;
 - free_msg:
 -      nlmsg_free(msg);
 -      return err;
 -}
 -
 -static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
 -                                          struct genl_info *info)
 -{
 -      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 -      struct wireless_dev *wdev = info->user_ptr[1];
 -      u64 cookie;
 -
 -      if (!info->attrs[NL80211_ATTR_COOKIE])
 -              return -EINVAL;
 -
 -      if (!rdev->ops->cancel_remain_on_channel)
 -              return -EOPNOTSUPP;
 -
 -      cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
 -
 -      return rdev_cancel_remain_on_channel(rdev, wdev, cookie);
 -}
 -
 -static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
 -                         u8 *rates, u8 rates_len)
 -{
 -      u8 i;
 -      u32 mask = 0;
 -
 -      for (i = 0; i < rates_len; i++) {
 -              int rate = (rates[i] & 0x7f) * 5;
 -              int ridx;
 -              for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
 -                      struct ieee80211_rate *srate =
 -                              &sband->bitrates[ridx];
 -                      if (rate == srate->bitrate) {
 -                              mask |= 1 << ridx;
 -                              break;
 -                      }
 -              }
 -              if (ridx == sband->n_bitrates)
 -                      return 0; /* rate not found */
 -      }
 -
 -      return mask;
 -}
 -
 -static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
 -                             u8 *rates, u8 rates_len,
 -                             u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
 -{
 -      u8 i;
 -
 -      memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
 -
 -      for (i = 0; i < rates_len; i++) {
 -              int ridx, rbit;
 -
 -              ridx = rates[i] / 8;
 -              rbit = BIT(rates[i] % 8);
 -
 -              /* check validity */
 -              if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
 -                      return false;
 -
 -              /* check availability */
 -              if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
 -                      mcs[ridx] |= rbit;
 -              else
 -                      return false;
 -      }
 -
 -      return true;
 -}
 -
 -static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map)
 -{
 -      u16 mcs_mask = 0;
 -
 -      switch (vht_mcs_map) {
 -      case IEEE80211_VHT_MCS_NOT_SUPPORTED:
 -              break;
 -      case IEEE80211_VHT_MCS_SUPPORT_0_7:
 -              mcs_mask = 0x00FF;
 -              break;
 -      case IEEE80211_VHT_MCS_SUPPORT_0_8:
 -              mcs_mask = 0x01FF;
 -              break;
 -      case IEEE80211_VHT_MCS_SUPPORT_0_9:
 -              mcs_mask = 0x03FF;
 -              break;
 -      default:
 -              break;
 -      }
 -
 -      return mcs_mask;
 -}
 -
 -static void vht_build_mcs_mask(u16 vht_mcs_map,
 -                             u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
 -{
 -      u8 nss;
 -
 -      for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
 -              vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03);
 -              vht_mcs_map >>= 2;
 -      }
 -}
 -
 -static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
 -                           struct nl80211_txrate_vht *txrate,
 -                           u16 mcs[NL80211_VHT_NSS_MAX])
 -{
 -      u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
 -      u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {};
 -      u8 i;
 -
 -      if (!sband->vht_cap.vht_supported)
 -              return false;
 -
 -      memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX);
 -
 -      /* Build vht_mcs_mask from VHT capabilities */
 -      vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
 -
 -      for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
 -              if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
 -                      mcs[i] = txrate->mcs[i];
 -              else
 -                      return false;
 -      }
 -
 -      return true;
 -}
 -
 -static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
 -      [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
 -                                  .len = NL80211_MAX_SUPP_RATES },
 -      [NL80211_TXRATE_HT] = { .type = NLA_BINARY,
 -                              .len = NL80211_MAX_SUPP_HT_RATES },
 -      [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
 -      [NL80211_TXRATE_GI] = { .type = NLA_U8 },
 -};
 -
 -static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
 -                                     struct genl_info *info)
 -{
 -      struct nlattr *tb[NL80211_TXRATE_MAX + 1];
 -      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 -      struct cfg80211_bitrate_mask mask;
 -      int rem, i;
 -      struct net_device *dev = info->user_ptr[1];
 -      struct nlattr *tx_rates;
 -      struct ieee80211_supported_band *sband;
 -      u16 vht_tx_mcs_map;
 +                                   duration, &cookie);
  
 -      if (!rdev->ops->set_bitrate_mask)
 -              return -EOPNOTSUPP;
 +      if (err)
 +              goto free_msg;
  
 -      memset(&mask, 0, sizeof(mask));
 -      /* Default to all rates enabled */
 -      for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
 -              sband = rdev->wiphy.bands[i];
 +      if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
 +              goto nla_put_failure;
  
 -              if (!sband)
 -                      continue;
 +      genlmsg_end(msg, hdr);
  
 -              mask.control[i].legacy = (1 << sband->n_bitrates) - 1;
 -              memcpy(mask.control[i].ht_mcs,
 -                     sband->ht_cap.mcs.rx_mask,
 -                     sizeof(mask.control[i].ht_mcs));
 +      return genlmsg_reply(msg, info);
  
 -              if (!sband->vht_cap.vht_supported)
 -                      continue;
 + nla_put_failure:
 +      err = -ENOBUFS;
 + free_msg:
 +      nlmsg_free(msg);
 +      return err;
 +}
  
 -              vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
 -              vht_build_mcs_mask(vht_tx_mcs_map, mask.control[i].vht_mcs);
 -      }
 +static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
 +                                          struct genl_info *info)
 +{
 +      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 +      struct wireless_dev *wdev = info->user_ptr[1];
 +      u64 cookie;
  
 -      /* if no rates are given set it back to the defaults */
 -      if (!info->attrs[NL80211_ATTR_TX_RATES])
 -              goto out;
 +      if (!info->attrs[NL80211_ATTR_COOKIE])
 +              return -EINVAL;
  
 -      /*
 -       * The nested attribute uses enum nl80211_band as the index. This maps
 -       * directly to the enum ieee80211_band values used in cfg80211.
 -       */
 -      BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
 -      nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
 -              enum ieee80211_band band = nla_type(tx_rates);
 -              int err;
 +      if (!rdev->ops->cancel_remain_on_channel)
 +              return -EOPNOTSUPP;
  
 -              if (band < 0 || band >= IEEE80211_NUM_BANDS)
 -                      return -EINVAL;
 -              sband = rdev->wiphy.bands[band];
 -              if (sband == NULL)
 -                      return -EINVAL;
 -              err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
 -                              nla_len(tx_rates), nl80211_txattr_policy);
 -              if (err)
 -                      return err;
 -              if (tb[NL80211_TXRATE_LEGACY]) {
 -                      mask.control[band].legacy = rateset_to_mask(
 -                              sband,
 -                              nla_data(tb[NL80211_TXRATE_LEGACY]),
 -                              nla_len(tb[NL80211_TXRATE_LEGACY]));
 -                      if ((mask.control[band].legacy == 0) &&
 -                          nla_len(tb[NL80211_TXRATE_LEGACY]))
 -                              return -EINVAL;
 -              }
 -              if (tb[NL80211_TXRATE_HT]) {
 -                      if (!ht_rateset_to_mask(
 -                                      sband,
 -                                      nla_data(tb[NL80211_TXRATE_HT]),
 -                                      nla_len(tb[NL80211_TXRATE_HT]),
 -                                      mask.control[band].ht_mcs))
 -                              return -EINVAL;
 -              }
 -              if (tb[NL80211_TXRATE_VHT]) {
 -                      if (!vht_set_mcs_mask(
 -                                      sband,
 -                                      nla_data(tb[NL80211_TXRATE_VHT]),
 -                                      mask.control[band].vht_mcs))
 -                              return -EINVAL;
 -              }
 -              if (tb[NL80211_TXRATE_GI]) {
 -                      mask.control[band].gi =
 -                              nla_get_u8(tb[NL80211_TXRATE_GI]);
 -                      if (mask.control[band].gi > NL80211_TXRATE_FORCE_LGI)
 -                              return -EINVAL;
 -              }
 +      cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
  
 -              if (mask.control[band].legacy == 0) {
 -                      /* don't allow empty legacy rates if HT or VHT
 -                       * are not even supported.
 -                       */
 -                      if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
 -                            rdev->wiphy.bands[band]->vht_cap.vht_supported))
 -                              return -EINVAL;
 +      return rdev_cancel_remain_on_channel(rdev, wdev, cookie);
 +}
  
 -                      for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
 -                              if (mask.control[band].ht_mcs[i])
 -                                      goto out;
 +static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
 +                                     struct genl_info *info)
 +{
 +      struct cfg80211_bitrate_mask mask;
 +      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 +      struct net_device *dev = info->user_ptr[1];
 +      int err;
  
 -                      for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
 -                              if (mask.control[band].vht_mcs[i])
 -                                      goto out;
 +      if (!rdev->ops->set_bitrate_mask)
 +              return -EOPNOTSUPP;
  
 -                      /* legacy and mcs rates may not be both empty */
 -                      return -EINVAL;
 -              }
 -      }
 +      err = nl80211_parse_tx_bitrate_mask(info, &mask);
 +      if (err)
 +              return err;
  
 -out:
        return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
  }
  
@@@ -9433,12 -8905,9 +9436,12 @@@ static int nl80211_join_mesh(struct sk_
        if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
                setup.beacon_interval =
                        nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
 -              if (setup.beacon_interval < 10 ||
 -                  setup.beacon_interval > 10000)
 -                      return -EINVAL;
 +
 +              err = cfg80211_validate_beacon_int(rdev,
 +                                                 NL80211_IFTYPE_MESH_POINT,
 +                                                 setup.beacon_interval);
 +              if (err)
 +                      return err;
        }
  
        if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) {
                        return err;
        }
  
 +      if (info->attrs[NL80211_ATTR_TX_RATES] && setup.chandef.chan != NULL) {
 +              err = nl80211_parse_tx_bitrate_mask(info, &setup.beacon_rate);
 +              if (err)
 +                      return err;
 +
 +              err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
 +                                            &setup.beacon_rate);
 +              if (err)
 +                      return err;
 +      }
 +
        return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
  }
  
@@@ -9604,20 -9062,6 +9607,20 @@@ static int nl80211_send_wowlan_nd(struc
        if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
                return -ENOBUFS;
  
 +      if (req->relative_rssi_set) {
 +              struct nl80211_bss_select_rssi_adjust rssi_adjust;
 +
 +              if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
 +                             req->relative_rssi))
 +                      return -ENOBUFS;
 +
 +              rssi_adjust.band = req->rssi_adjust.band;
 +              rssi_adjust.delta = req->rssi_adjust.delta;
 +              if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
 +                          sizeof(rssi_adjust), &rssi_adjust))
 +                      return -ENOBUFS;
 +      }
 +
        freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
        if (!freqs)
                return -ENOBUFS;
@@@ -10394,27 -9838,18 +10397,27 @@@ static int nl80211_set_rekey_data(struc
        if (err)
                return err;
  
 -      if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
 -          !tb[NL80211_REKEY_DATA_KCK])
 +      if (!tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_REPLAY_CTR] ||
 +          (!wiphy_ext_feature_isset(&rdev->wiphy,
 +                                    NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
 +           !wiphy_ext_feature_isset(&rdev->wiphy,
 +                                    NL80211_EXT_FEATURE_FILS_STA) &&
 +           !tb[NL80211_REKEY_DATA_KCK]))
                return -EINVAL;
 +
        if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
                return -ERANGE;
 -      if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
 +      if (nla_len(tb[NL80211_REKEY_DATA_KEK]) < NL80211_KEK_LEN)
                return -ERANGE;
 -      if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN)
 +      if (tb[NL80211_REKEY_DATA_KCK] &&
 +          nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN)
                return -ERANGE;
  
 +      memset(&rekey_data, 0, sizeof(rekey_data));
        rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]);
 -      rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]);
 +      rekey_data.kek_len = nla_len(tb[NL80211_REKEY_DATA_KEK]);
 +      if (tb[NL80211_REKEY_DATA_KCK])
 +              rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]);
        rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]);
  
        wdev_lock(wdev);
@@@ -10785,7 -10220,7 +10788,7 @@@ static int nl80211_prepare_vendor_dump(
                *wdev = NULL;
  
                if (cb->args[1]) {
 -                      list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
 +                      list_for_each_entry(tmp, &wiphy->wdev_list, list) {
                                if (tmp->identifier == cb->args[1] - 1) {
                                        *wdev = tmp;
                                        break;
@@@ -11210,74 -10645,6 +11213,74 @@@ static int nl80211_tdls_cancel_channel_
        return 0;
  }
  
 +static int nl80211_external_auth(struct sk_buff *skb, struct genl_info *info)
 +{
 +      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 +      struct net_device *dev = info->user_ptr[1];
 +      struct cfg80211_external_auth_params params;
 +
 +      if (!rdev->ops->external_auth)
 +              return -EOPNOTSUPP;
 +
 +      if (!info->attrs[NL80211_ATTR_SSID] &&
 +          dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
 +          dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
 +              return -EINVAL;
 +
 +      if (!info->attrs[NL80211_ATTR_BSSID])
 +              return -EINVAL;
 +
 +      if (!info->attrs[NL80211_ATTR_STATUS_CODE])
 +              return -EINVAL;
 +
 +      memset(&params, 0, sizeof(params));
 +
 +      if (info->attrs[NL80211_ATTR_SSID]) {
 +              params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
 +              if (params.ssid.ssid_len == 0 ||
 +                  params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN)
 +                      return -EINVAL;
 +              memcpy(params.ssid.ssid,
 +                     nla_data(info->attrs[NL80211_ATTR_SSID]),
 +                     params.ssid.ssid_len);
 +      }
 +
 +      memcpy(params.bssid, nla_data(info->attrs[NL80211_ATTR_BSSID]),
 +             ETH_ALEN);
 +
 +      params.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
 +
 +      if (info->attrs[NL80211_ATTR_PMKID])
 +              params.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
 +
 +      return rdev_external_auth(rdev, dev, &params);
 +}
 +
 +static int nl80211_update_owe_info(struct sk_buff *skb, struct genl_info *info)
 +{
 +      struct cfg80211_registered_device *rdev = info->user_ptr[0];
 +      struct cfg80211_update_owe_info owe_info;
 +      struct net_device *dev = info->user_ptr[1];
 +
 +      if (!rdev->ops->update_owe_info)
 +              return -EOPNOTSUPP;
 +
 +      if (!info->attrs[NL80211_ATTR_STATUS_CODE] ||
 +          !info->attrs[NL80211_ATTR_MAC])
 +              return -EINVAL;
 +
 +      memset(&owe_info, 0, sizeof(owe_info));
 +      owe_info.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
 +      nla_memcpy(owe_info.peer, info->attrs[NL80211_ATTR_MAC], ETH_ALEN);
 +
 +      if (info->attrs[NL80211_ATTR_IE]) {
 +              owe_info.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
 +              owe_info.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 +      }
 +
 +      return rdev_update_owe_info(rdev, dev, &owe_info);
 +}
 +
  #define NL80211_FLAG_NEED_WIPHY               0x01
  #define NL80211_FLAG_NEED_NETDEV      0x02
  #define NL80211_FLAG_NEED_RTNL                0x04
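
The two handlers added in this hunk, nl80211_external_auth() and nl80211_update_owe_info(), are the kernel-side halves of userspace offloads: the kernel asks userspace to run SAE or OWE processing (see cfg80211_external_auth_request() and cfg80211_update_owe_info_event() further down), and userspace answers with the same commands carrying a BSSID/peer address, optional IEs or SSID, and an 802.11 status code. A rough libnl-3 sketch of a userspace agent sending the external-auth status reply follows; the interface index, BSSID and SSID are placeholders, error handling is kept minimal, and it assumes uapi headers that already define NL80211_CMD_EXTERNAL_AUTH and NL80211_ATTR_BSSID:

/* Rough userspace sketch (libnl-3): answer an external-auth request. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#include <string.h>

int send_external_auth_status(int ifindex, const unsigned char bssid[6],
                              const char *ssid, unsigned short status)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg;
        int fam, ret;

        if (!sk)
                return -1;
        msg = nlmsg_alloc();
        if (!msg) {
                nl_socket_free(sk);
                return -1;
        }

        genl_connect(sk);
        fam = genl_ctrl_resolve(sk, "nl80211");

        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
                    NL80211_CMD_EXTERNAL_AUTH, 0);
        nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
        nla_put(msg, NL80211_ATTR_BSSID, 6, bssid);
        nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);
        nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status);

        ret = nl_send_auto(sk, msg);

        nlmsg_free(msg);
        nl_socket_free(sk);
        return ret < 0 ? -1 : 0;
}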
@@@ -11627,14 -10994,6 +11630,14 @@@ static const struct genl_ops nl80211_op
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
 +              .cmd = NL80211_CMD_ABORT_SCAN,
 +              .doit = nl80211_abort_scan,
 +              .policy = nl80211_policy,
 +              .flags = GENL_ADMIN_PERM,
 +              .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
 +                                NL80211_FLAG_NEED_RTNL,
 +      },
 +      {
                .cmd = NL80211_CMD_GET_SCAN,
                .policy = nl80211_policy,
                .dumpit = nl80211_dump_scan,
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
 +              .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
 +              .doit = nl80211_update_connect_params,
 +              .policy = nl80211_policy,
 +              .flags = GENL_ADMIN_PERM,
 +              .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 +                                NL80211_FLAG_NEED_RTNL,
 +      },
 +      {
                .cmd = NL80211_CMD_DISCONNECT,
                .doit = nl80211_disconnect,
                .policy = nl80211_policy,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
 +      {
 +              .cmd = NL80211_CMD_EXTERNAL_AUTH,
 +              .doit = nl80211_external_auth,
 +              .policy = nl80211_policy,
 +              .flags = GENL_ADMIN_PERM,
 +              .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 +                                NL80211_FLAG_NEED_RTNL,
 +      },
 +      {
 +              .cmd = NL80211_CMD_UPDATE_OWE_INFO,
 +              .doit = nl80211_update_owe_info,
 +              .flags = GENL_ADMIN_PERM,
 +              .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 +                                NL80211_FLAG_NEED_RTNL,
 +      },
  };
  
  /* notification functions */
@@@ -12569,16 -11905,15 +12572,16 @@@ void nl80211_send_assoc_timeout(struct 
  }
  
  void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
 -                               struct net_device *netdev, const u8 *bssid,
 -                               const u8 *req_ie, size_t req_ie_len,
 -                               const u8 *resp_ie, size_t resp_ie_len,
 -                               u16 status, gfp_t gfp)
 +                               struct net_device *netdev,
 +                               struct cfg80211_connect_resp_params *cr,
 +                               gfp_t gfp)
  {
        struct sk_buff *msg;
        void *hdr;
  
 -      msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
 +      msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len +
 +                      cr->fils_kek_len + cr->pmk_len +
 +                      (cr->pmkid ? WLAN_PMKID_LEN : 0), gfp);
        if (!msg)
                return;
  
  
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
 -          (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
 -          nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) ||
 -          (req_ie &&
 -           nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
 -          (resp_ie &&
 -           nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
 +          (cr->bssid &&
 +           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cr->bssid)) ||
 +          nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
 +                      cr->status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
 +                      cr->status) ||
 +          (cr->status < 0 &&
 +           (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
 +            nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON,
 +                        cr->timeout_reason))) ||
 +          (cr->req_ie &&
 +           nla_put(msg, NL80211_ATTR_REQ_IE, cr->req_ie_len, cr->req_ie)) ||
 +          (cr->resp_ie &&
 +           nla_put(msg, NL80211_ATTR_RESP_IE, cr->resp_ie_len,
 +                   cr->resp_ie)) ||
 +          (cr->update_erp_next_seq_num &&
 +           nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
 +                       cr->fils_erp_next_seq_num)) ||
 +          (cr->status == WLAN_STATUS_SUCCESS &&
 +           ((cr->fils_kek &&
 +             nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils_kek_len,
 +                     cr->fils_kek)) ||
 +            (cr->pmk &&
 +             nla_put(msg, NL80211_ATTR_PMK, cr->pmk_len, cr->pmk)) ||
 +            (cr->pmkid &&
 +             nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->pmkid)))))
                goto nla_put_failure;
  
        genlmsg_end(msg, hdr);
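
In the rewritten connect-result event above, a negative status value is used internally to mean "the attempt timed out": userspace sees WLAN_STATUS_UNSPECIFIED_FAILURE together with NL80211_ATTR_TIMED_OUT and a timeout reason, while the FILS KEK, PMK and PMKID attributes are emitted only when the status is WLAN_STATUS_SUCCESS. A tiny standalone sketch of that mapping follows; the numeric constants are illustrative stand-ins:

/* Simplified model of the status handling in the hunk above. */
#include <stdbool.h>
#include <stdio.h>

#define STATUS_SUCCESS              0
#define STATUS_UNSPECIFIED_FAILURE  1

struct connect_report {
        unsigned short status;  /* status code reported to userspace */
        bool timed_out;         /* NL80211_ATTR_TIMED_OUT present?   */
        bool include_keys;      /* FILS KEK / PMK / PMKID allowed?   */
};

struct connect_report map_status(int internal_status)
{
        struct connect_report r = {
                /* Negative values are a kernel-internal "timed out" marker. */
                .status = internal_status < 0 ? STATUS_UNSPECIFIED_FAILURE
                                              : (unsigned short)internal_status,
                .timed_out = internal_status < 0,
                .include_keys = internal_status == STATUS_SUCCESS,
        };
        return r;
}

int main(void)
{
        struct connect_report r = map_status(-1);

        printf("status=%u timed_out=%d keys=%d\n",
               r.status, r.timed_out, r.include_keys);
        return 0;
}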
@@@ -13914,13 -13230,11 +13917,13 @@@ static int nl80211_netlink_notify(struc
                                schedule_work(&rdev->sched_scan_stop_wk);
                }
  
 -              list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
 +              list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) {
                        cfg80211_mlme_unregister_socket(wdev, notify->portid);
  
                        if (wdev->owner_nlportid == notify->portid)
                                schedule_destroy_work = true;
 +                      else if (wdev->conn_owner_nlportid == notify->portid)
 +                              schedule_work(&wdev->disconnect_wk);
                }
  
                spin_lock_bh(&rdev->beacon_registrations_lock);
@@@ -13975,8 -13289,7 +13978,8 @@@ void cfg80211_ft_event(struct net_devic
        if (!ft_event->target_ap)
                return;
  
 -      msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
 +      msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len,
 +                      GFP_KERNEL);
        if (!msg)
                return;
  
@@@ -14075,97 -13388,6 +14078,97 @@@ void nl80211_send_ap_stopped(struct wir
        nlmsg_free(msg);
  }
  
 +void cfg80211_ap_stopped(struct net_device *netdev, gfp_t gfp)
 +{
 +      struct wireless_dev *wdev = netdev->ieee80211_ptr;
 +      struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 +
 +      nl80211_send_mlme_event(rdev, netdev, NULL, 0,
 +                              NL80211_CMD_STOP_AP, gfp, -1);
 +}
 +EXPORT_SYMBOL(cfg80211_ap_stopped);
 +
 +int cfg80211_external_auth_request(struct net_device *dev,
 +                                 struct cfg80211_external_auth_params *params,
 +                                 gfp_t gfp)
 +{
 +      struct wireless_dev *wdev = dev->ieee80211_ptr;
 +      struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 +      struct sk_buff *msg;
 +      void *hdr;
 +
 +      if (!wdev->conn_owner_nlportid)
 +              return -EINVAL;
 +
 +      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 +      if (!msg)
 +              return -ENOMEM;
 +
 +      hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_EXTERNAL_AUTH);
 +      if (!hdr)
 +              goto nla_put_failure;
 +
 +      if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 +          nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
 +          nla_put_u32(msg, NL80211_ATTR_AKM_SUITES, params->key_mgmt_suite) ||
 +          nla_put_u32(msg, NL80211_ATTR_EXTERNAL_AUTH_ACTION,
 +                      params->action) ||
 +          nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, params->bssid) ||
 +          nla_put(msg, NL80211_ATTR_SSID, params->ssid.ssid_len,
 +                  params->ssid.ssid))
 +              goto nla_put_failure;
 +
 +      genlmsg_end(msg, hdr);
 +      genlmsg_unicast(wiphy_net(&rdev->wiphy), msg,
 +                      wdev->conn_owner_nlportid);
 +      return 0;
 +
 + nla_put_failure:
 +      nlmsg_free(msg);
 +      return -ENOBUFS;
 +}
 +EXPORT_SYMBOL(cfg80211_external_auth_request);
 +
 +void cfg80211_update_owe_info_event(struct net_device *netdev,
 +                                  struct cfg80211_update_owe_info *owe_info,
 +                                  gfp_t gfp)
 +{
 +      struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy;
 +      struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 +      struct sk_buff *msg;
 +      void *hdr;
 +
 +      trace_cfg80211_update_owe_info_event(wiphy, netdev, owe_info);
 +
 +      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 +      if (!msg)
 +              return;
 +
 +      hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_UPDATE_OWE_INFO);
 +      if (!hdr)
 +              goto nla_put_failure;
 +
 +      if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 +          nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
 +          nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, owe_info->peer))
 +              goto nla_put_failure;
 +
 +      if (!owe_info->ie_len ||
 +          nla_put(msg, NL80211_ATTR_IE, owe_info->ie_len, owe_info->ie))
 +              goto nla_put_failure;
 +
 +      genlmsg_end(msg, hdr);
 +
 +      genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
 +                              NL80211_MCGRP_MLME, gfp);
 +      return;
 +
 +nla_put_failure:
 +      genlmsg_cancel(msg, hdr);
 +      nlmsg_free(msg);
 +}
 +EXPORT_SYMBOL(cfg80211_update_owe_info_event);
 +
  /* initialisation/exit functions */
  
  int nl80211_init(void)