
Merge tag 'v4.4.214' into 10
author    0ranko0P <ranko0p@outlook.com>    Tue, 18 Feb 2020 05:24:55 +0000 (13:24 +0800)
committer 0ranko0P <ranko0p@outlook.com>    Tue, 18 Feb 2020 05:24:55 +0000 (13:24 +0800)
This is the 4.4.214 stable release

30 files changed:
Makefile
arch/arm64/boot/Makefile
arch/powerpc/Kconfig
crypto/algapi.c
crypto/api.c
crypto/internal.h
drivers/md/dm.c
drivers/net/wireless/airo.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
drivers/of/Kconfig
drivers/of/address.c
drivers/scsi/ufs/ufshcd.c
drivers/staging/vt6656/int.c
drivers/staging/vt6656/main_usb.c
drivers/staging/vt6656/rxtx.c
drivers/usb/dwc3/core.c
drivers/usb/gadget/function/f_ncm.c
fs/btrfs/extent_io.c
fs/ext2/super.c
fs/namei.c
kernel/events/core.c
kernel/time/clocksource.c
lib/test_kasan.c
mm/mempolicy.c
net/ipv4/tcp.c
net/ipv6/ip6_vti.c
sound/core/pcm_native.c
sound/soc/soc-pcm.c

diff --combined Makefile
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 4
  PATCHLEVEL = 4
- SUBLEVEL = 212
+ SUBLEVEL = 214
  EXTRAVERSION =
  NAME = Blurry Fish Butt
  
@@@ -30,7 -30,7 +30,7 @@@ unexport GREP_OPTION
  # Most importantly: sub-Makefiles should only ever modify files in
  # their own directory. If in some directory we have a dependency on
  # a file in another dir (which doesn't happen often, but it's often
 -# unavoidable when linking the built-in.o targets which finally
 +# unavoidable when linking the built-in.a targets which finally
  # turn into vmlinux), we will call a sub make in that other dir, and
  # after that we are sure that everything which is in that other dir
  # is now up to date.
@@@ -148,7 -148,7 +148,7 @@@ PHONY += $(MAKECMDGOALS) sub-mak
  $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
        @:
  
 -sub-make: FORCE
 +sub-make:
        $(Q)$(MAKE) -C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR) \
        -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
  
@@@ -303,7 -303,7 +303,7 @@@ CONFIG_SHELL := $(shell if [ -x "$$BASH
  
  HOSTCC       = gcc
  HOSTCXX      = g++
 -HOSTCFLAGS   := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
 +HOSTCFLAGS   := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89 -pipe
  HOSTCXXFLAGS = -O2
  
  # Decide whether to build built-in, modular, or both.
@@@ -343,7 -343,6 +343,7 @@@ include scripts/Kbuild.includ
  # Make variables (CC, etc...)
  AS            = $(CROSS_COMPILE)as
  LD            = $(CROSS_COMPILE)ld
 +LDLLD         = ld.lld
  CC            = $(CROSS_COMPILE)gcc
  CPP           = $(CC) -E
  AR            = $(CROSS_COMPILE)ar
@@@ -367,7 -366,6 +367,7 @@@ LDFLAGS_MODULE  
  CFLAGS_KERNEL =
  AFLAGS_KERNEL =
  CFLAGS_GCOV   = -fprofile-arcs -ftest-coverage -fno-tree-loop-im
 +CFLAGS_KCOV   = -fsanitize-coverage=trace-pc
  
  
  # Use USERINCLUDE when you must reference the UAPI directories only.
@@@ -390,15 -388,13 +390,15 @@@ LINUXINCLUDE    := 
  
  KBUILD_CPPFLAGS := -D__KERNEL__
  
 -KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 +KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs -pipe \
                   -fno-strict-aliasing -fno-common \
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
                   -std=gnu89 $(call cc-option,-fno-PIE)
  
 -
 +ifeq ($(TARGET_BOARD_TYPE),auto)
 +KBUILD_CFLAGS    += -DCONFIG_PLATFORM_AUTO
 +endif
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
  KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
@@@ -418,8 -414,7 +418,8 @@@ export HOSTCXX HOSTCXXFLAGS LDFLAGS_MOD
  
  export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
  export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV
 -export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE
 +export CFLAGS_KASAN CFLAGS_UBSAN CFLAGS_KASAN_NOSANITIZE
 +export CFLAGS_KCOV
  export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
  export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
  export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@@ -614,11 -609,7 +614,11 @@@ all: vmlinu
  
  ifeq ($(cc-name),clang)
  ifneq ($(CROSS_COMPILE),)
 -CLANG_TARGET  := --target=$(notdir $(CROSS_COMPILE:%-=%))
 +CLANG_TRIPLE  ?= $(CROSS_COMPILE)
 +CLANG_TARGET  := --target=$(notdir $(CLANG_TRIPLE:%-=%))
 +ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_TARGET)), y)
 +$(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
 +endif
  GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
  CLANG_PREFIX  := --prefix=$(GCC_TOOLCHAIN_DIR)
  GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
@@@ -632,26 -623,6 +632,26 @@@ KBUILD_CFLAGS += $(call cc-option, -no-
  KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
  endif
  
 +# Make toolchain changes before including arch/$(SRCARCH)/Makefile to ensure
 +# ar/cc/ld-* macros return correct values.
 +ifdef CONFIG_LTO_CLANG
 +# use LLVM linker LLD for LTO linking and vmlinux_link
 +LD            := $(LDLLD)
 +# use llvm-ar for building symbol tables from IR files, and llvm-nm instead
 +# of objdump for processing symbol versions and exports
 +LLVM_AR               := llvm-ar
 +LLVM_NM               := llvm-nm
 +export LLVM_AR LLVM_NM
 +endif
 +
 +ifeq ($(cc-name),clang)
 +ifeq ($(ld-name),lld)
 +KBUILD_CFLAGS += -fuse-ld=lld
 +LDFLAGS               += -O2
 +endif
 +KBUILD_CPPFLAGS       += -Qunused-arguments
 +endif
 +
  # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
  # values of the respective KBUILD_* variables
  ARCH_CPPFLAGS :=
@@@ -659,29 -630,6 +659,29 @@@ ARCH_AFLAGS :
  ARCH_CFLAGS :=
  include arch/$(SRCARCH)/Makefile
  
 +ifeq ($(cc-name),clang)
 +KBUILD_CFLAGS += -O3
 +KBUILD_CFLAGS += $(call cc-option, -mllvm -polly) \
 +                 $(call cc-option, -mllvm -polly-run-dce) \
 +                 $(call cc-option, -mllvm -polly-run-inliner) \
 +                 $(call cc-option, -mllvm -polly-opt-fusion=max) \
 +                 $(call cc-option, -mllvm -polly-ast-use-context) \
 +                 $(call cc-option, -mllvm -polly-detect-keep-going) \
 +                 $(call cc-option, -mllvm -polly-vectorizer=stripmine) \
 +                 $(call cc-option, -mllvm -polly-invariant-load-hoisting)
 +else
 +KBUILD_CFLAGS += -O2
 +endif
 +
 +ifeq ($(cc-name),gcc)
 +KBUILD_CFLAGS += -mcpu=cortex-a73.cortex-a53
 +KBUILD_AFLAGS += -mcpu=cortex-a73.cortex-a53
 +endif
 +ifeq ($(cc-name),clang)
 +KBUILD_CFLAGS += -mcpu=cortex-a53
 +KBUILD_AFLAGS += -mcpu=cortex-a53
 +endif
 +
  KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
  KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
  KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
@@@ -691,8 -639,14 +691,8 @@@ KBUILD_CFLAGS     += $(call cc-disable-warn
  KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
  KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
  
 -ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 -KBUILD_CFLAGS += -Os
 -else
 -ifdef CONFIG_PROFILE_ALL_BRANCHES
 -KBUILD_CFLAGS += -O2
 -else
 -KBUILD_CFLAGS   += -O2
 -endif
 +ifdef CONFIG_CC_WERROR
 +KBUILD_CFLAGS += -Werror
  endif
  
  # Tell gcc to never replace conditional load with a non-conditional one
@@@ -755,24 -709,17 +755,24 @@@ endi
  endif
  KBUILD_CFLAGS += $(stackp-flag)
  
 +ifdef CONFIG_KCOV
 +  ifeq ($(call cc-option, $(CFLAGS_KCOV)),)
 +    $(warning Cannot use CONFIG_KCOV: \
 +             -fsanitize-coverage=trace-pc is not supported by compiler)
 +    CFLAGS_KCOV =
 +  endif
 +endif
 +
  ifeq ($(cc-name),clang)
 -KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
  KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
  KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
 +KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier)
  # Quiet clang warning: comparison of unsigned expression < 0 is always false
  KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
  # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
  # source of a reference will be _MergedGlobals and not on of the whitelisted names.
  # See modpost pattern 2
  KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
 -KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
  else
  
  # These warnings generated too much noise in a regular build.
@@@ -794,11 -741,6 +794,11 @@@ KBUILD_CFLAGS    += -fomit-frame-pointe
  endif
  endif
  
 +# Initialize all stack variables with a pattern, if desired.
 +ifdef CONFIG_INIT_STACK_ALL
 +KBUILD_CFLAGS += $(call cc-option, -ftrivial-auto-var-init=pattern)
 +endif
 +
  KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
  
  ifdef CONFIG_DEBUG_INFO
@@@ -841,33 -783,6 +841,33 @@@ ifdef CONFIG_DEBUG_SECTION_MISMATC
  KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once)
  endif
  
 +ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
 +KBUILD_CFLAGS_KERNEL  += $(call cc-option,-ffunction-sections,)
 +KBUILD_CFLAGS_KERNEL  += $(call cc-option,-fdata-sections,)
 +endif
 +
 +ifdef CONFIG_LTO_CLANG
 +ifdef CONFIG_THINLTO
 +lto-clang-flags := -flto=thin
 +LDFLAGS += --thinlto-cache-dir=.thinlto-cache
 +else
 +lto-clang-flags       := -flto
 +endif
 +lto-clang-flags += -fvisibility=hidden
 +
 +# allow disabling only clang LTO where needed
 +DISABLE_LTO_CLANG := -fno-lto -fvisibility=default
 +export DISABLE_LTO_CLANG
 +endif
 +
 +ifdef CONFIG_LTO
 +LTO_CFLAGS    := $(lto-clang-flags)
 +KBUILD_CFLAGS += $(LTO_CFLAGS)
 +
 +DISABLE_LTO   := $(DISABLE_LTO_CLANG)
 +export LTO_CFLAGS DISABLE_LTO
 +endif
 +
  # arch Makefile may override CC so keep this after arch Makefile is included
  NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
  CHECKFLAGS     += $(NOSTDINC_FLAGS)
@@@ -919,7 -834,6 +919,7 @@@ KBUILD_ARFLAGS := $(call ar-option,D
  
  include scripts/Makefile.kasan
  include scripts/Makefile.extrawarn
 +include scripts/Makefile.ubsan
  
  # Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
  # last assignments
@@@ -933,10 -847,6 +933,10 @@@ LDFLAGS_BUILD_ID = $(patsubst -Wl$(comm
  KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
  LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
  
 +ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
 +LDFLAGS_vmlinux       += $(call ld-option, --gc-sections,)
 +endif
 +
  ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
  LDFLAGS_vmlinux       += $(call ld-option, -X,)
  endif
@@@ -1034,24 -944,24 +1034,24 @@@ vmlinux-dirs  := $(patsubst %/,%,$(filte
  vmlinux-alldirs       := $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \
                     $(init-) $(core-) $(drivers-) $(net-) $(libs-) $(virt-))))
  
 -init-y                := $(patsubst %/, %/built-in.o, $(init-y))
 -core-y                := $(patsubst %/, %/built-in.o, $(core-y))
 -drivers-y     := $(patsubst %/, %/built-in.o, $(drivers-y))
 -net-y         := $(patsubst %/, %/built-in.o, $(net-y))
 +init-y                := $(patsubst %/, %/built-in.a, $(init-y))
 +core-y                := $(patsubst %/, %/built-in.a, $(core-y))
 +drivers-y     := $(patsubst %/, %/built-in.a, $(drivers-y))
 +net-y         := $(patsubst %/, %/built-in.a, $(net-y))
  libs-y1               := $(patsubst %/, %/lib.a, $(libs-y))
 -libs-y2               := $(patsubst %/, %/built-in.o, $(libs-y))
 -libs-y                := $(libs-y1) $(libs-y2)
 -virt-y                := $(patsubst %/, %/built-in.o, $(virt-y))
 +libs-y2               := $(patsubst %/, %/built-in.a, $(filter-out %.a, $(libs-y)))
 +virt-y                := $(patsubst %/, %/built-in.a, $(virt-y))
  
  # Externally visible symbols (used by link-vmlinux.sh)
  export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
 -export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y) $(virt-y)
 +export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt-y)
 +export KBUILD_VMLINUX_LIBS := $(libs-y1)
  export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
  export LDFLAGS_vmlinux
  # used by scripts/pacmage/Makefile
  export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)
  
 -vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
 +vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS)
  
  # Final link of vmlinux
        cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
@@@ -1086,7 -996,7 +1086,7 @@@ $(sort $(vmlinux-deps)): $(vmlinux-dirs
  
  PHONY += $(vmlinux-dirs)
  $(vmlinux-dirs): prepare scripts
 -      $(Q)$(MAKE) $(build)=$@
 +      $(Q)$(MAKE) $(build)=$@ need-builtin=1
  
  define filechk_kernel.release
        echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
@@@ -1128,29 -1038,12 +1128,29 @@@ prepare1: prepare2 $(version_h) include
  
  archprepare: archheaders archscripts prepare1 scripts_basic
  
 -prepare0: archprepare FORCE
 +prepare0: archprepare
        $(Q)$(MAKE) $(build)=.
  
  # All the preparing..
  prepare: prepare0
  
 +# Make sure we're using a supported toolchain with LTO_CLANG
 +ifdef CONFIG_LTO_CLANG
 +  ifneq ($(call clang-ifversion, -ge, 0800, y), y)
 +      @echo Cannot use CONFIG_LTO_CLANG: requires clang 8.0 or later >&2 && exit 1
 +  endif
 +  ifneq ($(ld-name),lld)
 +      @echo Cannot use CONFIG_LTO_CLANG: requires LLD >&2 && exit 1
 +  endif
 +endif
 +# Make sure compiler supports LTO flags
 +ifdef lto-flags
 +  ifeq ($(call cc-option, $(lto-flags)),)
 +      @echo Cannot use CONFIG_LTO: $(lto-flags) not supported by compiler \
 +              >&2 && exit 1
 +  endif
 +endif
 +
  # Generate some files
  # ---------------------------------------------------------------------------
  
@@@ -1190,7 -1083,7 +1190,7 @@@ INSTALL_FW_PATH=$(INSTALL_MOD_PATH)/lib
  export INSTALL_FW_PATH
  
  PHONY += firmware_install
 -firmware_install: FORCE
 +firmware_install:
        @mkdir -p $(objtree)/firmware
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_install
  
@@@ -1210,7 -1103,7 +1210,7 @@@ PHONY += archscript
  archscripts:
  
  PHONY += __headers
 -__headers: $(version_h) scripts_basic asm-generic archheaders archscripts FORCE
 +__headers: $(version_h) scripts_basic asm-generic archheaders archscripts
        $(Q)$(MAKE) $(build)=scripts build_unifdef
  
  PHONY += headers_install_all
@@@ -1599,8 -1492,7 +1599,8 @@@ clean: $(clean-dirs
                -o -name '*.symtypes' -o -name 'modules.order' \
                -o -name modules.builtin -o -name '.tmp_*.o.*' \
                -o -name '*.ll' \
 -              -o -name '*.gcno' \) -type f -print | xargs rm -f
 +              -o -name '*.gcno' \
 +              -o -name '*.*.symversions' \) -type f -print | xargs rm -f
  
  # Generate tags for editors
  # ---------------------------------------------------------------------------
diff --combined arch/arm64/boot/Makefile
  # Based on the ia64 boot/Makefile.
  #
  
- targets := Image Image.gz
+ targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
  
 +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
 +ifneq ($(DTB_NAMES),)
 +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
 +DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
 +else
 +DTB_OBJS := $(shell find -L $(obj)/dts/ -name \*.dtb)
 +endif
 +
  $(obj)/Image: vmlinux FORCE
        $(call if_changed,objcopy)
  
  $(obj)/Image.bz2: $(obj)/Image FORCE
        $(call if_changed,bzip2)
  
 +$(obj)/Image-dtb: $(obj)/Image $(DTB_OBJS) FORCE
 +      $(call if_changed,cat)
 +
  $(obj)/Image.gz: $(obj)/Image FORCE
        $(call if_changed,gzip)
  
@@@ -45,9 -34,6 +45,9 @@@ $(obj)/Image.lzma: $(obj)/Image FORC
  $(obj)/Image.lzo: $(obj)/Image FORCE
        $(call if_changed,lzo)
  
 +$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE
 +      $(call if_changed,cat)
 +
  install: $(obj)/Image
        $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
        $(obj)/Image System.map "$(INSTALL_PATH)"
diff --combined arch/powerpc/Kconfig
@@@ -93,6 -93,7 +93,7 @@@ config PP
        select BINFMT_ELF
        select ARCH_HAS_ELF_RANDOMIZE
        select OF
+       select OF_DMA_DEFAULT_COHERENT          if !NOT_COHERENT_CACHE
        select OF_EARLY_FLATTREE
        select OF_RESERVED_MEM
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
 +      select HAVE_EXIT_THREAD
        select HAVE_OPROFILE
        select HAVE_DEBUG_KMEMLEAK
        select ARCH_HAS_SG_CHAIN
        select EDAC_ATOMIC_SCRUB
        select ARCH_HAS_DMA_SET_COHERENT_MASK
        select HAVE_ARCH_SECCOMP_FILTER
 +      select HAVE_ARCH_HARDENED_USERCOPY
  
  config PPC_BARRIER_NOSPEC
      bool
diff --combined crypto/algapi.c
@@@ -653,11 -653,9 +653,9 @@@ EXPORT_SYMBOL_GPL(crypto_grab_spawn)
  
  void crypto_drop_spawn(struct crypto_spawn *spawn)
  {
-       if (!spawn->alg)
-               return;
        down_write(&crypto_alg_sem);
-       list_del(&spawn->list);
+       if (spawn->alg)
+               list_del(&spawn->list);
        up_write(&crypto_alg_sem);
  }
  EXPORT_SYMBOL_GPL(crypto_drop_spawn);
  static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
  {
        struct crypto_alg *alg;
-       struct crypto_alg *alg2;
  
        down_read(&crypto_alg_sem);
        alg = spawn->alg;
-       alg2 = alg;
-       if (alg2)
-               alg2 = crypto_mod_get(alg2);
-       up_read(&crypto_alg_sem);
-       if (!alg2) {
-               if (alg)
-                       crypto_shoot_alg(alg);
-               return ERR_PTR(-EAGAIN);
+       if (alg && !crypto_mod_get(alg)) {
+               alg->cra_flags |= CRYPTO_ALG_DYING;
+               alg = NULL;
        }
+       up_read(&crypto_alg_sem);
  
-       return alg;
+       return alg ?: ERR_PTR(-EAGAIN);
  }
  
  struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
@@@ -1001,21 -993,6 +993,21 @@@ unsigned int crypto_alg_extsize(struct 
  }
  EXPORT_SYMBOL_GPL(crypto_alg_extsize);
  
 +int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
 +                      u32 type, u32 mask)
 +{
 +      int ret = 0;
 +      struct crypto_alg *alg = crypto_find_alg(name, frontend, type, mask);
 +
 +      if (!IS_ERR(alg)) {
 +              crypto_mod_put(alg);
 +              ret = 1;
 +      }
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(crypto_type_has_alg);
 +
  static int __init crypto_algapi_init(void)
  {
        crypto_init_proc();
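
The crypto_type_has_alg() helper added above gives a crypto_type frontend a cheap existence check: look the algorithm up through the frontend, drop the reference straight away, and report whether the lookup succeeded. As a hedged illustration (mainline builds crypto_has_ahash() on this helper; whether this tree carries the same wrapper is an assumption), a frontend-specific "does this algorithm exist?" query looks like:

/* Sketch only: mirrors mainline crypto/ahash.c, not code from this merge. */
#include <crypto/internal/hash.h>
#include "internal.h"

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	/* Returns 1 if an ahash algorithm with this name can be
	 * instantiated (the module reference is dropped immediately),
	 * 0 otherwise. */
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
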
diff --combined crypto/api.c
@@@ -24,7 -24,6 +24,7 @@@
  #include <linux/sched.h>
  #include <linux/slab.h>
  #include <linux/string.h>
 +#include <linux/completion.h>
  #include "internal.h"
  
  LIST_HEAD(crypto_alg_list);
@@@ -356,13 -355,12 +356,12 @@@ static unsigned int crypto_ctxsize(stru
        return len;
  }
  
- void crypto_shoot_alg(struct crypto_alg *alg)
static void crypto_shoot_alg(struct crypto_alg *alg)
  {
        down_write(&crypto_alg_sem);
        alg->cra_flags |= CRYPTO_ALG_DYING;
        up_write(&crypto_alg_sem);
  }
- EXPORT_SYMBOL_GPL(crypto_shoot_alg);
  
  struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
                                      u32 mask)
@@@ -612,17 -610,5 +611,17 @@@ int crypto_has_alg(const char *name, u3
  }
  EXPORT_SYMBOL_GPL(crypto_has_alg);
  
 +void crypto_req_done(struct crypto_async_request *req, int err)
 +{
 +      struct crypto_wait *wait = req->data;
 +
 +      if (err == -EINPROGRESS)
 +              return;
 +
 +      wait->err = err;
 +      complete(&wait->completion);
 +}
 +EXPORT_SYMBOL_GPL(crypto_req_done);
 +
  MODULE_DESCRIPTION("Cryptographic core API");
  MODULE_LICENSE("GPL");
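
crypto_req_done() added above is the generic completion callback for the crypto_wait pattern: it records the final status in the waiter and completes it, except when the callback merely reports -EINPROGRESS for a request leaving the backlog. A minimal usage sketch, assuming the companion struct crypto_wait, DECLARE_CRYPTO_WAIT() and crypto_wait_req() helpers were backported alongside it:

#include <crypto/hash.h>
#include <linux/crypto.h>

/* Sketch, not code from this merge: drive an async ahash request
 * synchronously from process context via crypto_req_done(). */
static int example_sync_digest(struct ahash_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	/* crypto_req_done() stores the result in 'wait' and completes it
	 * once the driver finishes the request. */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleep and
	 * returns the final status reported through the callback. */
	return crypto_wait_req(crypto_ahash_digest(req), &wait);
}
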
diff --combined crypto/internal.h
@@@ -87,7 -87,6 +87,6 @@@ void crypto_alg_tested(const char *name
  void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
                          struct crypto_alg *nalg);
  void crypto_remove_final(struct list_head *list);
- void crypto_shoot_alg(struct crypto_alg *alg);
  struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
                                      u32 mask);
  void *crypto_create_tfm(struct crypto_alg *alg,
@@@ -104,9 -103,6 +103,9 @@@ int crypto_probing_notify(unsigned lon
  
  unsigned int crypto_alg_extsize(struct crypto_alg *alg);
  
 +int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
 +                      u32 type, u32 mask);
 +
  static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
  {
        atomic_inc(&alg->cra_refcnt);
diff --combined drivers/md/dm.c
@@@ -1147,7 -1147,7 +1147,7 @@@ static void free_rq_clone(struct reques
   * Must be called without clone's queue lock held,
   * see end_clone_request() for more details.
   */
 -static void dm_end_request(struct request *clone, int error)
 +void dm_end_request(struct request *clone, int error)
  {
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
@@@ -1345,7 -1345,7 +1345,7 @@@ static void dm_complete_request(struct 
   * Target's rq_end_io() function isn't called.
   * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
   */
 -static void dm_kill_unmapped_request(struct request *rq, int error)
 +void dm_kill_unmapped_request(struct request *rq, int error)
  {
        rq->cmd_flags |= REQ_FAILED;
        dm_complete_request(rq, error);
@@@ -1862,13 -1862,6 +1862,13 @@@ static void dm_dispatch_clone_request(s
                dm_complete_request(rq, r);
  }
  
 +void dm_dispatch_request(struct request *rq)
 +{
 +      struct dm_rq_target_io *tio = tio_from_request(rq);
 +
 +      dm_dispatch_clone_request(tio->clone, rq);
 +}
 +
  static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
  {
@@@ -2192,11 -2185,8 +2192,11 @@@ static void dm_request_fn(struct reques
                tio = tio_from_request(rq);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
 -              queue_kthread_work(&md->kworker, &tio->work);
 +              spin_unlock(q->queue_lock);
 +              if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
 +                      dm_requeue_original_request(md, rq);
                BUG_ON(!irqs_disabled());
 +              spin_lock(q->queue_lock);
        }
  
        goto out;
@@@ -2221,7 -2211,7 +2221,7 @@@ static int dm_any_congested(void *conge
                         * the query about congestion status of request_queue
                         */
                        if (dm_request_based(md))
 -                              r = md->queue->backing_dev_info.wb.state &
 +                              r = md->queue->backing_dev_info->wb.state &
                                    bdi_bits;
                        else
                                r = dm_table_any_congested(map, bdi_bits);
@@@ -2303,7 -2293,6 +2303,6 @@@ static void dm_init_md_queue(struct map
         * - must do so here (in alloc_dev callchain) before queue is used
         */
        md->queue->queuedata = md;
-       md->queue->backing_dev_info->congested_data = md;
  }
  
  static void dm_init_old_md_queue(struct mapped_device *md)
        /*
         * Initialize aspects of queue that aren't relevant for blk-mq
         */
 -      md->queue->backing_dev_info.congested_data = md;
 -      md->queue->backing_dev_info.congested_fn = dm_any_congested;
++      md->queue->backing_dev_info->congested_data = md;
 +      md->queue->backing_dev_info->congested_fn = dm_any_congested;
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
  }
  
@@@ -2396,6 -2386,12 +2396,12 @@@ static struct mapped_device *alloc_dev(
                goto bad;
  
        dm_init_md_queue(md);
+       /*
+        * default to bio-based required ->make_request_fn until DM
+        * table is loaded and md->type established. If request-based
+        * table is loaded: blk-mq will override accordingly.
+        */
+       blk_queue_make_request(md->queue, dm_make_request);
  
        md->disk = alloc_disk(1);
        if (!md->disk)
@@@ -2859,7 -2855,6 +2865,6 @@@ int dm_setup_md_queue(struct mapped_dev
                break;
        case DM_TYPE_BIO_BASED:
                dm_init_old_md_queue(md);
-               blk_queue_make_request(md->queue, dm_make_request);
                /*
                 * DM handles splitting bios as needed.  Free the bio_split bioset
                 * since it won't be used (saves 1 process per bio-based DM device).
diff --combined drivers/net/wireless/airo.c
@@@ -5848,7 -5848,7 +5848,7 @@@ static int airo_get_freq(struct net_dev
        ch = le16_to_cpu(status_rid.channel);
        if((ch > 0) && (ch < 15)) {
                fwrq->m = 100000 *
 -                      ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ);
 +                      ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ);
                fwrq->e = 1;
        } else {
                fwrq->m = ch;
@@@ -6906,7 -6906,7 +6906,7 @@@ static int airo_get_range(struct net_de
        for(i = 0; i < 14; i++) {
                range->freq[k].i = i + 1; /* List index */
                range->freq[k].m = 100000 *
 -                   ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ);
 +                   ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ);
                range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
        }
        range->num_frequency = k;
@@@ -7314,7 -7314,7 +7314,7 @@@ static inline char *airo_translate_scan
        iwe.cmd = SIOCGIWFREQ;
        iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
        iwe.u.freq.m = 100000 *
 -            ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ);
 +            ieee80211_channel_to_frequency(iwe.u.freq.m, NL80211_BAND_2GHZ);
        iwe.u.freq.e = 1;
        current_ev = iwe_stream_add_event(info, current_ev, end_buf,
                                          &iwe, IW_EV_FREQ_LEN);
@@@ -7808,16 -7808,8 +7808,8 @@@ static int readrids(struct net_device *
        case AIROGVLIST:    ridcode = RID_APLIST;       break;
        case AIROGDRVNAM:   ridcode = RID_DRVNAME;      break;
        case AIROGEHTENC:   ridcode = RID_ETHERENCAP;   break;
-       case AIROGWEPKTMP:  ridcode = RID_WEP_TEMP;
-               /* Only super-user can read WEP keys */
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               break;
-       case AIROGWEPKNV:   ridcode = RID_WEP_PERM;
-               /* Only super-user can read WEP keys */
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               break;
+       case AIROGWEPKTMP:  ridcode = RID_WEP_TEMP;     break;
+       case AIROGWEPKNV:   ridcode = RID_WEP_PERM;     break;
        case AIROGSTAT:     ridcode = RID_STATUS;       break;
        case AIROGSTATSD32: ridcode = RID_STATSDELTA;   break;
        case AIROGSTATSC32: ridcode = RID_STATS;        break;
                return -EINVAL;
        }
  
-       if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+       if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
+               /* Only super-user can read WEP keys */
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
+       }
+       if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
                return -ENOMEM;
  
        PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
diff --combined drivers/net/wireless/libertas/cfg.c
@@@ -23,7 -23,7 +23,7 @@@
  
  
  #define CHAN2G(_channel, _freq, _flags) {        \
 -      .band             = IEEE80211_BAND_2GHZ, \
 +      .band             = NL80211_BAND_2GHZ, \
        .center_freq      = (_freq),             \
        .hw_value         = (_channel),          \
        .flags            = (_flags),            \
@@@ -643,7 -643,7 +643,7 @@@ static int lbs_ret_scan(struct lbs_priv
                if (chan_no != -1) {
                        struct wiphy *wiphy = priv->wdev->wiphy;
                        int freq = ieee80211_channel_to_frequency(chan_no,
 -                                                      IEEE80211_BAND_2GHZ);
 +                                                      NL80211_BAND_2GHZ);
                        struct ieee80211_channel *channel =
                                ieee80211_get_channel(wiphy, freq);
  
@@@ -1269,7 -1269,7 +1269,7 @@@ _new_connect_scan_req(struct wiphy *wip
  {
        struct cfg80211_scan_request *creq = NULL;
        int i, n_channels = ieee80211_get_num_supported_channels(wiphy);
 -      enum ieee80211_band band;
 +      enum nl80211_band band;
  
        creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
                       n_channels * sizeof(void *),
  
        /* Scan all available channels */
        i = 0;
 -      for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
 +      for (band = 0; band < NUM_NL80211_BANDS; band++) {
                int j;
  
                if (!wiphy->bands[band])
@@@ -1853,6 -1853,8 +1853,8 @@@ static int lbs_ibss_join_existing(struc
                rates_max = rates_eid[1];
                if (rates_max > MAX_RATES) {
                        lbs_deb_join("invalid rates");
+                       rcu_read_unlock();
+                       ret = -EINVAL;
                        goto out;
                }
                rates = cmd.bss.rates;
@@@ -2171,7 -2173,7 +2173,7 @@@ int lbs_cfg_register(struct lbs_privat
        if (lbs_mesh_activated(priv))
                wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT);
  
 -      wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
 +      wdev->wiphy->bands[NL80211_BAND_2GHZ] = &lbs_band_2ghz;
  
        /*
         * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have
diff --combined drivers/net/wireless/mwifiex/scan.c
@@@ -495,13 -495,13 +495,13 @@@ mwifiex_scan_create_channel_list(struc
                                                        *scan_chan_list,
                                 u8 filtered_scan)
  {
 -      enum ieee80211_band band;
 +      enum nl80211_band band;
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *ch;
        struct mwifiex_adapter *adapter = priv->adapter;
        int chan_idx = 0, i;
  
 -      for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
 +      for (band = 0; (band < NUM_NL80211_BANDS) ; band++) {
  
                if (!priv->wdev.wiphy->bands[band])
                        continue;
@@@ -2568,6 -2568,13 +2568,13 @@@ mwifiex_cmd_append_vsie_tlv(struct mwif
                        vs_param_set->header.len =
                                cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
                                & 0x00FF) + 2);
+                       if (le16_to_cpu(vs_param_set->header.len) >
+                               MWIFIEX_MAX_VSIE_LEN) {
+                               mwifiex_dbg(priv->adapter, ERROR,
+                                           "Invalid param length!\n");
+                               break;
+                       }
                        memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
                               le16_to_cpu(vs_param_set->header.len));
                        *buffer += le16_to_cpu(vs_param_set->header.len) +
diff --combined drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
@@@ -88,33 -88,33 +88,33 @@@ static struct ieee80211_rate rtl8xxxu_r
  };
  
  static struct ieee80211_channel rtl8xxxu_channels_2g[] = {
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2412,
          .hw_value = 1, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2417,
          .hw_value = 2, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2422,
          .hw_value = 3, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2427,
          .hw_value = 4, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2432,
          .hw_value = 5, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2437,
          .hw_value = 6, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2442,
          .hw_value = 7, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2447,
          .hw_value = 8, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2452,
          .hw_value = 9, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2457,
          .hw_value = 10, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2462,
          .hw_value = 11, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2467,
          .hw_value = 12, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2472,
          .hw_value = 13, .max_power = 30 },
 -      { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
 +      { .band = NL80211_BAND_2GHZ, .center_freq = 2484,
          .hw_value = 14, .max_power = 30 }
  };
  
@@@ -5555,7 -5555,7 +5555,7 @@@ static int rtl8xxxu_parse_usb(struct rt
        u8 dir, xtype, num;
        int ret = 0;
  
-       host_interface = &interface->altsetting[0];
+       host_interface = interface->cur_altsetting;
        interface_desc = &host_interface->desc;
        endpoints = interface_desc->bNumEndpoints;
  
@@@ -5739,7 -5739,7 +5739,7 @@@ static int rtl8xxxu_probe(struct usb_in
                dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n");
                sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
        }
 -      hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
 +      hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
  
        hw->wiphy->rts_threshold = 2347;
  
diff --combined drivers/of/Kconfig
@@@ -102,12 -102,6 +102,12 @@@ config OF_RESERVED_ME
  config OF_RESOLVE
        bool
  
 +config OF_SLIMBUS
 +      def_tristate SLIMBUS
 +      depends on SLIMBUS
 +      help
 +        OpenFirmware SLIMBUS accessors
 +
  config OF_OVERLAY
        bool "Device Tree overlays"
        select OF_DYNAMIC
          While this option is selected automatically when needed, you can
          enable it manually to improve device tree unit test coverage.
  
 +config OF_BATTERYDATA
 +      def_bool y
 +      help
 +        OpenFirmware BatteryData accessors
 +
+ config OF_DMA_DEFAULT_COHERENT
+       # arches should select this if DMA is coherent by default for OF devices
+       bool
  endif # OF
diff --combined drivers/of/address.c
@@@ -788,22 -788,6 +788,22 @@@ unsigned long __weak pci_address_to_pio
  #endif
  }
  
 +const __be32 *of_get_address_by_name(struct device_node *dev, const char *name,
 +              u64 *size, unsigned int *flags)
 +{
 +      int index;
 +      if (!name)
 +              return NULL;
 +
 +      /* Try to read "reg-names" property and get the index by name */
 +      index = of_property_match_string(dev, "reg-names", name);
 +      if (index < 0)
 +              return NULL;
 +
 +      return of_get_address(dev, index, size, flags);
 +}
 +EXPORT_SYMBOL(of_get_address_by_name);
 +
  static int __of_address_to_resource(struct device_node *dev,
                const __be32 *addrp, u64 size, unsigned int flags,
                const char *name, struct resource *r)
@@@ -1025,12 -1009,16 +1025,16 @@@ EXPORT_SYMBOL_GPL(of_dma_get_range)
   * @np:       device node
   *
   * It returns true if "dma-coherent" property was found
-  * for this device in DT.
+  * for this device in the DT, or if DMA is coherent by
+  * default for OF devices on the current platform.
   */
  bool of_dma_is_coherent(struct device_node *np)
  {
        struct device_node *node = of_node_get(np);
  
+       if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+               return true;
        while (node) {
                if (of_property_read_bool(node, "dma-coherent")) {
                        of_node_put(node);
        return false;
  }
  EXPORT_SYMBOL_GPL(of_dma_is_coherent);
 +
 +void __iomem *of_iomap_by_name(struct device_node *np, const char *name)
 +{
 +      int index;
 +
 +      if (!name)
 +              return NULL;
 +
 +      /* Try to read "reg-names" property and get the index by name */
 +      index = of_property_match_string(np, "reg-names", name);
 +      if (index < 0)
 +              return NULL;
 +
 +      return of_iomap(np, index);
 +}
 +EXPORT_SYMBOL(of_iomap_by_name);
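
of_get_address_by_name() and of_iomap_by_name() added above mirror of_get_address()/of_iomap(), but resolve the index through the node's "reg-names" property instead of taking it directly. A hypothetical probe-path sketch (the "core" region name and the helper function are made up for illustration):

#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Sketch only: "core" is an invented reg-names entry. */
static int example_map_core_regs(struct device_node *np)
{
	u64 size;
	unsigned int flags;
	const __be32 *addr;
	void __iomem *base;

	/* Resolve the register window by name rather than by index. */
	addr = of_get_address_by_name(np, "core", &size, &flags);
	if (!addr)
		return -ENODEV;

	/* Same lookup, but returning an ioremap()ed pointer. */
	base = of_iomap_by_name(np, "core");
	if (!base)
		return -ENOMEM;

	/* ... program the block, then tear the mapping down ... */
	iounmap(base);
	return 0;
}
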
diff --combined drivers/scsi/ufs/ufshcd.c
@@@ -3,7 -3,7 +3,7 @@
   *
   * This code is based on drivers/scsi/ufs/ufshcd.c
   * Copyright (C) 2011-2013 Samsung India Software Operations
 - * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 + * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
   *
   * Authors:
   *    Santosh Yaraganavi <santosh.sy@samsung.com>
   */
  
  #include <linux/async.h>
 +#include <scsi/ufs/ioctl.h>
  #include <linux/devfreq.h>
 +#include <linux/nls.h>
 +#include <linux/of.h>
 +#include <linux/blkdev.h>
 +#include <asm/unaligned.h>
  
  #include "ufshcd.h"
 -#include "unipro.h"
 +#include "ufshci.h"
 +#include "ufs_quirks.h"
 +#include "ufs-debugfs.h"
 +#include "ufs-qcom.h"
 +
 +#define CREATE_TRACE_POINTS
 +#include <trace/events/ufs.h>
 +
 +#ifdef CONFIG_DEBUG_FS
 +
 +static int ufshcd_tag_req_type(struct request *rq)
 +{
 +      int rq_type = TS_WRITE;
 +
 +      if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
 +              rq_type = TS_NOT_SUPPORTED;
 +      else if (rq->cmd_flags & REQ_FLUSH)
 +              rq_type = TS_FLUSH;
 +      else if (rq_data_dir(rq) == READ)
 +              rq_type = (rq->cmd_flags & REQ_URGENT) ?
 +                      TS_URGENT_READ : TS_READ;
 +      else if (rq->cmd_flags & REQ_URGENT)
 +              rq_type = TS_URGENT_WRITE;
 +
 +      return rq_type;
 +}
 +
 +static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
 +{
 +      ufsdbg_set_err_state(hba);
 +      if (type < UFS_ERR_MAX)
 +              hba->ufs_stats.err_stats[type]++;
 +}
 +
 +static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
 +{
 +      struct request *rq =
 +              hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
 +      u64 **tag_stats = hba->ufs_stats.tag_stats;
 +      int rq_type;
 +
 +      if (!hba->ufs_stats.enabled)
 +              return;
 +
 +      tag_stats[tag][TS_TAG]++;
 +      if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
 +              return;
 +
 +      WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
 +      rq_type = ufshcd_tag_req_type(rq);
 +      if (!(rq_type < 0 || rq_type > TS_NUM_STATS))
 +              tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
 +}
 +
 +static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
 +              struct scsi_cmnd *cmd)
 +{
 +      struct request *rq = cmd ? cmd->request : NULL;
 +
 +      if (rq && rq->cmd_type & REQ_TYPE_FS)
 +              hba->ufs_stats.q_depth--;
 +}
 +
 +static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 +{
 +      int rq_type;
 +      struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
 +      s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
 +              lrbp->issue_time_stamp);
 +
 +      /* update general request statistics */
 +      if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
 +              hba->ufs_stats.req_stats[TS_TAG].min = delta;
 +      hba->ufs_stats.req_stats[TS_TAG].count++;
 +      hba->ufs_stats.req_stats[TS_TAG].sum += delta;
 +      if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
 +              hba->ufs_stats.req_stats[TS_TAG].max = delta;
 +      if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
 +                      hba->ufs_stats.req_stats[TS_TAG].min = delta;
 +
 +      rq_type = ufshcd_tag_req_type(rq);
 +      if (rq_type == TS_NOT_SUPPORTED)
 +              return;
 +
 +      /* update request type specific statistics */
 +      if (hba->ufs_stats.req_stats[rq_type].count == 0)
 +              hba->ufs_stats.req_stats[rq_type].min = delta;
 +      hba->ufs_stats.req_stats[rq_type].count++;
 +      hba->ufs_stats.req_stats[rq_type].sum += delta;
 +      if (delta > hba->ufs_stats.req_stats[rq_type].max)
 +              hba->ufs_stats.req_stats[rq_type].max = delta;
 +      if (delta < hba->ufs_stats.req_stats[rq_type].min)
 +                      hba->ufs_stats.req_stats[rq_type].min = delta;
 +}
 +
 +static void
 +ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
 +{
 +      if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
 +              hba->ufs_stats.query_stats_arr[opcode][idn]++;
 +}
 +
 +#else
 +static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
 +{
 +}
 +
 +static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
 +              struct scsi_cmnd *cmd)
 +{
 +}
 +
 +static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
 +{
 +}
 +
 +static inline
 +void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 +{
 +}
 +
 +static inline
 +void ufshcd_update_query_stats(struct ufs_hba *hba,
 +                             enum query_opcode opcode, u8 idn)
 +{
 +}
 +#endif
 +
 +#define PWR_INFO_MASK 0xF
 +#define PWR_RX_OFFSET 4
 +
 +#define UFSHCD_REQ_SENSE_SIZE 18
  
  #define UFSHCD_ENABLE_INTRS   (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
  #define NOP_OUT_TIMEOUT    30 /* msecs */
  
  /* Query request retries */
 -#define QUERY_REQ_RETRIES 10
 +#define QUERY_REQ_RETRIES 3
  /* Query request timeout */
 -#define QUERY_REQ_TIMEOUT 30 /* msec */
 +#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
  
  /* Task management command timeout */
  #define TM_CMD_TIMEOUT        100 /* msecs */
  
 +/* maximum number of retries for a general UIC command  */
 +#define UFS_UIC_COMMAND_RETRIES 3
 +
  /* maximum number of link-startup retries */
  #define DME_LINKSTARTUP_RETRIES 3
  
 +/* Maximum retries for Hibern8 enter */
 +#define UIC_HIBERN8_ENTER_RETRIES 3
 +
  /* maximum number of reset retries before giving up */
  #define MAX_HOST_RESET_RETRIES 5
  
  /* Interrupt aggregation default timeout, unit: 40us */
  #define INT_AGGR_DEF_TO       0x02
  
 +/* default value of auto suspend is 3 seconds */
 +#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */
 +
 +#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE   10
 +#define UFSHCD_CLK_GATING_DELAY_MS_PERF               50
 +
 +/* IOCTL opcode for command - ufs set device read only */
 +#define UFS_IOCTL_BLKROSET      BLKROSET
 +
 +#define UFSHCD_DEFAULT_LANES_PER_DIRECTION            2
 +
  #define ufshcd_toggle_vreg(_dev, _vreg, _on)                          \
        ({                                                              \
                int _ret;                                               \
                _ret;                                                   \
        })
  
 +#define ufshcd_hex_dump(prefix_str, buf, len) \
 +print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
 +
  static u32 ufs_query_desc_max_size[] = {
        QUERY_DESC_DEVICE_MAX_SIZE,
        QUERY_DESC_CONFIGURAION_MAX_SIZE,
        QUERY_DESC_RFU_MAX_SIZE,
        QUERY_DESC_GEOMETRY_MAZ_SIZE,
        QUERY_DESC_POWER_MAX_SIZE,
 +      QUERY_DESC_HEALTH_MAX_SIZE,
        QUERY_DESC_RFU_MAX_SIZE,
  };
  
@@@ -276,11 -119,9 +276,11 @@@ enum 
  /* UFSHCD UIC layer error flags */
  enum {
        UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
 -      UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
 -      UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
 -      UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
 +      UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
 +      UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
 +      UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
 +      UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
 +      UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
  };
  
  /* Interrupt configuration options */
@@@ -290,8 -131,6 +290,8 @@@ enum 
        UFSHCD_INT_CLEAR,
  };
  
 +#define DEFAULT_UFSHCD_DBG_PRINT_EN   UFSHCD_DBG_PRINT_ALL
 +
  #define ufshcd_set_eh_in_progress(h) \
        (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
  #define ufshcd_eh_in_progress(h) \
@@@ -333,1706 -172,489 +333,1706 @@@ ufs_get_pm_lvl_to_link_pwr_state(enum u
        return ufs_pm_lvl_states[lvl].link_state;
  }
  
 -static void ufshcd_tmc_handler(struct ufs_hba *hba);
 +static inline enum ufs_pm_level
 +ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
 +                                      enum uic_link_state link_state)
 +{
 +      enum ufs_pm_level lvl;
 +
 +      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
 +              if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
 +                      (ufs_pm_lvl_states[lvl].link_state == link_state))
 +                      return lvl;
 +      }
 +
 +      /* if no match found, return the level 0 */
 +      return UFS_PM_LVL_0;
 +}
 +
 +static inline bool ufshcd_is_valid_pm_lvl(int lvl)
 +{
 +      if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
 +              return true;
 +      else
 +              return false;
 +}
 +
 +static irqreturn_t ufshcd_intr(int irq, void *__hba);
 +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
  static void ufshcd_async_scan(void *data, async_cookie_t cookie);
  static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
  static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
  static void ufshcd_hba_exit(struct ufs_hba *hba);
  static int ufshcd_probe_hba(struct ufs_hba *hba);
 -static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 -                               bool skip_ref_clk);
 -static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
 -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 -static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 +static int ufshcd_enable_clocks(struct ufs_hba *hba);
 +static int ufshcd_disable_clocks(struct ufs_hba *hba,
 +                               bool is_gating_context);
 +static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
 +                                            bool is_gating_context);
 +static void ufshcd_hold_all(struct ufs_hba *hba);
 +static void ufshcd_release_all(struct ufs_hba *hba);
 +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
  static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 +static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
  static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 -static irqreturn_t ufshcd_intr(int irq, void *__hba);
 -static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
 -              struct ufs_pa_layer_attr *desired_pwr_mode);
 -static int ufshcd_change_power_mode(struct ufs_hba *hba,
 -                           struct ufs_pa_layer_attr *pwr_mode);
 +static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
 +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 +static void ufshcd_release_all(struct ufs_hba *hba);
 +static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
 +static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
 +static int ufshcd_devfreq_target(struct device *dev,
 +                              unsigned long *freq, u32 flags);
 +static int ufshcd_devfreq_get_dev_status(struct device *dev,
 +              struct devfreq_dev_status *stat);
 +
 +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 +static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
 +      .upthreshold = 70,
 +      .downdifferential = 65,
 +      .simple_scaling = 1,
 +};
 +
 +static void *gov_data = &ufshcd_ondemand_data;
 +#else
 +static void *gov_data;
 +#endif
 +
 +static struct devfreq_dev_profile ufs_devfreq_profile = {
 +      .polling_ms     = 60,
 +      .target         = ufshcd_devfreq_target,
 +      .get_dev_status = ufshcd_devfreq_get_dev_status,
 +};
  
 -static inline int ufshcd_enable_irq(struct ufs_hba *hba)
 +static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
  {
 -      int ret = 0;
 +      return tag >= 0 && tag < hba->nutrs;
 +}
  
 +static inline void ufshcd_enable_irq(struct ufs_hba *hba)
 +{
        if (!hba->is_irq_enabled) {
 -              ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
 -                              hba);
 -              if (ret)
 -                      dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
 -                              __func__, ret);
 +              enable_irq(hba->irq);
                hba->is_irq_enabled = true;
        }
 -
 -      return ret;
  }
  
  static inline void ufshcd_disable_irq(struct ufs_hba *hba)
  {
        if (hba->is_irq_enabled) {
 -              free_irq(hba->irq, hba);
 +              disable_irq(hba->irq);
                hba->is_irq_enabled = false;
        }
  }
  
 -/*
 - * ufshcd_wait_for_register - wait for register value to change
 - * @hba - per-adapter interface
 - * @reg - mmio register offset
 - * @mask - mask to apply to read register value
 - * @val - wait condition
 - * @interval_us - polling interval in microsecs
 - * @timeout_ms - timeout in millisecs
 - *
 - * Returns -ETIMEDOUT on error, zero on success
 - */
 -static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 -              u32 val, unsigned long interval_us, unsigned long timeout_ms)
 +void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
  {
 -      int err = 0;
 -      unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 -
 -      /* ignore bits that we don't intend to wait on */
 -      val = val & mask;
 -
 -      while ((ufshcd_readl(hba, reg) & mask) != val) {
 -              /* wakeup within 50us of expiry */
 -              usleep_range(interval_us, interval_us + 50);
 -
 -              if (time_after(jiffies, timeout)) {
 -                      if ((ufshcd_readl(hba, reg) & mask) != val)
 -                              err = -ETIMEDOUT;
 -                      break;
 -              }
 -      }
 +      unsigned long flags;
 +      bool unblock = false;
  
 -      return err;
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->scsi_block_reqs_cnt--;
 +      unblock = !hba->scsi_block_reqs_cnt;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      if (unblock)
 +              scsi_unblock_requests(hba->host);
  }
 +EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
  
 -/**
 - * ufshcd_get_intr_mask - Get the interrupt bit mask
 - * @hba - Pointer to adapter instance
 - *
 - * Returns interrupt bit mask per version
 - */
 -static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 +static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
  {
 -      if (hba->ufs_version == UFSHCI_VERSION_10)
 -              return INTERRUPT_MASK_ALL_VER_10;
 -      else
 -              return INTERRUPT_MASK_ALL_VER_11;
 +      if (!hba->scsi_block_reqs_cnt++)
 +              scsi_block_requests(hba->host);
  }
  
 -/**
 - * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 - * @hba - Pointer to adapter instance
 - *
 - * Returns UFSHCI version supported by the controller
 - */
 -static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 +void ufshcd_scsi_block_requests(struct ufs_hba *hba)
  {
 -      if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
 -              return ufshcd_vops_get_ufs_hci_version(hba);
 +      unsigned long flags;
  
 -      return ufshcd_readl(hba, REG_UFS_VERSION);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      __ufshcd_scsi_block_requests(hba);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
  }
 +EXPORT_SYMBOL(ufshcd_scsi_block_requests);
  
 -/**
 - * ufshcd_is_device_present - Check if any device connected to
 - *                          the host controller
 - * @hba: pointer to adapter instance
 - *
 - * Returns 1 if device present, 0 if no device detected
 - */
 -static inline int ufshcd_is_device_present(struct ufs_hba *hba)
 +static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
  {
 -      return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
 -                                              DEVICE_PRESENT) ? 1 : 0;
 +      int ret = 0;
 +
 +      if (!hba->pctrl)
 +              return 0;
 +
 +      /* Assert reset if ctrl == true */
 +      if (ctrl)
 +              ret = pinctrl_select_state(hba->pctrl,
 +                      pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
 +      else
 +              ret = pinctrl_select_state(hba->pctrl,
 +                      pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));
 +
 +      if (ret < 0)
 +              dev_err(hba->dev, "%s: %s failed with err %d\n",
 +                      __func__, ctrl ? "Assert" : "Deassert", ret);
 +
 +      return ret;
  }
  
 -/**
 - * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 - * @lrb: pointer to local command reference block
 - *
 - * This function is used to get the OCS field from UTRD
 - * Returns the OCS field in the UTRD
 - */
 -static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 +static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
  {
 -      return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 +      return ufshcd_device_reset_ctrl(hba, true);
  }
  
 -/**
 - * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 - * @task_req_descp: pointer to utp_task_req_desc structure
 - *
 - * This function is used to get the OCS field from UTMRD
 - * Returns the OCS field in the UTMRD
 - */
 -static inline int
 -ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
 +static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
  {
 -      return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
 +      return ufshcd_device_reset_ctrl(hba, false);
  }
  
 -/**
 - * ufshcd_get_tm_free_slot - get a free slot for task management request
 - * @hba: per adapter instance
 - * @free_slot: pointer to variable with available slot value
 - *
 - * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 - * Returns 0 if free slot is not available, else return 1 with tag value
 - * in @free_slot.
 - */
 -static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
 +static int ufshcd_reset_device(struct ufs_hba *hba)
  {
 -      int tag;
 -      bool ret = false;
 +      int ret;
  
 -      if (!free_slot)
 +      /* reset the connected UFS device */
 +      ret = ufshcd_assert_device_reset(hba);
 +      if (ret)
                goto out;
 +      /*
 +       * The reset signal is active low.
 +       * The UFS device shall detect more than or equal to 1us of positive
 +       * or negative RST_n pulse width.
 +       * To be on safe side, keep the reset low for atleast 10us.
 +       */
 +      usleep_range(10, 15);
  
 -      do {
 -              tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
 -              if (tag >= hba->nutmrs)
 -                      goto out;
 -      } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
 -
 -      *free_slot = tag;
 -      ret = true;
 +      ret = ufshcd_deassert_device_reset(hba);
 +      if (ret)
 +              goto out;
 +      /* same as assert, wait for atleast 10us after deassert */
 +      usleep_range(10, 15);
  out:
        return ret;
  }
  
 -static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
 +/* replace non-printable or non-ASCII characters with spaces */
 +static inline void ufshcd_remove_non_printable(char *val)
  {
 -      clear_bit_unlock(slot, &hba->tm_slots_in_use);
 +      if (!val || !*val)
 +              return;
 +
 +      if (*val < 0x20 || *val > 0x7e)
 +              *val = ' ';
  }
  
 -/**
 - * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 - * @hba: per adapter instance
 - * @pos: position of the bit to be cleared
 - */
 -static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
 +#define UFSHCD_MAX_CMD_LOGGING        200
 +
 +#ifdef CONFIG_TRACEPOINTS
 +static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
 +                      struct ufshcd_cmd_log_entry *entry, u8 opcode)
  {
 -      ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 -}
 +      if (trace_ufshcd_command_enabled()) {
 +              u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
  
 -/**
 - * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 - * @reg: Register value of host controller status
 - *
 - * Returns integer, 0 on Success and positive value if failed
 - */
 -static inline int ufshcd_get_lists_status(u32 reg)
 +              trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
 +                                   entry->doorbell, entry->transfer_len, intr,
 +                                   entry->lba, opcode);
 +      }
 +}
 +#else
 +static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
 +                      struct ufshcd_cmd_log_entry *entry, u8 opcode)
  {
 -      /*
 -       * The mask 0xFF is for the following HCS register bits
 -       * Bit          Description
 -       *  0           Device Present
 -       *  1           UTRLRDY
 -       *  2           UTMRLRDY
 -       *  3           UCRDY
 -       *  4           HEI
 -       *  5           DEI
 -       * 6-7          reserved
 -       */
 -      return (((reg) & (0xFF)) >> 1) ^ (0x07);
  }
 +#endif
  
 -/**
 - * ufshcd_get_uic_cmd_result - Get the UIC command result
 - * @hba: Pointer to adapter instance
 - *
 - * This function gets the result of UIC command completion
 - * Returns 0 on success, non zero value on error
 - */
 -static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 +#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
 +static void ufshcd_cmd_log_init(struct ufs_hba *hba)
  {
 -      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
 -             MASK_UIC_COMMAND_RESULT;
 +      /* Allocate log entries */
 +      if (!hba->cmd_log.entries) {
 +              hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
 +                      sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
 +              if (!hba->cmd_log.entries)
 +                      return;
 +              dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
 +                              __func__);
 +      }
  }
  
 -/**
 - * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 - * @hba: Pointer to adapter instance
 - *
 - * This function gets UIC command argument3
 - * Returns 0 on success, non zero value on error
 - */
 -static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
 +#ifdef CONFIG_TRACEPOINTS
 +static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 +                           unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
 +                           sector_t lba, int transfer_len, u8 opcode)
  {
 -      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
 +      struct ufshcd_cmd_log_entry *entry;
 +
 +      if (!hba->cmd_log.entries)
 +              return;
 +
 +      entry = &hba->cmd_log.entries[hba->cmd_log.pos];
 +      entry->lun = lun;
 +      entry->str = str;
 +      entry->cmd_type = cmd_type;
 +      entry->cmd_id = cmd_id;
 +      entry->lba = lba;
 +      entry->transfer_len = transfer_len;
 +      entry->idn = idn;
 +      entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      entry->tag = tag;
 +      entry->tstamp = ktime_get();
 +      entry->outstanding_reqs = hba->outstanding_reqs;
 +      entry->seq_num = hba->cmd_log.seq_num;
 +      hba->cmd_log.seq_num++;
 +      hba->cmd_log.pos =
 +                      (hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
 +
 +      ufshcd_add_command_trace(hba, entry, opcode);
 +}
 +#endif
 +
 +static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 +      unsigned int tag, u8 cmd_id, u8 idn)
 +{
 +      __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
 +                       0xff, (sector_t)-1, -1, -1);
 +}
 +
 +static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
 +{
 +      ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
 +}
 +
 +static void ufshcd_print_cmd_log(struct ufs_hba *hba)
 +{
 +      int i;
 +      int pos;
 +      struct ufshcd_cmd_log_entry *p;
 +
 +      if (!hba->cmd_log.entries)
 +              return;
 +
 +      pos = hba->cmd_log.pos;
 +      for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
 +              p = &hba->cmd_log.entries[pos];
 +              pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
 +
 +              if (ktime_to_us(p->tstamp)) {
 +                      pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
 +                              p->cmd_type, p->str, p->seq_num,
 +                              p->lun, p->cmd_id, (unsigned long long)p->lba,
 +                              p->transfer_len, p->tag, p->doorbell,
 +                              p->outstanding_reqs, p->idn,
 +                              ktime_to_us(p->tstamp));
  +                      usleep_range(1000, 1100);
 +              }
 +      }
 +}
 +#else
 +static void ufshcd_cmd_log_init(struct ufs_hba *hba)
 +{
 +}
 +
 +#ifdef CONFIG_TRACEPOINTS
 +static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 +                           unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
 +                           sector_t lba, int transfer_len, u8 opcode)
 +{
 +      struct ufshcd_cmd_log_entry entry;
 +
 +      entry.str = str;
 +      entry.lba = lba;
 +      entry.cmd_id = cmd_id;
 +      entry.transfer_len = transfer_len;
 +      entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      entry.tag = tag;
 +
 +      ufshcd_add_command_trace(hba, &entry, opcode);
 +}
 +#endif
 +
 +static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
 +{
 +}
 +
 +static void ufshcd_print_cmd_log(struct ufs_hba *hba)
 +{
 +}
 +#endif
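
The command log above is a fixed-size ring: cmd_log.pos always points at the
next slot to overwrite and printing starts from pos so entries come out
oldest-first. A standalone sketch of the same indexing (sizes and names below
are illustrative, not the driver's):

#include <stdio.h>

#define CMD_LOG_SIZE 8  /* the driver uses UFSHCD_MAX_CMD_LOGGING (200) */

struct cmd_log_entry {
        unsigned int tag;
        unsigned int seq_num;
        const char *str;
};

static struct cmd_log_entry entries[CMD_LOG_SIZE];
static unsigned int pos;        /* next slot to overwrite */
static unsigned int seq_num;    /* monotonically increasing */

static void cmd_log(const char *str, unsigned int tag)
{
        struct cmd_log_entry *e = &entries[pos];

        e->str = str;
        e->tag = tag;
        e->seq_num = seq_num++;
        pos = (pos + 1) % CMD_LOG_SIZE;         /* wrap, oldest entry dropped */
}

static void print_cmd_log(void)
{
        unsigned int i, p = pos;                /* oldest entry lives at pos */

        for (i = 0; i < CMD_LOG_SIZE; i++) {
                struct cmd_log_entry *e = &entries[p];

                p = (p + 1) % CMD_LOG_SIZE;
                if (e->str)
                        printf("seq=%u tag=%u %s\n", e->seq_num, e->tag, e->str);
        }
}

int main(void)
{
        unsigned int i;

        for (i = 0; i < 12; i++)
                cmd_log(i % 2 ? "complete" : "send", i);
        print_cmd_log();        /* only the last CMD_LOG_SIZE entries survive */
        return 0;
}
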
 +
 +#ifdef CONFIG_TRACEPOINTS
 +static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
 +                                      unsigned int tag, const char *str)
 +{
 +      struct ufshcd_lrb *lrbp;
 +      char *cmd_type = NULL;
 +      u8 opcode = 0;
 +      u8 cmd_id = 0, idn = 0;
 +      sector_t lba = -1;
 +      int transfer_len = -1;
 +
 +      lrbp = &hba->lrb[tag];
 +
 +      if (lrbp->cmd) { /* data phase exists */
 +              opcode = (u8)(*lrbp->cmd->cmnd);
 +              if ((opcode == READ_10) || (opcode == WRITE_10)) {
 +                      /*
 +                       * Currently we only fully trace read(10) and write(10)
 +                       * commands
 +                       */
 +                      if (lrbp->cmd->request && lrbp->cmd->request->bio)
 +                              lba =
 +                              lrbp->cmd->request->bio->bi_iter.bi_sector;
 +                      transfer_len = be32_to_cpu(
 +                              lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
 +              }
 +      }
 +
 +      if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
 +              cmd_type = "scsi";
 +              cmd_id = (u8)(*lrbp->cmd->cmnd);
 +      } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 +              if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
 +                      cmd_type = "nop";
 +                      cmd_id = 0;
 +              } else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
 +                      cmd_type = "query";
 +                      cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
 +                      idn = hba->dev_cmd.query.request.upiu_req.idn;
 +              }
 +      }
 +
 +      __ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
 +                       lrbp->lun, lba, transfer_len, opcode);
 +}
 +#else
 +static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
 +                                      unsigned int tag, const char *str)
 +{
 +}
 +#endif
 +
 +static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
 +{
 +      struct ufs_clk_info *clki;
 +      struct list_head *head = &hba->clk_list_head;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
 +              return;
 +
 +      if (!head || list_empty(head))
 +              return;
 +
 +      list_for_each_entry(clki, head, list) {
 +              if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
 +                              clki->max_freq)
 +                      dev_err(hba->dev, "clk: %s, rate: %u\n",
 +                                      clki->name, clki->curr_freq);
 +      }
 +}
 +
 +static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
 +              struct ufs_uic_err_reg_hist *err_hist, char *err_name)
 +{
 +      int i;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
 +              return;
 +
 +      for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
 +              int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
 +
 +              if (err_hist->reg[p] == 0)
 +                      continue;
 +              dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
 +                      err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
 +      }
 +}
 +
 +static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
 +{
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
 +              return;
 +
 +      /*
 +       * hex_dump reads its data without the readl macro. This might
  +       * cause inconsistency issues on some platforms, as the printed
  +       * values may be from cache and not the most recent value.
  +       * To know whether you are looking at an un-cached version, verify
  +       * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
  +       * invoked during the platform/pci probe function.
 +       */
 +      ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
 +      dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
 +              hba->ufs_version, hba->capabilities);
 +      dev_err(hba->dev,
 +              "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
 +              (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
 +      dev_err(hba->dev,
 +              "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
 +              ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
 +              hba->ufs_stats.hibern8_exit_cnt);
 +
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
 +      ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
 +
 +      ufshcd_print_clk_freqs(hba);
 +
 +      ufshcd_vops_dbg_register_dump(hba, no_sleep);
 +}
 +
 +static void ufshcd_print_host_regs(struct ufs_hba *hba)
 +{
 +      __ufshcd_print_host_regs(hba, false);
 +}
 +
 +static
 +void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
 +{
 +      struct ufshcd_lrb *lrbp;
 +      int prdt_length;
 +      int tag;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
 +              return;
 +
 +      for_each_set_bit(tag, &bitmap, hba->nutrs) {
 +              lrbp = &hba->lrb[tag];
 +
 +              dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
 +                              tag, ktime_to_us(lrbp->issue_time_stamp));
 +              dev_err(hba->dev,
 +                      "UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
 +                      tag, (u64)lrbp->utrd_dma_addr);
 +              ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
 +                              sizeof(struct utp_transfer_req_desc));
 +              dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
 +                      (u64)lrbp->ucd_req_dma_addr);
 +              ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
 +                              sizeof(struct utp_upiu_req));
 +              dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
 +                      (u64)lrbp->ucd_rsp_dma_addr);
 +              ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
 +                              sizeof(struct utp_upiu_rsp));
 +              prdt_length =
 +                      le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
 +              dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries  phys@0x%llx",
 +                      tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
 +              if (pr_prdt)
 +                      ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
 +                              sizeof(struct ufshcd_sg_entry) * prdt_length);
 +      }
 +}
 +
 +static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 +{
 +      struct utp_task_req_desc *tmrdp;
 +      int tag;
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
 +              return;
 +
 +      for_each_set_bit(tag, &bitmap, hba->nutmrs) {
 +              tmrdp = &hba->utmrdl_base_addr[tag];
 +              dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
 +              ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
 +                              sizeof(struct request_desc_header));
 +              dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
 +                              tag);
 +              ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
 +                              sizeof(struct utp_upiu_req));
 +              dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
 +                              tag);
 +              ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
 +                              sizeof(struct utp_task_req_desc));
 +      }
 +}
 +
 +static void ufshcd_print_fsm_state(struct ufs_hba *hba)
 +{
 +      int err = 0, tx_fsm_val = 0, rx_fsm_val = 0;
 +
 +      err = ufshcd_dme_get(hba,
 +                      UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
 +                      UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
 +                      &tx_fsm_val);
 +      dev_err(hba->dev, "%s: TX_FSM_STATE = %u, err = %d\n", __func__,
 +                      tx_fsm_val, err);
 +      err = ufshcd_dme_get(hba,
 +                      UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE,
 +                      UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
 +                      &rx_fsm_val);
 +      dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__,
 +                      rx_fsm_val, err);
 +}
 +
 +static void ufshcd_print_host_state(struct ufs_hba *hba)
 +{
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
 +              return;
 +
 +      dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
 +      dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
 +              hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
 +      dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
 +              hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
 +      dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
 +      dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
 +              hba->pm_op_in_progress, hba->is_sys_suspended);
 +      dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
 +              hba->auto_bkops_enabled, hba->host->host_self_blocked);
 +      dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
 +              hba->clk_gating.state, hba->hibern8_on_idle.state);
 +      dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
 +              hba->eh_flags, hba->req_abort_count);
 +      dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
 +              hba->capabilities, hba->caps);
 +      dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
 +              hba->dev_quirks);
  }
  
  /**
 - * ufshcd_get_req_rsp - returns the TR response transaction type
 - * @ucd_rsp_ptr: pointer to response UPIU
  + * ufshcd_print_pwr_info - print power params as saved in hba power info
 + * @hba: per-adapter instance
   */
 -static inline int
 -ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
 +static void ufshcd_print_pwr_info(struct ufs_hba *hba)
 +{
 +      char *names[] = {
 +              "INVALID MODE",
 +              "FAST MODE",
 +              "SLOW_MODE",
 +              "INVALID MODE",
 +              "FASTAUTO_MODE",
 +              "SLOWAUTO_MODE",
 +              "INVALID MODE",
 +      };
 +
 +      if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
 +              return;
 +
 +      dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
 +               __func__,
 +               hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
 +               hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
 +               names[hba->pwr_info.pwr_rx],
 +               names[hba->pwr_info.pwr_tx],
 +               hba->pwr_info.hs_rate);
 +}
 +
 +/*
 + * ufshcd_wait_for_register - wait for register value to change
 + * @hba - per-adapter interface
 + * @reg - mmio register offset
 + * @mask - mask to apply to read register value
 + * @val - wait condition
 + * @interval_us - polling interval in microsecs
 + * @timeout_ms - timeout in millisecs
 + * @can_sleep - perform sleep or just spin
 + * Returns -ETIMEDOUT on error, zero on success
 + */
 +int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 +                              u32 val, unsigned long interval_us,
 +                              unsigned long timeout_ms, bool can_sleep)
  {
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
 +      int err = 0;
 +      unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 +
 +      /* ignore bits that we don't intend to wait on */
 +      val = val & mask;
 +
 +      while ((ufshcd_readl(hba, reg) & mask) != val) {
 +              if (can_sleep)
 +                      usleep_range(interval_us, interval_us + 50);
 +              else
 +                      udelay(interval_us);
 +              if (time_after(jiffies, timeout)) {
 +                      if ((ufshcd_readl(hba, reg) & mask) != val)
 +                              err = -ETIMEDOUT;
 +                      break;
 +              }
 +      }
 +
 +      return err;
  }
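
The polling loop above masks the expected value, sleeps (or spins) between
reads and re-checks the register once more after the deadline before declaring
-ETIMEDOUT. A self-contained userspace sketch of the same shape, polling a fake
in-memory register (wait_for_register() here is a hypothetical helper, not the
driver function):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static volatile uint32_t fake_reg;

static int64_t now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static int wait_for_register(volatile uint32_t *reg, uint32_t mask,
                             uint32_t val, long interval_us, long timeout_ms)
{
        int64_t deadline = now_us() + timeout_ms * 1000;

        val &= mask;                            /* ignore bits we don't wait on */
        while ((*reg & mask) != val) {
                struct timespec ts = { 0, interval_us * 1000L };

                nanosleep(&ts, NULL);
                if (now_us() > deadline) {
                        if ((*reg & mask) != val)
                                return -ETIMEDOUT;
                        break;
                }
        }
        return 0;
}

int main(void)
{
        fake_reg = 0x1;
        printf("bit already set -> %d\n",
               wait_for_register(&fake_reg, 0x1, 0x1, 50, 10));
        printf("bit never set   -> %d\n",
               wait_for_register(&fake_reg, 0x2, 0x2, 50, 10));
        return 0;
}
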
  
  /**
 - * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 - * @ucd_rsp_ptr: pointer to response UPIU
 + * ufshcd_get_intr_mask - Get the interrupt bit mask
 + * @hba - Pointer to adapter instance
   *
 - * This function gets the response status and scsi_status from response UPIU
 - * Returns the response result code.
 + * Returns interrupt bit mask per version
   */
 -static inline int
 -ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
 +static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
  {
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 +      u32 intr_mask = 0;
 +
 +      switch (hba->ufs_version) {
 +      case UFSHCI_VERSION_10:
 +              intr_mask = INTERRUPT_MASK_ALL_VER_10;
 +              break;
 +      /* allow fall through */
 +      case UFSHCI_VERSION_11:
 +      case UFSHCI_VERSION_20:
 +              intr_mask = INTERRUPT_MASK_ALL_VER_11;
 +              break;
 +      /* allow fall through */
 +      case UFSHCI_VERSION_21:
 +      default:
 +              intr_mask = INTERRUPT_MASK_ALL_VER_21;
 +      }
 +
 +      if (!ufshcd_is_crypto_supported(hba))
 +              intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;
 +
 +      return intr_mask;
  }
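
A compilable sketch of the same version-to-mask selection, with the default
label catching unknown or newer controller versions (the version and mask
constants below are illustrative placeholders, not taken from ufshci.h):

#include <stdint.h>
#include <stdio.h>

#define VER_10          0x00010000u
#define VER_11          0x00010100u
#define VER_20          0x00000200u
#define VER_21          0x00000210u

#define MASK_ALL_VER_10 0x00030FFFu
#define MASK_ALL_VER_11 0x00031FFFu
#define MASK_ALL_VER_21 0x00071FFFu

static uint32_t get_intr_mask(uint32_t version)
{
        switch (version) {
        case VER_10:
                return MASK_ALL_VER_10;
        case VER_11:
        case VER_20:
                return MASK_ALL_VER_11;
        case VER_21:
        default:
                return MASK_ALL_VER_21;
        }
}

int main(void)
{
        printf("1.0     -> 0x%x\n", get_intr_mask(VER_10));
        printf("2.0     -> 0x%x\n", get_intr_mask(VER_20));
        printf("unknown -> 0x%x\n", get_intr_mask(0xdeadbeefu));
        return 0;
}
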
  
 -/*
 - * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 - *                            from response UPIU
 - * @ucd_rsp_ptr: pointer to response UPIU
 +/**
 + * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 + * @hba - Pointer to adapter instance
   *
 - * Return the data segment length.
 + * Returns UFSHCI version supported by the controller
   */
 -static inline unsigned int
 -ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
 +static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
  {
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 -              MASK_RSP_UPIU_DATA_SEG_LEN;
 +      if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
 +              return ufshcd_vops_get_ufs_hci_version(hba);
 +
 +      return ufshcd_readl(hba, REG_UFS_VERSION);
  }
  
  /**
 - * ufshcd_is_exception_event - Check if the device raised an exception event
 - * @ucd_rsp_ptr: pointer to response UPIU
 + * ufshcd_is_device_present - Check if any device connected to
 + *                          the host controller
 + * @hba: pointer to adapter instance
   *
 - * The function checks if the device raised an exception event indicated in
 - * the Device Information field of response UPIU.
 + * Returns 1 if device present, 0 if no device detected
 + */
 +static inline int ufshcd_is_device_present(struct ufs_hba *hba)
 +{
 +      return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
 +                                              DEVICE_PRESENT) ? 1 : 0;
 +}
 +
 +/**
 + * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 + * @lrb: pointer to local command reference block
   *
 - * Returns true if exception is raised, false otherwise.
 + * This function is used to get the OCS field from UTRD
 + * Returns the OCS field in the UTRD
   */
 -static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 +static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 +{
 +      return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 +}
 +
 +/**
 + * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 + * @task_req_descp: pointer to utp_task_req_desc structure
 + *
 + * This function is used to get the OCS field from UTMRD
 + * Returns the OCS field in the UTMRD
 + */
 +static inline int
 +ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
 +{
 +      return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
 +}
 +
 +/**
 + * ufshcd_get_tm_free_slot - get a free slot for task management request
 + * @hba: per adapter instance
 + * @free_slot: pointer to variable with available slot value
 + *
 + * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 + * Returns 0 if free slot is not available, else return 1 with tag value
 + * in @free_slot.
 + */
 +static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
 +{
 +      int tag;
 +      bool ret = false;
 +
 +      if (!free_slot)
 +              goto out;
 +
 +      do {
 +              tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
 +              if (tag >= hba->nutmrs)
 +                      goto out;
 +      } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
 +
 +      *free_slot = tag;
 +      ret = true;
 +out:
 +      return ret;
 +}
 +
 +static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
 +{
 +      clear_bit_unlock(slot, &hba->tm_slots_in_use);
 +}
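
ufshcd_get_tm_free_slot() retries whenever the zero bit it found gets claimed
by someone else between the search and the test_and_set. A standalone C11
sketch of the same find-then-claim loop using stdatomic (slot count and helper
names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_SLOTS 8     /* the driver uses hba->nutmrs */

static atomic_ulong slots_in_use;

static bool get_free_slot(int *free_slot)
{
        for (;;) {
                unsigned long cur = atomic_load(&slots_in_use);
                int tag;

                for (tag = 0; tag < NUM_SLOTS; tag++)
                        if (!(cur & (1UL << tag)))
                                break;
                if (tag >= NUM_SLOTS)
                        return false;           /* all slots busy */

                /* claim atomically; retry if the bit was set meanwhile */
                if (!(atomic_fetch_or(&slots_in_use, 1UL << tag) &
                      (1UL << tag))) {
                        *free_slot = tag;
                        return true;
                }
        }
}

static void put_slot(int slot)
{
        atomic_fetch_and(&slots_in_use, ~(1UL << slot));
}

int main(void)
{
        int a = -1, b = -1;

        printf("got: %d %d, tags: %d %d\n",
               get_free_slot(&a), get_free_slot(&b), a, b);
        put_slot(a);
        put_slot(b);
        return 0;
}
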
 +
 +/**
 + * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 + * @hba: per adapter instance
 + * @pos: position of the bit to be cleared
 + */
 +static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
 +{
 +      ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 +}
 +
 +/**
 + * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 + * @hba: per adapter instance
 + * @tag: position of the bit to be cleared
 + */
 +static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
 +{
 +      __clear_bit(tag, &hba->outstanding_reqs);
 +}
 +
 +/**
 + * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 + * @reg: Register value of host controller status
 + *
 + * Returns integer, 0 on Success and positive value if failed
 + */
 +static inline int ufshcd_get_lists_status(u32 reg)
 +{
 +      /*
 +       * The mask 0xFF is for the following HCS register bits
 +       * Bit          Description
 +       *  0           Device Present
 +       *  1           UTRLRDY
 +       *  2           UTMRLRDY
 +       *  3           UCRDY
 +       * 4-7          reserved
 +       */
 +      return ((reg & 0xFF) >> 1) ^ 0x07;
 +}
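
A tiny worked example of the bit arithmetic above: shifting out the Device
Present bit and XOR-ing with 0b111 yields zero only when UTRLRDY, UTMRLRDY and
UCRDY are all set:

#include <stdint.h>
#include <stdio.h>

static int lists_status(uint32_t reg)
{
        return ((reg & 0xFF) >> 1) ^ 0x07;      /* same math as above */
}

int main(void)
{
        printf("all ready   -> %d\n", lists_status(0x0F));      /* 0b1111 -> 0 */
        printf("UCRDY clear -> %d\n", lists_status(0x07));      /* 0b0111 -> 4 */
        printf("none ready  -> %d\n", lists_status(0x01));      /* 0b0001 -> 7 */
        return 0;
}
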
 +
 +/**
 + * ufshcd_get_uic_cmd_result - Get the UIC command result
 + * @hba: Pointer to adapter instance
 + *
 + * This function gets the result of UIC command completion
 + * Returns 0 on success, non zero value on error
 + */
 +static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 +{
 +      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
 +             MASK_UIC_COMMAND_RESULT;
 +}
 +
 +/**
 + * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 + * @hba: Pointer to adapter instance
 + *
 + * This function gets UIC command argument3
 + * Returns 0 on success, non zero value on error
 + */
 +static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
 +{
 +      return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
 +}
 +
 +/**
 + * ufshcd_get_req_rsp - returns the TR response transaction type
 + * @ucd_rsp_ptr: pointer to response UPIU
 + */
 +static inline int
 +ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
 +}
 +
 +/**
 + * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 + * @ucd_rsp_ptr: pointer to response UPIU
 + *
 + * This function gets the response status and scsi_status from response UPIU
 + * Returns the response result code.
 + */
 +static inline int
 +ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 +}
 +
 +/*
 + * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 + *                            from response UPIU
 + * @ucd_rsp_ptr: pointer to response UPIU
 + *
 + * Return the data segment length.
 + */
 +static inline unsigned int
 +ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 +              MASK_RSP_UPIU_DATA_SEG_LEN;
 +}
 +
 +/**
 + * ufshcd_is_exception_event - Check if the device raised an exception event
 + * @ucd_rsp_ptr: pointer to response UPIU
 + *
 + * The function checks if the device raised an exception event indicated in
 + * the Device Information field of response UPIU.
 + *
 + * Returns true if exception is raised, false otherwise.
 + */
 +static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 +{
 +      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 +                      MASK_RSP_EXCEPTION_EVENT ? true : false;
 +}
 +
 +/**
 + * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 + * @hba: per adapter instance
 + */
 +static inline void
 +ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 +{
 +      ufshcd_writel(hba, INT_AGGR_ENABLE |
 +                    INT_AGGR_COUNTER_AND_TIMER_RESET,
 +                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +}
 +
 +/**
 + * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 + * @hba: per adapter instance
 + * @cnt: Interrupt aggregation counter threshold
 + * @tmout: Interrupt aggregation timeout value
 + */
 +static inline void
 +ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
 +{
 +      ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
 +                    INT_AGGR_COUNTER_THLD_VAL(cnt) |
 +                    INT_AGGR_TIMEOUT_VAL(tmout),
 +                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +}
 +
 +/**
 + * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 + * @hba: per adapter instance
 + */
 +static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
 +{
 +      ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +}
 +
 +/**
  + * ufshcd_enable_run_stop_reg - Enable run-stop registers.
  + *                    When the run-stop registers are set to 1, it indicates
  + *                    to the host controller that it can process requests
 + * @hba: per adapter instance
 + */
 +static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
 +{
 +      ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
 +                    REG_UTP_TASK_REQ_LIST_RUN_STOP);
 +      ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
 +                    REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
 +}
 +
 +/**
 + * ufshcd_hba_start - Start controller initialization sequence
 + * @hba: per adapter instance
 + */
 +static inline void ufshcd_hba_start(struct ufs_hba *hba)
 +{
 +      u32 val = CONTROLLER_ENABLE;
 +
 +      if (ufshcd_is_crypto_supported(hba))
 +              val |= CRYPTO_GENERAL_ENABLE;
 +      ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
 +}
 +
 +/**
 + * ufshcd_is_hba_active - Get controller state
 + * @hba: per adapter instance
 + *
 + * Returns zero if controller is active, 1 otherwise
 + */
 +static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 +{
 +      return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 +}
 +
 +static const char *ufschd_uic_link_state_to_string(
 +                      enum uic_link_state state)
 +{
 +      switch (state) {
 +      case UIC_LINK_OFF_STATE:        return "OFF";
 +      case UIC_LINK_ACTIVE_STATE:     return "ACTIVE";
 +      case UIC_LINK_HIBERN8_STATE:    return "HIBERN8";
 +      default:                        return "UNKNOWN";
 +      }
 +}
 +
 +static const char *ufschd_ufs_dev_pwr_mode_to_string(
 +                      enum ufs_dev_pwr_mode state)
 +{
 +      switch (state) {
 +      case UFS_ACTIVE_PWR_MODE:       return "ACTIVE";
 +      case UFS_SLEEP_PWR_MODE:        return "SLEEP";
 +      case UFS_POWERDOWN_PWR_MODE:    return "POWERDOWN";
 +      default:                        return "UNKNOWN";
 +      }
 +}
 +
 +u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
 +{
 +      /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
 +      if ((hba->ufs_version == UFSHCI_VERSION_10) ||
 +          (hba->ufs_version == UFSHCI_VERSION_11))
 +              return UFS_UNIPRO_VER_1_41;
 +      else
 +              return UFS_UNIPRO_VER_1_6;
 +}
 +EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
 +
 +static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
 +{
 +      /*
 +       * If both host and device support UniPro ver1.6 or later, PA layer
 +       * parameters tuning happens during link startup itself.
 +       *
 +       * We can manually tune PA layer parameters if either host or device
 +       * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
 +       * logic simple, we will only do manual tuning if local unipro version
 +       * doesn't support ver1.6 or later.
 +       */
 +      if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
 +              return true;
 +      else
 +              return false;
 +}
 +
 +/**
 + * ufshcd_set_clk_freq - set UFS controller clock frequencies
 + * @hba: per adapter instance
  + * @scale_up: If true, set the max possible frequency, otherwise set the low frequency
 + *
 + * Returns 0 if successful
 + * Returns < 0 for any other errors
 + */
 +static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +      struct ufs_clk_info *clki;
 +      struct list_head *head = &hba->clk_list_head;
 +
 +      if (!head || list_empty(head))
 +              goto out;
 +
 +      list_for_each_entry(clki, head, list) {
 +              if (!IS_ERR_OR_NULL(clki->clk)) {
 +                      if (scale_up && clki->max_freq) {
 +                              if (clki->curr_freq == clki->max_freq)
 +                                      continue;
 +
 +                              ret = clk_set_rate(clki->clk, clki->max_freq);
 +                              if (ret) {
 +                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 +                                              __func__, clki->name,
 +                                              clki->max_freq, ret);
 +                                      break;
 +                              }
 +                              trace_ufshcd_clk_scaling(dev_name(hba->dev),
 +                                              "scaled up", clki->name,
 +                                              clki->curr_freq,
 +                                              clki->max_freq);
 +                              clki->curr_freq = clki->max_freq;
 +
 +                      } else if (!scale_up && clki->min_freq) {
 +                              if (clki->curr_freq == clki->min_freq)
 +                                      continue;
 +
 +                              ret = clk_set_rate(clki->clk, clki->min_freq);
 +                              if (ret) {
 +                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 +                                              __func__, clki->name,
 +                                              clki->min_freq, ret);
 +                                      break;
 +                              }
 +                              trace_ufshcd_clk_scaling(dev_name(hba->dev),
 +                                              "scaled down", clki->name,
 +                                              clki->curr_freq,
 +                                              clki->min_freq);
 +                              clki->curr_freq = clki->min_freq;
 +                      }
 +              }
 +              dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
 +                              clki->name, clk_get_rate(clki->clk));
 +      }
 +
 +out:
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 + * @hba: per adapter instance
 + * @scale_up: True if scaling up and false if scaling down
 + *
 + * Returns 0 if successful
 + * Returns < 0 for any other errors
 + */
 +static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +
 +      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
 +      if (ret)
 +              return ret;
 +
 +      ret = ufshcd_set_clk_freq(hba, scale_up);
 +      if (ret)
 +              return ret;
 +
 +      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
 +      if (ret) {
 +              ufshcd_set_clk_freq(hba, !scale_up);
 +              return ret;
 +      }
 +
 +      return ret;
 +}
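
The scaling path above is a pre-notify / change / post-notify sequence with a
rollback when the post hook rejects the change. A minimal sketch of that
pattern (the two helpers below are hypothetical stand-ins for the vendor hook
and the frequency change, not driver API):

#include <stdbool.h>
#include <stdio.h>

static int vops_clk_scale_notify(bool scale_up, bool post)
{
        printf("notify: %s, %s\n", scale_up ? "up" : "down",
               post ? "POST_CHANGE" : "PRE_CHANGE");
        return (post && scale_up) ? -1 : 0;     /* pretend the POST hook fails */
}

static int set_clk_freq(bool scale_up)
{
        printf("set clocks to %s frequency\n", scale_up ? "max" : "min");
        return 0;
}

static int scale_clks(bool scale_up)
{
        int ret;

        ret = vops_clk_scale_notify(scale_up, false);
        if (ret)
                return ret;

        ret = set_clk_freq(scale_up);
        if (ret)
                return ret;

        ret = vops_clk_scale_notify(scale_up, true);
        if (ret) {
                /* POST hook rejected the change: undo the frequency switch */
                set_clk_freq(!scale_up);
                return ret;
        }
        return 0;
}

int main(void)
{
        printf("scale up   -> %d\n", scale_clks(true));
        printf("scale down -> %d\n", scale_clks(false));
        return 0;
}
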
 +
 +static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
 +{
 +      hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
 +      cancel_work_sync(&hba->clk_gating.gate_work);
 +}
 +
 +static void ufshcd_ungate_work(struct work_struct *work)
 +{
 +      int ret;
 +      unsigned long flags;
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                      clk_gating.ungate_work);
 +
 +      ufshcd_cancel_gate_work(hba);
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_gating.state == CLKS_ON) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              goto unblock_reqs;
 +      }
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufshcd_hba_vreg_set_hpm(hba);
 +      ufshcd_enable_clocks(hba);
 +
 +      /* Exit from hibern8 */
 +      if (ufshcd_can_hibern8_during_gating(hba)) {
 +              /* Prevent gating in this path */
 +              hba->clk_gating.is_suspended = true;
 +              if (ufshcd_is_link_hibern8(hba)) {
 +                      ret = ufshcd_uic_hibern8_exit(hba);
 +                      if (ret)
 +                              dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
 +                                      __func__, ret);
 +                      else
 +                              ufshcd_set_link_active(hba);
 +              }
 +              hba->clk_gating.is_suspended = false;
 +      }
 +unblock_reqs:
 +      ufshcd_scsi_unblock_requests(hba);
 +}
 +
 +/**
 + * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 + * Also, exit from hibern8 mode and set the link as active.
 + * @hba: per adapter instance
 + * @async: This indicates whether caller should ungate clocks asynchronously.
 + */
 +int ufshcd_hold(struct ufs_hba *hba, bool async)
 +{
 +      int rc = 0;
 +      unsigned long flags;
 +
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              goto out;
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->clk_gating.active_reqs++;
 +
 +      if (ufshcd_eh_in_progress(hba)) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              return 0;
 +      }
 +
 +start:
 +      switch (hba->clk_gating.state) {
 +      case CLKS_ON:
 +              /*
 +               * Wait for the ungate work to complete if in progress.
 +               * Though the clocks may be in ON state, the link could
  +               * still be in hibern8 state if hibern8 is allowed
 +               * during clock gating.
 +               * Make sure we exit hibern8 state also in addition to
 +               * clocks being ON.
 +               */
 +              if (ufshcd_can_hibern8_during_gating(hba) &&
 +                  ufshcd_is_link_hibern8(hba)) {
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +                      flush_work(&hba->clk_gating.ungate_work);
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +                      goto start;
 +              }
 +              break;
 +      case REQ_CLKS_OFF:
 +              /*
 +               * If the timer was active but the callback was not running
 +               * we have nothing to do, just change state and return.
 +               */
 +              if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
 +                      hba->clk_gating.state = CLKS_ON;
 +                      trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                              hba->clk_gating.state);
 +                      break;
 +              }
 +              /*
 +               * If we are here, it means gating work is either done or
 +               * currently running. Hence, fall through to cancel gating
 +               * work and to enable clocks.
 +               */
 +      case CLKS_OFF:
 +              __ufshcd_scsi_block_requests(hba);
 +              hba->clk_gating.state = REQ_CLKS_ON;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
 +              queue_work(hba->clk_gating.clk_gating_workq,
 +                              &hba->clk_gating.ungate_work);
 +              /*
 +               * fall through to check if we should wait for this
 +               * work to be done or not.
 +               */
 +      case REQ_CLKS_ON:
 +              if (async) {
 +                      rc = -EAGAIN;
 +                      hba->clk_gating.active_reqs--;
 +                      break;
 +              }
 +
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              flush_work(&hba->clk_gating.ungate_work);
 +              /* Make sure state is CLKS_ON before returning */
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              goto start;
 +      default:
 +              dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 +                              __func__, hba->clk_gating.state);
 +              break;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +out:
 +      hba->ufs_stats.clk_hold.ts = ktime_get();
 +      return rc;
 +}
 +EXPORT_SYMBOL_GPL(ufshcd_hold);
 +
 +static void ufshcd_gate_work(struct work_struct *work)
 +{
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                              clk_gating.gate_work);
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      /*
 +       * In case you are here to cancel this work the gating state
 +       * would be marked as REQ_CLKS_ON. In this case save time by
  +       * skipping the gating work and exiting after changing the clock
 +       * state to CLKS_ON.
 +       */
 +      if (hba->clk_gating.is_suspended ||
 +              (hba->clk_gating.state != REQ_CLKS_OFF)) {
 +              hba->clk_gating.state = CLKS_ON;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
 +              goto rel_lock;
 +      }
 +
 +      if (hba->clk_gating.active_reqs
 +              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 +              || hba->lrb_in_use || hba->outstanding_tasks
 +              || hba->active_uic_cmd || hba->uic_async_done)
 +              goto rel_lock;
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
 +          hba->hibern8_on_idle.is_enabled)
 +              /*
 +               * Hibern8 enter work (on Idle) needs clocks to be ON hence
 +               * make sure that it is flushed before turning off the clocks.
 +               */
 +              flush_delayed_work(&hba->hibern8_on_idle.enter_work);
 +
 +      /* put the link into hibern8 mode before turning off clocks */
 +      if (ufshcd_can_hibern8_during_gating(hba)) {
 +              if (ufshcd_uic_hibern8_enter(hba)) {
 +                      hba->clk_gating.state = CLKS_ON;
 +                      trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                              hba->clk_gating.state);
 +                      goto out;
 +              }
 +              ufshcd_set_link_hibern8(hba);
 +      }
 +
 +      /*
 +       * If auto hibern8 is supported then the link will already
 +       * be in hibern8 state and the ref clock can be gated.
 +       */
 +      if ((ufshcd_is_auto_hibern8_supported(hba) ||
 +           !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
 +              ufshcd_disable_clocks(hba, true);
 +      else
 +              /* If link is active, device ref_clk can't be switched off */
 +              ufshcd_disable_clocks_skip_ref_clk(hba, true);
 +
 +      /* Put the host controller in low power mode if possible */
 +      ufshcd_hba_vreg_set_lpm(hba);
 +
 +      /*
 +       * In case you are here to cancel this work the gating state
 +       * would be marked as REQ_CLKS_ON. In this case keep the state
 +       * as REQ_CLKS_ON which would anyway imply that clocks are off
  +       * and a request to turn them on is pending. This way we keep
  +       * the state machine intact, which ultimately prevents the cancel
  +       * work from running multiple times when new requests arrive
  +       * before the current cancel work is done.
 +       */
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_gating.state == REQ_CLKS_OFF) {
 +              hba->clk_gating.state = CLKS_OFF;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
 +      }
 +rel_lock:
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +out:
 +      return;
 +}
 +
 +/* host lock must be held before calling this variant */
 +static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
 +{
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              return;
 +
 +      hba->clk_gating.active_reqs--;
 +
 +      if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
 +              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 +              || hba->lrb_in_use || hba->outstanding_tasks
 +              || hba->active_uic_cmd || hba->uic_async_done
 +              || ufshcd_eh_in_progress(hba) || no_sched)
 +              return;
 +
 +      hba->clk_gating.state = REQ_CLKS_OFF;
 +      trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
 +      hba->ufs_stats.clk_rel.ts = ktime_get();
 +
 +      hrtimer_start(&hba->clk_gating.gate_hrtimer,
 +                      ms_to_ktime(hba->clk_gating.delay_ms),
 +                      HRTIMER_MODE_REL);
 +}
 +
 +void ufshcd_release(struct ufs_hba *hba, bool no_sched)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      __ufshcd_release(hba, no_sched);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +EXPORT_SYMBOL_GPL(ufshcd_release);
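
ufshcd_hold()/ufshcd_release() are essentially reference counting: the gate
timer is only armed when the last active request releases, and any new hold
brings the clocks back. A much simplified sketch of that bookkeeping (locking,
the hrtimer and the REQ_* state work queues are omitted; names are
illustrative):

#include <stdio.h>

enum gate_state { CLKS_ON, REQ_CLKS_OFF, CLKS_OFF };

static int active_reqs;
static enum gate_state state = CLKS_ON;

static void hold(void)
{
        active_reqs++;
        if (state != CLKS_ON) {
                state = CLKS_ON;                /* driver ungates clocks here */
                printf("ungating clocks\n");
        }
}

static void release(void)
{
        if (--active_reqs > 0)
                return;
        state = REQ_CLKS_OFF;                   /* driver arms the gate hrtimer */
        printf("no active requests, gating clocks\n");
        state = CLKS_OFF;                       /* gate work ran, clocks off */
}

int main(void)
{
        hold();
        hold();
        release();      /* still one user, nothing happens */
        release();      /* last user gone, clocks get gated */
        hold();         /* next request ungates them again */
        return 0;
}
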
 +
 +static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
 -                      MASK_RSP_EXCEPTION_EVENT ? true : false;
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
  }
  
 -/**
 - * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 - * @hba: per adapter instance
 - */
 -static inline void
 -ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 +static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
  {
 -      ufshcd_writel(hba, INT_AGGR_ENABLE |
 -                    INT_AGGR_COUNTER_AND_TIMER_RESET,
 -                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->clk_gating.delay_ms = value;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
  }
  
 -/**
 - * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 - * @hba: per adapter instance
 - * @cnt: Interrupt aggregation counter threshold
 - * @tmout: Interrupt aggregation timeout value
 - */
 -static inline void
 -ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
 +static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
 -                    INT_AGGR_COUNTER_THLD_VAL(cnt) |
 -                    INT_AGGR_TIMEOUT_VAL(tmout),
 -                    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%lu\n",
 +                      hba->clk_gating.delay_ms_pwr_save);
  }
  
 -/**
 - * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 - * @hba: per adapter instance
 - */
 -static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
 +static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
  {
 -      ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +      hba->clk_gating.delay_ms_pwr_save = value;
 +      if (ufshcd_is_clkscaling_supported(hba) &&
 +          !hba->clk_scaling.is_scaled_up)
 +              hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
  }
  
 -/**
 - * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 - *                    When run-stop registers are set to 1, it indicates the
 - *                    host controller that it can process the requests
 - * @hba: per adapter instance
 - */
 -static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
 +static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
 -                    REG_UTP_TASK_REQ_LIST_RUN_STOP);
 -      ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
 -                    REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
  }
  
 -/**
 - * ufshcd_hba_start - Start controller initialization sequence
 - * @hba: per adapter instance
 - */
 -static inline void ufshcd_hba_start(struct ufs_hba *hba)
 +static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
  {
 -      ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +      hba->clk_gating.delay_ms_perf = value;
 +      if (ufshcd_is_clkscaling_supported(hba) &&
 +          hba->clk_scaling.is_scaled_up)
 +              hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
 +
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
  }
  
 -/**
 - * ufshcd_is_hba_active - Get controller state
 - * @hba: per adapter instance
 - *
 - * Returns zero if controller is active, 1 otherwise
 - */
 -static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 +static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
  }
  
 -static void ufshcd_ungate_work(struct work_struct *work)
 +static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
  {
 -      int ret;
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned long flags;
 -      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 -                      clk_gating.ungate_work);
 +      u32 value;
  
 -      cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 +      if (kstrtou32(buf, 0, &value))
 +              return -EINVAL;
  
 -      spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->clk_gating.state == CLKS_ON) {
 +      value = !!value;
 +      if (value == hba->clk_gating.is_enabled)
 +              goto out;
 +
 +      if (value) {
 +              ufshcd_release(hba, false);
 +      } else {
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              hba->clk_gating.active_reqs++;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
 -              goto unblock_reqs;
        }
  
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 -      ufshcd_setup_clocks(hba, true);
 +      hba->clk_gating.is_enabled = value;
 +out:
 +      return count;
 +}
  
 -      /* Exit from hibern8 */
 -      if (ufshcd_can_hibern8_during_gating(hba)) {
 -              /* Prevent gating in this path */
 -              hba->clk_gating.is_suspended = true;
 -              if (ufshcd_is_link_hibern8(hba)) {
 -                      ret = ufshcd_uic_hibern8_exit(hba);
 -                      if (ret)
 -                              dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
 -                                      __func__, ret);
 -                      else
 -                              ufshcd_set_link_active(hba);
 -              }
 -              hba->clk_gating.is_suspended = false;
 +static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
 +                                      struct hrtimer *timer)
 +{
 +      struct ufs_hba *hba = container_of(timer, struct ufs_hba,
 +                                         clk_gating.gate_hrtimer);
 +
 +      queue_work(hba->clk_gating.clk_gating_workq,
 +                              &hba->clk_gating.gate_work);
 +
 +      return HRTIMER_NORESTART;
 +}
 +
 +static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 +{
 +      struct ufs_clk_gating *gating = &hba->clk_gating;
 +      char wq_name[sizeof("ufs_clk_gating_00")];
 +
 +      hba->clk_gating.state = CLKS_ON;
 +
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              return;
 +
 +      /*
 +       * Disable hibern8 during clk gating if
 +       * auto hibern8 is supported
 +       */
 +      if (ufshcd_is_auto_hibern8_supported(hba))
 +              hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 +
 +      INIT_WORK(&gating->gate_work, ufshcd_gate_work);
 +      INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
 +      /*
 +       * Clock gating work must be executed only after auto hibern8
 +       * timeout has expired in the hardware or after aggressive
  +       * hibern8 on idle software timeout. Jiffy based low resolution
  +       * delayed work is not reliable enough to guarantee this, hence
  +       * use a high resolution timer to make sure the gate work is
  +       * scheduled strictly after the hibern8 timeout.
 +       *
 +       * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
 +       */
 +      hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 +      gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
 +
 +      snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
 +                      hba->host->host_no);
 +      hba->clk_gating.clk_gating_workq =
 +              create_singlethread_workqueue(wq_name);
 +
 +      gating->is_enabled = true;
 +
 +      gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
 +      gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
 +
 +      /* start with performance mode */
 +      gating->delay_ms = gating->delay_ms_perf;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              goto scaling_not_supported;
 +
 +      gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
 +      gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
 +      sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
 +      gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
 +      gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
 +
 +      gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
 +      gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
 +      sysfs_attr_init(&gating->delay_perf_attr.attr);
 +      gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
 +      gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &gating->delay_perf_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
 +
 +      goto add_clkgate_enable;
 +
 +scaling_not_supported:
 +      hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
 +      hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
 +      sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
 +      hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
 +      hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
 +
 +add_clkgate_enable:
 +      gating->enable_attr.show = ufshcd_clkgate_enable_show;
 +      gating->enable_attr.store = ufshcd_clkgate_enable_store;
 +      sysfs_attr_init(&gating->enable_attr.attr);
 +      gating->enable_attr.attr.name = "clkgate_enable";
 +      gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &gating->enable_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
 +}
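
The attributes registered above are plain text sysfs files under the UFS host
device. A hypothetical userspace snippet that disables clock gating through
clkgate_enable (the path below is made up; the real location depends on the
platform device name):

#include <stdio.h>

#define CLKGATE_ENABLE "/sys/devices/platform/ufshc/clkgate_enable"

static int write_sysfs(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        /* "0" keeps the clocks ungated (useful for debug), "1" re-enables gating */
        return write_sysfs(CLKGATE_ENABLE, "0");
}
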
 +
 +static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 +{
 +      if (!ufshcd_is_clkgating_allowed(hba))
 +              return;
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              device_remove_file(hba->dev,
 +                                 &hba->clk_gating.delay_pwr_save_attr);
 +              device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
 +      } else {
 +              device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
        }
 -unblock_reqs:
 -      if (ufshcd_is_clkscaling_enabled(hba))
 -              devfreq_resume_device(hba->devfreq);
 -      scsi_unblock_requests(hba->host);
 +      device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
 +      ufshcd_cancel_gate_work(hba);
 +      cancel_work_sync(&hba->clk_gating.ungate_work);
 +      destroy_workqueue(hba->clk_gating.clk_gating_workq);
 +}
 +
 +static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
 +{
 +      ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
 +                       AUTO_HIBERN8_IDLE_TIMER_MASK,
 +                      AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
 +                      REG_AUTO_HIBERN8_IDLE_TIMER);
 +      /* Make sure the timer gets applied before further operations */
 +      mb();
  }
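 +
 +/*
 + * Illustrative example (not part of the change): with the 1 ms scale
 + * selected above, ufshcd_set_auto_hibern8_timer(hba, 5) programs a 5 ms
 + * idle timeout before the controller enters hibern8 on its own.
 + */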
  
  /**
 - * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 - * Also, exit from hibern8 mode and set the link as active.
 + * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
 + *
   * @hba: per adapter instance
 - * @async: This indicates whether caller should ungate clocks asynchronously.
 + * @async: This indicates whether caller wants to exit hibern8 asynchronously.
 + *
 + * Exit from hibern8 mode and set the link as active.
 + *
 + * Return 0 on success, non-zero on failure.
   */
 -int ufshcd_hold(struct ufs_hba *hba, bool async)
 +static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
  {
        int rc = 0;
        unsigned long flags;
  
 -      if (!ufshcd_is_clkgating_allowed(hba))
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba))
                goto out;
 +
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      hba->clk_gating.active_reqs++;
 +      hba->hibern8_on_idle.active_reqs++;
 +
 +      if (ufshcd_eh_in_progress(hba)) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              return 0;
 +      }
  
  start:
 -      switch (hba->clk_gating.state) {
 -      case CLKS_ON:
 -              /*
 -               * Wait for the ungate work to complete if in progress.
 -               * Though the clocks may be in ON state, the link could
 -               * still be in hibner8 state if hibern8 is allowed
 -               * during clock gating.
 -               * Make sure we exit hibern8 state also in addition to
 -               * clocks being ON.
 -               */
 -              if (ufshcd_can_hibern8_during_gating(hba) &&
 -                  ufshcd_is_link_hibern8(hba)) {
 -                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 -                      flush_work(&hba->clk_gating.ungate_work);
 -                      spin_lock_irqsave(hba->host->host_lock, flags);
 -                      goto start;
 -              }
 +      switch (hba->hibern8_on_idle.state) {
 +      case HIBERN8_EXITED:
                break;
 -      case REQ_CLKS_OFF:
 -              if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
 -                      hba->clk_gating.state = CLKS_ON;
 +      case REQ_HIBERN8_ENTER:
 +              if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
 +                      hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +                      trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                              hba->hibern8_on_idle.state);
                        break;
                }
                /*
 -               * If we here, it means gating work is either done or
 -               * currently running. Hence, fall through to cancel gating
 -               * work and to enable clocks.
 +               * If we are here, it means Hibern8 enter work is either done or
 +               * currently running. Hence, fall through to cancel hibern8
 +               * work and exit hibern8.
                 */
 -      case CLKS_OFF:
 -              scsi_block_requests(hba->host);
 -              hba->clk_gating.state = REQ_CLKS_ON;
 -              schedule_work(&hba->clk_gating.ungate_work);
 +      case HIBERN8_ENTERED:
 +              __ufshcd_scsi_block_requests(hba);
 +              hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
 +              schedule_work(&hba->hibern8_on_idle.exit_work);
                /*
                 * fall through to check if we should wait for this
                 * work to be done or not.
                 */
 -      case REQ_CLKS_ON:
 +      case REQ_HIBERN8_EXIT:
                if (async) {
                        rc = -EAGAIN;
 -                      hba->clk_gating.active_reqs--;
 +                      hba->hibern8_on_idle.active_reqs--;
                        break;
 +              } else {
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +                      flush_work(&hba->hibern8_on_idle.exit_work);
 +                      /* Make sure state is HIBERN8_EXITED before returning */
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +                      goto start;
                }
 -
 -              spin_unlock_irqrestore(hba->host->host_lock, flags);
 -              flush_work(&hba->clk_gating.ungate_work);
 -              /* Make sure state is CLKS_ON before returning */
 -              spin_lock_irqsave(hba->host->host_lock, flags);
 -              goto start;
        default:
 -              dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 -                              __func__, hba->clk_gating.state);
 +              dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
 +                              __func__, hba->hibern8_on_idle.state);
                break;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 -out:
 -      return rc;
 +out:
 +      return rc;
 +}
 +
 +/* host lock must be held before calling this variant */
 +static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
 +{
 +      unsigned long delay_in_jiffies;
 +
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba))
 +              return;
 +
 +      hba->hibern8_on_idle.active_reqs--;
 +      BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
 +
 +      if (hba->hibern8_on_idle.active_reqs
 +              || hba->hibern8_on_idle.is_suspended
 +              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 +              || hba->lrb_in_use || hba->outstanding_tasks
 +              || hba->active_uic_cmd || hba->uic_async_done
 +              || ufshcd_eh_in_progress(hba) || no_sched)
 +              return;
 +
 +      hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
 +      trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +              hba->hibern8_on_idle.state);
 +      /*
 +       * Scheduling the delayed work after 1 jiffy may cause it to run
 +       * any time from 0 ms to 1000/HZ ms, which is not desirable for the
 +       * hibern8 enter work as it may impact performance if it gets
 +       * scheduled almost immediately. Hence make sure that the hibern8
 +       * enter work gets scheduled at least 2 jiffies out (any time
 +       * between 1000/HZ ms and 2000/HZ ms).
 +       */
 +      delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
 +      if (delay_in_jiffies == 1)
 +              delay_in_jiffies++;
 +
 +      schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
 +                            delay_in_jiffies);
 +}
 +
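 +/*
 + * Worked example for the rounding above (illustrative, assuming HZ=100):
 + * msecs_to_jiffies(10) == 1, and a 1 jiffy delay may expire anywhere from
 + * 0 ms to 10 ms after scheduling. Bumping it to 2 jiffies guarantees that
 + * the enter work waits at least one full tick (10 ms to 20 ms) first.
 + */
 +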
 +static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      __ufshcd_hibern8_release(hba, no_sched);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
  }
 -EXPORT_SYMBOL_GPL(ufshcd_hold);
  
 -static void ufshcd_gate_work(struct work_struct *work)
 +static void ufshcd_hibern8_enter_work(struct work_struct *work)
  {
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
 -                      clk_gating.gate_work.work);
 +                                         hibern8_on_idle.enter_work.work);
        unsigned long flags;
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->clk_gating.is_suspended) {
 -              hba->clk_gating.state = CLKS_ON;
 +      if (hba->hibern8_on_idle.is_suspended) {
 +              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
                goto rel_lock;
        }
  
 -      if (hba->clk_gating.active_reqs
 +      if (hba->hibern8_on_idle.active_reqs
                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
                || hba->lrb_in_use || hba->outstanding_tasks
                || hba->active_uic_cmd || hba->uic_async_done)
                goto rel_lock;
  
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 -      /* put the link into hibern8 mode before turning off clocks */
 -      if (ufshcd_can_hibern8_during_gating(hba)) {
 -              if (ufshcd_uic_hibern8_enter(hba)) {
 -                      hba->clk_gating.state = CLKS_ON;
 -                      goto out;
 -              }
 -              ufshcd_set_link_hibern8(hba);
 -      }
 -
 -      if (ufshcd_is_clkscaling_enabled(hba)) {
 -              devfreq_suspend_device(hba->devfreq);
 -              hba->clk_scaling.window_start_t = 0;
 +      if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
 +              /* Enter failed */
 +              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
 +              goto out;
        }
 -
 -      if (!ufshcd_is_link_active(hba))
 -              ufshcd_setup_clocks(hba, false);
 -      else
 -              /* If link is active, device ref_clk can't be switched off */
 -              __ufshcd_setup_clocks(hba, false, true);
 +      ufshcd_set_link_hibern8(hba);
  
        /*
 -       * In case you are here to cancel this work the gating state
 -       * would be marked as REQ_CLKS_ON. In this case keep the state
 -       * as REQ_CLKS_ON which would anyway imply that clocks are off
 -       * and a request to turn them on is pending. By doing this way,
 +       * In case you are here to cancel this work the hibern8_on_idle.state
 +       * would be marked as REQ_HIBERN8_EXIT. In this case keep the state
 +       * as REQ_HIBERN8_EXIT which would anyway imply that we are in hibern8
 +       * and a request to exit from it is pending. This way we keep
         * the state machine intact, and it ultimately prevents the
         * cancel work from being done multiple times when there are
         * new requests arriving before the current cancel work is done.
         */
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->clk_gating.state == REQ_CLKS_OFF)
 -              hba->clk_gating.state = CLKS_OFF;
 -
 +      if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
 +              hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 +              trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                      hba->hibern8_on_idle.state);
 +      }
  rel_lock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  out:
        return;
  }
  
 -/* host lock must be held before calling this variant */
 -static void __ufshcd_release(struct ufs_hba *hba)
 +static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
 +                                          unsigned long delay_ms)
  {
 -      if (!ufshcd_is_clkgating_allowed(hba))
 -              return;
 -
 -      hba->clk_gating.active_reqs--;
 -
 -      if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
 -              || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 -              || hba->lrb_in_use || hba->outstanding_tasks
 -              || hba->active_uic_cmd || hba->uic_async_done)
 -              return;
 -
 -      hba->clk_gating.state = REQ_CLKS_OFF;
 -      schedule_delayed_work(&hba->clk_gating.gate_work,
 -                      msecs_to_jiffies(hba->clk_gating.delay_ms));
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_hold_all(hba);
 +      ufshcd_scsi_block_requests(hba);
 +      down_write(&hba->lock);
 +      /* wait for all the outstanding requests to finish */
 +      ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 +      ufshcd_set_auto_hibern8_timer(hba, delay_ms);
 +      up_write(&hba->lock);
 +      ufshcd_scsi_unblock_requests(hba);
 +      ufshcd_release_all(hba);
 +      pm_runtime_put_sync(hba->dev);
  }
  
 -void ufshcd_release(struct ufs_hba *hba)
 +static void ufshcd_hibern8_exit_work(struct work_struct *work)
  {
 +      int ret;
        unsigned long flags;
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                         hibern8_on_idle.exit_work);
 +
 +      cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      __ufshcd_release(hba);
 +      if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
 +           || ufshcd_is_link_active(hba)) {
 +              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              goto unblock_reqs;
 +      }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      /* Exit from hibern8 */
 +      if (ufshcd_is_link_hibern8(hba)) {
 +              hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
 +              ufshcd_hold(hba, false);
 +              ret = ufshcd_uic_hibern8_exit(hba);
 +              hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
 +              ufshcd_release(hba, false);
 +              if (!ret) {
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +                      ufshcd_set_link_active(hba);
 +                      hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +                      trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 +                              hba->hibern8_on_idle.state);
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              }
 +      }
 +unblock_reqs:
 +      ufshcd_scsi_unblock_requests(hba);
  }
 -EXPORT_SYMBOL_GPL(ufshcd_release);
  
 -static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 +static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
                struct device_attribute *attr, char *buf)
  {
        struct ufs_hba *hba = dev_get_drvdata(dev);
  
 -      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
 +      return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
  }
  
 -static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
 +static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
  {
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned long flags, value;
 +      bool change = true;
  
        if (kstrtoul(buf, 0, &value))
                return -EINVAL;
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      hba->clk_gating.delay_ms = value;
 +      if (hba->hibern8_on_idle.delay_ms == value)
 +              change = false;
 +
 +      if (value >= hba->clk_gating.delay_ms_pwr_save ||
 +          value >= hba->clk_gating.delay_ms_perf) {
 +              dev_err(hba->dev, "hibern8_on_idle_delay (%lu) must be less than both clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
 +                      value, hba->clk_gating.delay_ms_pwr_save,
 +                      hba->clk_gating.delay_ms_perf);
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              return -EINVAL;
 +      }
 +
 +      hba->hibern8_on_idle.delay_ms = value;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      /* Update auto hibern8 timer value if supported */
 +      if (change && ufshcd_is_auto_hibern8_supported(hba) &&
 +          hba->hibern8_on_idle.is_enabled)
 +              __ufshcd_set_auto_hibern8_timer(hba,
 +                                              hba->hibern8_on_idle.delay_ms);
 +
        return count;
  }
  
 -static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 +static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
  {
 -      if (!ufshcd_is_clkgating_allowed(hba))
 -              return;
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
  
 -      hba->clk_gating.delay_ms = 150;
 -      INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 -      INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 +      return snprintf(buf, PAGE_SIZE, "%d\n",
 +                      hba->hibern8_on_idle.is_enabled);
 +}
  
 -      hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
 -      hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
 -      sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
 -      hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
 -      hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 -      if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
 -              dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
 +static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags;
 +      u32 value;
 +
 +      if (kstrtou32(buf, 0, &value))
 +              return -EINVAL;
 +
 +      value = !!value;
 +      if (value == hba->hibern8_on_idle.is_enabled)
 +              goto out;
 +
 +      /* Update auto hibern8 timer value if supported */
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              __ufshcd_set_auto_hibern8_timer(hba,
 +                      value ? hba->hibern8_on_idle.delay_ms : value);
 +              goto update;
 +      }
 +
 +      if (value) {
 +              /*
 +               * As clock gating work would wait for the hibern8 enter work
 +               * to finish, clocks would remain on during hibern8 enter work.
 +               */
 +              ufshcd_hold(hba, false);
 +              ufshcd_release_all(hba);
 +      } else {
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              hba->hibern8_on_idle.active_reqs++;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      }
 +
 +update:
 +      hba->hibern8_on_idle.is_enabled = value;
 +out:
 +      return count;
  }
  
 -static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 +static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
  {
 -      if (!ufshcd_is_clkgating_allowed(hba))
 +      /* initialize the state variable here */
 +      hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
 +          !ufshcd_is_auto_hibern8_supported(hba))
                return;
 -      device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
 -      cancel_work_sync(&hba->clk_gating.ungate_work);
 -      cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 +
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              hba->hibern8_on_idle.delay_ms = 1;
 +              hba->hibern8_on_idle.state = AUTO_HIBERN8;
 +              /*
 +               * Disable SW hibern8 enter on idle in case
 +               * auto hibern8 is supported
 +               */
 +              hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
 +      } else {
 +              hba->hibern8_on_idle.delay_ms = 10;
 +              INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
 +                                ufshcd_hibern8_enter_work);
 +              INIT_WORK(&hba->hibern8_on_idle.exit_work,
 +                        ufshcd_hibern8_exit_work);
 +      }
 +
 +      hba->hibern8_on_idle.is_enabled = true;
 +
 +      hba->hibern8_on_idle.delay_attr.show =
 +                                      ufshcd_hibern8_on_idle_delay_show;
 +      hba->hibern8_on_idle.delay_attr.store =
 +                                      ufshcd_hibern8_on_idle_delay_store;
 +      sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
 +      hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
 +      hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
 +
 +      hba->hibern8_on_idle.enable_attr.show =
 +                                      ufshcd_hibern8_on_idle_enable_show;
 +      hba->hibern8_on_idle.enable_attr.store =
 +                                      ufshcd_hibern8_on_idle_enable_store;
 +      sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
 +      hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
 +      hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
 +}
 +
 +static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
 +{
 +      if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
 +          !ufshcd_is_auto_hibern8_supported(hba))
 +              return;
 +      device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
 +      device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
 +}
 +
 +static void ufshcd_hold_all(struct ufs_hba *hba)
 +{
 +      ufshcd_hold(hba, false);
 +      ufshcd_hibern8_hold(hba, false);
 +}
 +
 +static void ufshcd_release_all(struct ufs_hba *hba)
 +{
 +      ufshcd_hibern8_release(hba, false);
 +      ufshcd_release(hba, false);
  }
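 +
 +/*
 + * Typical usage (sketch only): paths that need the clocks on and the link
 + * out of hibern8 bracket their work with these helpers, e.g.
 + *
 + *     ufshcd_hold_all(hba);
 + *     ...issue UIC / query command...
 + *     ufshcd_release_all(hba);
 + *
 + * as done below in ufshcd_send_uic_cmd() and the query helpers.
 + */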
  
  /* Must be called with host lock acquired */
  static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
  {
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      bool queue_resume_work = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return;
 +
 +      if (!hba->clk_scaling.active_reqs++)
 +              queue_resume_work = true;
 +
 +      if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
                return;
  
 +      if (queue_resume_work)
 +              queue_work(hba->clk_scaling.workq,
 +                         &hba->clk_scaling.resume_work);
 +
 +      if (!hba->clk_scaling.window_start_t) {
 +              hba->clk_scaling.window_start_t = jiffies;
 +              hba->clk_scaling.tot_busy_t = 0;
 +              hba->clk_scaling.is_busy_started = false;
 +      }
 +
        if (!hba->clk_scaling.is_busy_started) {
                hba->clk_scaling.busy_start_t = ktime_get();
                hba->clk_scaling.is_busy_started = true;
@@@ -2319,7 -796,7 +2319,7 @@@ static void ufshcd_clk_scaling_update_b
  {
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
  
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      if (!ufshcd_is_clkscaling_supported(hba))
                return;
  
        if (!hba->outstanding_reqs && scaling->is_busy_started) {
                scaling->is_busy_started = false;
        }
  }
 +
  /**
   * ufshcd_send_command - Send SCSI or device management commands
   * @hba: per adapter instance
   * @task_tag: Task tag of the command
   */
  static inline
 -void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 +int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
  {
 +      int ret = 0;
 +
 +      hba->lrb[task_tag].issue_time_stamp = ktime_get();
 +      hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
        ufshcd_clk_scaling_start_busy(hba);
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      /* Make sure that doorbell is committed immediately */
 +      wmb();
 +      ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
 +      ufshcd_update_tag_stats(hba, task_tag);
 +      return ret;
  }
  
  /**
@@@ -2368,7 -835,7 +2368,7 @@@ static inline void ufshcd_copy_sense_da
  
                memcpy(lrbp->sense_buffer,
                        lrbp->ucd_rsp_ptr->sr.sense_data,
 -                      min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
 +                      min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
        }
  }
  
@@@ -2423,9 -890,6 +2423,9 @@@ static inline void ufshcd_hba_capabilit
        hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
        hba->nutmrs =
        ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
 +
 +      /* disable auto hibern8 */
 +      hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
  }
  
  /**
@@@ -2468,7 -932,6 +2468,7 @@@ ufshcd_dispatch_uic_cmd(struct ufs_hba 
  
        hba->active_uic_cmd = uic_cmd;
  
 +      ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
        /* Write Args */
        ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
        ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@@ -2499,11 -962,6 +2499,11 @@@ ufshcd_wait_for_uic_cmd(struct ufs_hba 
        else
                ret = -ETIMEDOUT;
  
 +      if (ret)
 +              ufsdbg_set_err_state(hba);
 +
 +      ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
 +
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->active_uic_cmd = NULL;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
   * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
   * @hba: per adapter instance
   * @uic_cmd: UIC command
 + * @completion: initialize the completion only if this is set to true
   *
   * Identical to ufshcd_send_uic_cmd() except for the locking. Must be called
   * with mutex held and host_lock locked.
   * Returns 0 only if success.
   */
  static int
 -__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
 +                    bool completion)
  {
        if (!ufshcd_ready_for_uic_cmd(hba)) {
                dev_err(hba->dev,
                        "Controller not ready to accept UIC commands\n");
                return -EIO;
        }
  
 -      init_completion(&uic_cmd->done);
 +      if (completion)
 +              init_completion(&uic_cmd->done);
  
        ufshcd_dispatch_uic_cmd(hba, uic_cmd);
  
@@@ -2552,25 -1007,19 +2552,25 @@@ ufshcd_send_uic_cmd(struct ufs_hba *hba
        int ret;
        unsigned long flags;
  
 -      ufshcd_hold(hba, false);
 +      hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
 +      ufshcd_hold_all(hba);
        mutex_lock(&hba->uic_cmd_mutex);
        ufshcd_add_delay_before_dme_cmd(hba);
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
 +      ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (!ret)
                ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
  
 +      ufshcd_save_tstamp_of_last_dme_cmd(hba);
        mutex_unlock(&hba->uic_cmd_mutex);
 +      ufshcd_release_all(hba);
 +      hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_UIC, 0, &ret);
  
 -      ufshcd_release(hba);
        return ret;
  }
  
@@@ -2606,7 -1055,6 +2606,7 @@@ static int ufshcd_map_sg(struct ufshcd_
                                cpu_to_le32(lower_32_bits(sg->dma_address));
                        prd_table[i].upper_addr =
                                cpu_to_le32(upper_32_bits(sg->dma_address));
 +                      prd_table[i].reserved = 0;
                }
        } else {
                lrbp->utr_descriptor_ptr->prd_table_length = 0;
@@@ -2657,52 -1105,15 +2657,52 @@@ static void ufshcd_disable_intr(struct 
        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
  }
  
 +static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
 +              struct ufshcd_lrb *lrbp)
 +{
 +      struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
 +      u8 cc_index = 0;
 +      bool enable = false;
 +      u64 dun = 0;
 +      int ret;
 +
 +      /*
 +       * Call vendor specific code to get crypto info for this request:
 +       * the enable flag, the crypto configuration index and the DUN.
 +       * If crypto is not enabled, don't bother setting the other fields.
 +       */
 +      ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
 +      if (ret) {
 +              if (ret != -EAGAIN) {
 +                      dev_err(hba->dev,
 +                              "%s: failed to setup crypto request (%d)\n",
 +                              __func__, ret);
 +              }
 +
 +              return ret;
 +      }
 +
 +      if (!enable)
 +              goto out;
 +
 +      req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
 +      req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
 +      req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
 +out:
 +      return 0;
 +}
 +
  /**
   * ufshcd_prepare_req_desc_hdr() - Fills the request header
   * descriptor according to the request
 + * @hba: per adapter instance
   * @lrbp: pointer to local reference block
   * @upiu_flags: flags required in the header
   * @cmd_dir: requests data direction
   */
 -static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
 -              u32 *upiu_flags, enum dma_data_direction cmd_dir)
 +static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
 +      struct ufshcd_lrb *lrbp, u32 *upiu_flags,
 +      enum dma_data_direction cmd_dir)
  {
        struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
        u32 data_direction;
  
        /* Transfer request descriptor header fields */
        req_desc->header.dword_0 = cpu_to_le32(dword_0);
 -
 +      /* dword_1 is reserved, hence it is set to 0 */
 +      req_desc->header.dword_1 = 0;
        /*
         * assigning invalid value for command status. Controller
         * updates OCS on command completion, with the command
         * status as UFS OCS_SUCCESS.
         */
        req_desc->header.dword_2 =
                cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
 +      /* dword_3 is reserved, hence it is set to 0 */
 +      req_desc->header.dword_3 = 0;
 +
 +      req_desc->prd_table_length = 0;
 +
 +      if (ufshcd_is_crypto_supported(hba))
 +              return ufshcd_prepare_crypto_utrd(hba, lrbp);
 +
 +      return 0;
  }
  
  /**
@@@ -2756,7 -1157,6 +2756,7 @@@ stati
  void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
  {
        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
 +      unsigned short cdb_len;
  
        /* command descriptor fields */
        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
        ucd_req_ptr->sc.exp_data_transfer_len =
                cpu_to_be32(lrbp->cmd->sdb.length);
  
 -      memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
 -              (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
 +      cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
 +      memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
 +      if (cdb_len < MAX_CDB_SIZE)
 +              memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
 +                     (MAX_CDB_SIZE - cdb_len));
 +      memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  }
  
  /**
@@@ -2813,7 -1209,6 +2813,7 @@@ static void ufshcd_prepare_utp_query_re
        if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
                memcpy(descp, query->descriptor, len);
  
 +      memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  }
  
  static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
        ucd_req_ptr->header.dword_0 =
                UPIU_HEADER_DWORD(
                        UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
 +      /* clear rest of the fields of basic header */
 +      ucd_req_ptr->header.dword_1 = 0;
 +      ucd_req_ptr->header.dword_2 = 0;
 +
 +      memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  }
  
  /**
@@@ -2846,16 -1236,15 +2846,16 @@@ static int ufshcd_compose_upiu(struct u
        switch (lrbp->command_type) {
        case UTP_CMD_TYPE_SCSI:
                if (likely(lrbp->cmd)) {
 -                      ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
 -                                      lrbp->cmd->sc_data_direction);
 +                      ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
 +                              &upiu_flags, lrbp->cmd->sc_data_direction);
                        ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
                } else {
                        ret = -EINVAL;
                }
                break;
        case UTP_CMD_TYPE_DEV_MANAGE:
 -              ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
 +              ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
 +                      DMA_NONE);
                if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
                        ufshcd_prepare_utp_query_req_upiu(
                                        hba, lrbp, upiu_flags);
@@@ -2907,61 -1296,6 +2907,61 @@@ static inline u16 ufshcd_upiu_wlun_to_s
  }
  
  /**
 + * ufshcd_get_write_lock - synchronize between shutdown, scaling &
 + * arrival of requests
 + * @hba: ufs host
 + *
 + * The lock is predominantly held by the shutdown context, thus ensuring
 + * that no requests from any other context can sneak through.
 + */
 +static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
 +{
 +      down_write(&hba->lock);
 +}
 +
 +/**
 + * ufshcd_get_read_lock - synchronize between shutdown, scaling &
 + * arrival of requests
 + * @hba: ufs host
 + *
 + * Returns 1 if the lock was acquired, 0 if the request may proceed
 + * without it, and < 0 on contention
 + *
 + * After shutdown is initiated, only requests directed to the well known
 + * device W-LUN are allowed through. The existing sync between scaling and
 + * request issue is maintained as is, and this restructuring syncs shutdown
 + * with both of them as well.
 + */
 +static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
 +{
 +      int err = 0;
 +
 +      err = down_read_trylock(&hba->lock);
 +      if (err > 0)
 +              goto out;
 +      /* let requests for the well known device lun go through */
 +      if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
 +              return 0;
 +      else if (!ufshcd_is_shutdown_ongoing(hba))
 +              return -EAGAIN;
 +      else
 +              return -EPERM;
 +
 +out:
 +      return err;
 +}
 +
 +/**
 + * ufshcd_put_read_lock - synchronize between shutdown, scaling &
 + * arrival of requests
 + * @hba: ufs host
 + *
 + * Returns none
 + */
 +static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
 +{
 +      up_read(&hba->lock);
 +}
 +
 +/**
   * ufshcd_queuecommand - main entry point for SCSI requests
   * @cmd: command from SCSI Midlayer
   * @done: call back function
@@@ -2975,42 -1309,12 +2975,42 @@@ static int ufshcd_queuecommand(struct S
        unsigned long flags;
        int tag;
        int err = 0;
 +      bool has_read_lock = false;
  
        hba = shost_priv(host);
  
 +      if (!cmd || !cmd->request || !hba)
 +              return -EINVAL;
 +
        tag = cmd->request->tag;
 +      if (!ufshcd_valid_tag(hba, tag)) {
 +              dev_err(hba->dev,
 +                      "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
 +                      __func__, tag, cmd, cmd->request);
 +              BUG();
 +      }
 +
 +      err = ufshcd_get_read_lock(hba, cmd->device->lun);
 +      if (unlikely(err < 0)) {
 +              if (err == -EPERM) {
 +                      set_host_byte(cmd, DID_ERROR);
 +                      cmd->scsi_done(cmd);
 +                      return 0;
 +              }
 +              if (err == -EAGAIN)
 +                      return SCSI_MLQUEUE_HOST_BUSY;
 +      } else if (err == 1) {
 +              has_read_lock = true;
 +      }
  
        spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +      /* if error handling is in progress, return host busy */
 +      if (ufshcd_eh_in_progress(hba)) {
 +              err = SCSI_MLQUEUE_HOST_BUSY;
 +              goto out_unlock;
 +      }
 +
        switch (hba->ufshcd_state) {
        case UFSHCD_STATE_OPERATIONAL:
                break;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 +      hba->req_abort_count = 0;
 +
        /* acquire the tag to make sure device cmds don't use it */
        if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
                /*
                goto out;
        }
  
 +      hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
        err = ufshcd_hold(hba, true);
        if (err) {
                err = SCSI_MLQUEUE_HOST_BUSY;
                clear_bit_unlock(tag, &hba->lrb_in_use);
                goto out;
        }
 +      if (ufshcd_is_clkgating_allowed(hba))
 +              WARN_ON(hba->clk_gating.state != CLKS_ON);
 +
 +      err = ufshcd_hibern8_hold(hba, true);
 +      if (err) {
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              err = SCSI_MLQUEUE_HOST_BUSY;
 +              hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
 +              ufshcd_release(hba, true);
 +              goto out;
 +      }
 +      if (ufshcd_is_hibern8_on_idle_allowed(hba))
 +              WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
 +
 +      /* Vote PM QoS for the request */
 +      ufshcd_vops_pm_qos_req_start(hba, cmd->request);
 +
 +      /* IO svc time latency histogram */
 +      if (hba->latency_hist_enabled &&
 +          (cmd->request->cmd_type == REQ_TYPE_FS)) {
 +              cmd->request->lat_hist_io_start = ktime_get();
 +              cmd->request->lat_hist_enabled = 1;
 +      } else {
 +              cmd->request->lat_hist_enabled = 0;
 +      }
 +
        WARN_ON(hba->clk_gating.state != CLKS_ON);
  
        lrbp = &hba->lrb[tag];
  
        WARN_ON(lrbp->cmd);
        lrbp->cmd = cmd;
 -      lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
 +      lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
        lrbp->sense_buffer = cmd->sense_buffer;
        lrbp->task_tag = tag;
        lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
        lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
        lrbp->command_type = UTP_CMD_TYPE_SCSI;
 +      lrbp->req_abort_skip = false;
 +
 +      /* form UPIU before issuing the command */
 +      err = ufshcd_compose_upiu(hba, lrbp);
 +      if (err) {
 +              if (err != -EAGAIN)
 +                      dev_err(hba->dev,
 +                              "%s: failed to compose upiu %d\n",
 +                              __func__, err);
 +
 +              lrbp->cmd = NULL;
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +              goto out;
 +      }
 +
 +      err = ufshcd_map_sg(lrbp);
 +      if (err) {
 +              lrbp->cmd = NULL;
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +              goto out;
 +      }
 +
 +      err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
 +      if (err) {
 +              if (err != -EAGAIN)
 +                      dev_err(hba->dev,
 +                              "%s: failed to configure crypto engine %d\n",
 +                              __func__, err);
 +
 +              scsi_dma_unmap(lrbp->cmd);
 +              lrbp->cmd = NULL;
 +              clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +
 +              goto out;
 +      }
 +
 +      /* Make sure descriptors are ready before ringing the doorbell */
 +      wmb();
 +      /* issue command to the controller */
 +      spin_lock_irqsave(hba->host->host_lock, flags);
  
 -      /* form UPIU before issuing the command */
 -      ufshcd_compose_upiu(hba, lrbp);
 -      err = ufshcd_map_sg(lrbp);
 +      err = ufshcd_send_command(hba, tag);
        if (err) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              scsi_dma_unmap(lrbp->cmd);
                lrbp->cmd = NULL;
                clear_bit_unlock(tag, &hba->lrb_in_use);
 +              ufshcd_release_all(hba);
 +              ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 +              ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
 +              dev_err(hba->dev, "%s: failed sending command, %d\n",
 +                                                      __func__, err);
 +              err = DID_ERROR;
                goto out;
        }
  
 -      /* issue command to the controller */
 -      spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_send_command(hba, tag);
  out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  out:
 +      if (has_read_lock)
 +              ufshcd_put_read_lock(hba);
        return err;
  }
  
@@@ -3192,7 -1416,7 +3192,7 @@@ ufshcd_clear_cmd(struct ufs_hba *hba, i
         */
        err = ufshcd_wait_for_register(hba,
                        REG_UTP_TRANSFER_REQ_DOOR_BELL,
 -                      mask, ~mask, 1000, 1000);
 +                      mask, ~mask, 1000, 1000, true);
  
        return err;
  }
@@@ -3219,7 -1443,6 +3219,7 @@@ ufshcd_dev_cmd_completion(struct ufs_hb
        int resp;
        int err = 0;
  
 +      hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
        resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
  
        switch (resp) {
@@@ -3272,22 -1495,11 +3272,22 @@@ static int ufshcd_wait_for_dev_cmd(stru
  
        if (!time_left) {
                err = -ETIMEDOUT;
 +              dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
 +                      __func__, lrbp->task_tag);
                if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
 -                      /* sucessfully cleared the command, retry if needed */
 +                      /* successfully cleared the command, retry if needed */
                        err = -EAGAIN;
 +              /*
 +               * in case of an error, after clearing the doorbell,
 +               * we also need to clear the outstanding_request
 +               * field in hba
 +               */
 +              ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
        }
  
 +      if (err)
 +              ufsdbg_set_err_state(hba);
 +
        return err;
  }
  
@@@ -3348,15 -1560,6 +3348,15 @@@ static int ufshcd_exec_dev_cmd(struct u
        unsigned long flags;
  
        /*
 +       * May get invoked from shutdown, IOCTL and error recovery contexts.
 +       * In the shutdown context it comes in with the lock already acquired;
 +       * in the error recovery context it may come with the lock acquired.
 +       */
 +
 +      if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
 +              down_read(&hba->lock);
 +
 +      /*
         * Get free slot, sleep if slots are unavailable.
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by SCSI request timeout.
  
        hba->dev_cmd.complete = &wait;
  
 +      /* Make sure descriptors are ready before ringing the doorbell */
 +      wmb();
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_send_command(hba, tag);
 +      err = ufshcd_send_command(hba, tag);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 -
 +      if (err) {
 +              dev_err(hba->dev, "%s: failed sending command, %d\n",
 +                                                      __func__, err);
 +              goto out_put_tag;
 +      }
        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
  
  out_put_tag:
        ufshcd_put_dev_cmd_tag(hba, tag);
        wake_up(&hba->dev_cmd.tag_wq);
 +      if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
 +              up_read(&hba->lock);
        return err;
  }
  
@@@ -3406,12 -1601,6 +3406,12 @@@ static inline void ufshcd_init_query(st
                struct ufs_query_req **request, struct ufs_query_res **response,
                enum query_opcode opcode, u8 idn, u8 index, u8 selector)
  {
 +      int idn_t = (int)idn;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
 +      idn = idn_t;
 +
        *request = &hba->dev_cmd.query.request;
        *response = &hba->dev_cmd.query.response;
        memset(*request, 0, sizeof(struct ufs_query_req));
        (*request)->upiu_req.idn = idn;
        (*request)->upiu_req.index = index;
        (*request)->upiu_req.selector = selector;
 +
 +      ufshcd_update_query_stats(hba, opcode, idn);
 +}
 +
 +static int ufshcd_query_flag_retry(struct ufs_hba *hba,
 +      enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
 +{
 +      int ret;
 +      int retries;
 +
 +      for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
 +              ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
 +              if (ret)
 +                      dev_dbg(hba->dev,
 +                              "%s: failed with error %d, retries %d\n",
 +                              __func__, ret, retries);
 +              else
 +                      break;
 +      }
 +
 +      if (ret)
 +              dev_err(hba->dev,
 +                      "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
 +                      __func__, opcode, idn, ret, retries);
 +      return ret;
  }
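 +
 +/*
 + * Usage sketch (illustrative; the flag IDN shown is an assumption about a
 + * typical caller, not something introduced by this hunk):
 + *
 + *     bool flag_res = true;
 + *
 + *     if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 + *                                  QUERY_FLAG_IDN_FDEVICEINIT, &flag_res) &&
 + *         !flag_res)
 + *             dev_dbg(hba->dev, "fDeviceInit cleared, device init done\n");
 + */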
  
  /**
   *
   * Returns 0 for success, non-zero in case of failure
   */
 -static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 +int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
                        enum flag_idn idn, bool *flag_res)
  {
        struct ufs_query_req *request = NULL;
        struct ufs_query_res *response = NULL;
        int err, index = 0, selector = 0;
 +      int timeout = QUERY_REQ_TIMEOUT;
  
        BUG_ON(!hba);
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        mutex_lock(&hba->dev_cmd.lock);
        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
                        selector);
                goto out_unlock;
        }
  
 -      err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 +      err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
  
        if (err) {
                dev_err(hba->dev,
                        "%s: Sending flag query for idn %d failed, err = %d\n",
 -                      __func__, idn, err);
 +                      __func__, request->upiu_req.idn, err);
                goto out_unlock;
        }
  
  
  out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
 +EXPORT_SYMBOL(ufshcd_query_flag);
  
  /**
   * ufshcd_query_attr - API function for sending attribute requests
   *
   * Returns 0 for success, non-zero in case of failure
  */
 -static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 +int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
                        enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
  {
        struct ufs_query_req *request = NULL;
  
        BUG_ON(!hba);
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        if (!attr_val) {
                dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
                                __func__, opcode);
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  
        if (err) {
 -              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
 -                              __func__, opcode, idn, err);
 +              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
 +                              __func__, opcode,
 +                              request->upiu_req.idn, index, err);
                goto out_unlock;
        }
  
  out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
  out:
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
 +EXPORT_SYMBOL(ufshcd_query_attr);
  
  /**
 - * ufshcd_query_descriptor - API function for sending descriptor requests
 - * hba: per-adapter instance
 - * opcode: attribute opcode
 - * idn: attribute idn to access
 - * index: index field
 - * selector: selector field
 - * desc_buf: the buffer that contains the descriptor
 - * buf_len: length parameter passed to the device
 + * ufshcd_query_attr_retry() - API function for sending query
 + * attribute with retries
 + * @hba: per-adapter instance
 + * @opcode: attribute opcode
 + * @idn: attribute idn to access
 + * @index: index field
 + * @selector: selector field
 + * @attr_val: the attribute value after the query request
 + * completes
   *
 - * Returns 0 for success, non-zero in case of failure.
 - * The buf_len parameter will contain, on return, the length parameter
 - * received on the response.
 - */
 -static int ufshcd_query_descriptor(struct ufs_hba *hba,
 + * Returns 0 for success, non-zero in case of failure
 + */
 +static int ufshcd_query_attr_retry(struct ufs_hba *hba,
 +      enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
 +      u32 *attr_val)
 +{
 +      int ret = 0;
 +      u32 retries;
 +
 +      for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 +              ret = ufshcd_query_attr(hba, opcode, idn, index,
 +                                              selector, attr_val);
 +              if (ret)
 +                      dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
 +                              __func__, ret, retries);
 +              else
 +                      break;
 +      }
 +
 +      if (ret)
 +              dev_err(hba->dev,
 +                      "%s: query attribute, idn %d, failed with error %d after %d retries\n",
 +                      __func__, idn, ret, retries);
 +      return ret;
 +}
 +
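 +/*
 + * Usage sketch (illustrative; the attribute IDN is an assumption about a
 + * typical caller, not something introduced by this hunk):
 + *
 + *     u32 icc_level = 0;
 + *
 + *     if (!ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 + *                                  QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 + *                                  &icc_level))
 + *             dev_dbg(hba->dev, "bActiveICCLevel = %u\n", icc_level);
 + */
 +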
 +static int __ufshcd_query_descriptor(struct ufs_hba *hba,
                        enum query_opcode opcode, enum desc_idn idn, u8 index,
                        u8 selector, u8 *desc_buf, int *buf_len)
  {
  
        BUG_ON(!hba);
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        if (!desc_buf) {
                dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
                                __func__, opcode);
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  
        if (err) {
 -              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
 -                              __func__, opcode, idn, err);
 +              dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
 +                              __func__, opcode,
 +                              request->upiu_req.idn, index, err);
                goto out_unlock;
        }
  
@@@ -3679,41 -1815,9 +3679,41 @@@ out_unlock
        hba->dev_cmd.query.descriptor = NULL;
        mutex_unlock(&hba->dev_cmd.lock);
  out:
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
 +      return err;
 +}
 +
 +/**
 + * ufshcd_query_descriptor - API function for sending descriptor requests
 + * @hba: per-adapter instance
 + * @opcode: attribute opcode
 + * @idn: attribute idn to access
 + * @index: index field
 + * @selector: selector field
 + * @desc_buf: the buffer that contains the descriptor
 + * @buf_len: length parameter passed to the device
 + *
 + * Returns 0 for success, non-zero in case of failure.
 + * The buf_len parameter will contain, on return, the length parameter
 + * received on the response.
 + */
 +int ufshcd_query_descriptor(struct ufs_hba *hba,
 +                      enum query_opcode opcode, enum desc_idn idn, u8 index,
 +                      u8 selector, u8 *desc_buf, int *buf_len)
 +{
 +      int err;
 +      int retries;
 +
 +      for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 +              err = __ufshcd_query_descriptor(hba, opcode, idn, index,
 +                                              selector, desc_buf, buf_len);
 +              if (!err || err == -EINVAL)
 +                      break;
 +      }
 +
        return err;
  }
 +EXPORT_SYMBOL(ufshcd_query_descriptor);
  
  /**
   * ufshcd_read_desc_param - read the specified descriptor parameter
@@@ -3761,38 -1865,15 +3761,38 @@@ static int ufshcd_read_desc_param(struc
                                      desc_id, desc_index, 0, desc_buf,
                                      &buff_len);
  
 -      if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
 -          (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
 -           ufs_query_desc_max_size[desc_id])
 -          || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
 -              dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
 -                      __func__, desc_id, param_offset, buff_len, ret);
 -              if (!ret)
 -                      ret = -EINVAL;
 +      if (ret) {
 +              dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
 +                      __func__, desc_id, desc_index, param_offset, ret);
 +
 +              goto out;
 +      }
 +
 +      /* Sanity check */
 +      if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
 +              dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
 +                      __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
 +              ret = -EINVAL;
 +              goto out;
 +      }
  
 +      /*
 +       * While reading variable size descriptors (like the string descriptor),
 +       * some UFS devices may report the "LENGTH" (field in "Transaction
 +       * Specific fields" of Query Response UPIU) to be the same as what was
 +       * requested in the Query Request UPIU instead of reporting the actual
 +       * size of the variable size descriptor.
 +       * It is safe to ignore the "LENGTH" field for variable size descriptors
 +       * as we can always derive the length of the descriptor from the
 +       * descriptor header fields. Hence the length match check is imposed
 +       * only for fixed size descriptors (for which we always request the
 +       * correct size as part of the Query Request UPIU).
 +       */
 +      if ((desc_id != QUERY_DESC_IDN_STRING) &&
 +          (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
 +              dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
 +                      __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
 +              ret = -EINVAL;
                goto out;
        }
  
@@@ -3820,82 -1901,6 +3820,82 @@@ static inline int ufshcd_read_power_des
        return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
  }
  
 +int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
 +{
 +      return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
 +}
 +
 +/**
 + * ufshcd_read_string_desc - read string descriptor
 + * @hba: pointer to adapter instance
 + * @desc_index: descriptor index
 + * @buf: pointer to buffer where descriptor would be read
 + * @size: size of buf
 + * @ascii: if true convert from unicode to ascii characters
 + *
 + * Return 0 in case of success, non-zero otherwise
 + */
 +int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
 +                              u32 size, bool ascii)
 +{
 +      int err = 0;
 +
 +      err = ufshcd_read_desc(hba,
 +                              QUERY_DESC_IDN_STRING, desc_index, buf, size);
 +
 +      if (err) {
 +              dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
 +                      __func__, QUERY_REQ_RETRIES, err);
 +              goto out;
 +      }
 +
 +      if (ascii) {
 +              int desc_len;
 +              int ascii_len;
 +              int i;
 +              char *buff_ascii;
 +
 +              desc_len = buf[0];
 +              /* remove header and divide by 2 to move from UTF16 to UTF8 */
 +              ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
 +              if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
 +                      dev_err(hba->dev, "%s: allocated buffer size is too small\n",
 +                                      __func__);
 +                      err = -ENOMEM;
 +                      goto out;
 +              }
 +
 +              buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
 +              if (!buff_ascii) {
 +                      dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
 +                                      __func__, ascii_len);
 +                      err = -ENOMEM;
 +                      goto out_free_buff;
 +              }
 +
 +              /*
 +               * The descriptor contains the string in UTF-16 format;
 +               * convert it to UTF-8 so it can be displayed.
 +               */
 +              utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
 +                              desc_len - QUERY_DESC_HDR_SIZE,
 +                              UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
 +
 +              /* replace non-printable or non-ASCII characters with spaces */
 +              for (i = 0; i < ascii_len; i++)
 +                      ufshcd_remove_non_printable(&buff_ascii[i]);
 +
 +              memset(buf + QUERY_DESC_HDR_SIZE, 0,
 +                              size - QUERY_DESC_HDR_SIZE);
 +              memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
 +              buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
 +out_free_buff:
 +              kfree(buff_ascii);
 +      }
 +out:
 +      return err;
 +}
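The function above strips the 2-byte descriptor header, converts the big-endian UTF-16 payload with utf16s_to_utf8s(), and replaces non-printable characters with spaces. A simplified user-space sketch of that conversion, handling only the basic ASCII range (the kernel path produces full UTF-8), is shown below; it is illustrative and not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 2	/* bLength + bDescriptorIDN */

/* Convert a big-endian UTF-16 string descriptor to ASCII, one byte per
 * character; anything outside printable ASCII becomes a space. */
static void string_desc_to_ascii(const uint8_t *desc, char *out, int out_sz)
{
	int desc_len = desc[0];
	int n = (desc_len - HDR_SIZE) / 2;
	int i;

	if (n >= out_sz)
		n = out_sz - 1;
	for (i = 0; i < n; i++) {
		uint16_t c = (desc[HDR_SIZE + 2 * i] << 8) |
			      desc[HDR_SIZE + 2 * i + 1];
		out[i] = (c >= 0x20 && c <= 0x7e) ? (char)c : ' ';
	}
	out[n] = '\0';
}

int main(void)
{
	/* "AB" encoded as a UTF-16BE string descriptor (IDN value assumed) */
	uint8_t desc[] = { 0x06, 0x05, 0x00, 'A', 0x00, 'B' };
	char name[8];

	string_desc_to_ascii(desc, name, sizeof(name));
	printf("%s\n", name);	/* prints "AB" */
	return 0;
}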
 +
  /**
   * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
   * @hba: Pointer to adapter instance
@@@ -3916,7 -1921,7 +3916,7 @@@ static inline int ufshcd_read_unit_desc
         * Unit descriptors are only available for general purpose LUs (LUN id
         * from 0 to 7) and RPMB Well known LU.
         */
 -      if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
 +      if (!ufs_is_valid_unit_desc_lun(lun))
                return -EOPNOTSUPP;
  
        return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@@ -4058,19 -2063,12 +4058,19 @@@ static void ufshcd_host_memory_configur
                                cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
  
                hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
 +              hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
 +                              (i * sizeof(struct utp_transfer_req_desc));
                hba->lrb[i].ucd_req_ptr =
                        (struct utp_upiu_req *)(cmd_descp + i);
 +              hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
                hba->lrb[i].ucd_rsp_ptr =
                        (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
 +              hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
 +                              response_offset;
                hba->lrb[i].ucd_prdt_ptr =
                        (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
 +              hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
 +                              prdt_offset;
        }
  }
  
@@@ -4094,7 -2092,7 +4094,7 @@@ static int ufshcd_dme_link_startup(stru
  
        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret)
 -              dev_err(hba->dev,
 +              dev_dbg(hba->dev,
                        "dme-link-startup: error code %d\n", ret);
        return ret;
  }
@@@ -4130,13 -2128,6 +4130,13 @@@ static inline void ufshcd_add_delay_bef
        usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
  }
  
 +static inline void ufshcd_save_tstamp_of_last_dme_cmd(
 +                      struct ufs_hba *hba)
 +{
 +      if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
 +              hba->last_dme_cmd_tstamp = ktime_get();
 +}
 +
  /**
   * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
   * @hba: per adapter instance
@@@ -4157,10 -2148,6 +4157,10 @@@ int ufshcd_dme_set_attr(struct ufs_hba 
        };
        const char *set = action[!!peer];
        int ret;
 +      int retries = UFS_UIC_COMMAND_RETRIES;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
  
        uic_cmd.command = peer ?
                UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
        uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
        uic_cmd.argument3 = mib_val;
  
 -      ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 +      do {
 +              /* for peer attributes we retry upon failure */
 +              ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 +              if (ret)
 +                      dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
 +                              set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
 +      } while (ret && peer && --retries);
 +
        if (ret)
 -              dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
 -                      set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
 +              dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
 +                      set, UIC_GET_ATTR_ID(attr_sel), mib_val,
 +                      UFS_UIC_COMMAND_RETRIES - retries);
  
        return ret;
  }
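The hunk above turns the single DME_SET attempt into a bounded retry loop for peer attributes: each failed attempt is logged at debug level, and only the final failure after all retries is logged as an error. A minimal standalone sketch of that retry shape follows; the retry count of 3 and the stand-in send_cmd() are assumptions, not the kernel's UFS_UIC_COMMAND_RETRIES or UIC path.

#include <stdio.h>

#define CMD_RETRIES 3	/* stands in for UFS_UIC_COMMAND_RETRIES (assumed) */

/* Stand-in for sending one UIC command; fails twice, then succeeds. */
static int send_cmd(void)
{
	static int calls;

	return (++calls < 3) ? -1 : 0;
}

/* Retry only when talking to the peer; local commands get one attempt. */
static int set_attr(int peer)
{
	int retries = CMD_RETRIES;
	int ret;

	do {
		ret = send_cmd();
		if (ret)
			fprintf(stderr, "attempt failed (%d)\n", ret);
	} while (ret && peer && --retries);

	if (ret)
		fprintf(stderr, "failed after %d retries\n",
			CMD_RETRIES - retries);
	return ret;
}

int main(void)
{
	return set_attr(1);	/* peer access: retried until it succeeds */
}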
@@@ -4204,7 -2183,6 +4204,7 @@@ int ufshcd_dme_get_attr(struct ufs_hba 
        };
        const char *get = action[!!peer];
        int ret;
 +      int retries = UFS_UIC_COMMAND_RETRIES;
        struct ufs_pa_layer_attr orig_pwr_info;
        struct ufs_pa_layer_attr temp_pwr_info;
        bool pwr_mode_change = false;
  
        uic_cmd.command = peer ?
                UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
 +
        uic_cmd.argument1 = attr_sel;
  
 -      ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 -      if (ret) {
 -              dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
 -                      get, UIC_GET_ATTR_ID(attr_sel), ret);
 -              goto out;
 -      }
 +      do {
 +              /* for peer attributes we retry upon failure */
 +              ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 +              if (ret)
 +                      dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
 +                              get, UIC_GET_ATTR_ID(attr_sel), ret);
 +      } while (ret && peer && --retries);
 +
 +      if (ret)
 +              dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
 +                      get, UIC_GET_ATTR_ID(attr_sel),
 +                      UFS_UIC_COMMAND_RETRIES - retries);
  
 -      if (mib_val)
 +      if (mib_val && !ret)
                *mib_val = uic_cmd.argument3;
  
        if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
@@@ -4285,7 -2253,6 +4285,7 @@@ static int ufshcd_uic_pwr_ctrl(struct u
        unsigned long flags;
        u8 status;
        int ret;
 +      bool reenable_intr = false;
  
        mutex_lock(&hba->uic_cmd_mutex);
        init_completion(&uic_async_done);
  
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->uic_async_done = &uic_async_done;
 -      ret = __ufshcd_send_uic_cmd(hba, cmd);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 -      if (ret) {
 -              dev_err(hba->dev,
 -                      "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
 -                      cmd->command, cmd->argument3, ret);
 -              goto out;
 +      if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
 +              ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
 +              /*
 +               * Make sure UIC command completion interrupt is disabled before
 +               * issuing UIC command.
 +               */
 +              wmb();
 +              reenable_intr = true;
        }
 -      ret = ufshcd_wait_for_uic_cmd(hba, cmd);
 +      ret = __ufshcd_send_uic_cmd(hba, cmd, false);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (ret) {
                dev_err(hba->dev,
                        "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
                        cmd->command, status);
                ret = (status != PWR_OK) ? status : -1;
        }
 +      ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
 +
  out:
 +      if (ret) {
 +              ufsdbg_set_err_state(hba);
 +              ufshcd_print_host_state(hba);
 +              ufshcd_print_pwr_info(hba);
 +              ufshcd_print_host_regs(hba);
 +              ufshcd_print_cmd_log(hba);
 +      }
 +
 +      ufshcd_save_tstamp_of_last_dme_cmd(hba);
        spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->active_uic_cmd = NULL;
        hba->uic_async_done = NULL;
 +      if (reenable_intr)
 +              ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        mutex_unlock(&hba->uic_cmd_mutex);
 +      return ret;
 +}
 +
 +int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
 +{
 +      unsigned long flags;
 +      int ret = 0;
 +      u32 tm_doorbell;
 +      u32 tr_doorbell;
 +      bool timeout = false, do_last_check = false;
 +      ktime_t start;
 +
 +      ufshcd_hold_all(hba);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      /*
 +       * Wait for all the outstanding tasks/transfer requests.
 +       * Verify by checking the doorbell registers are clear.
 +       */
 +      start = ktime_get();
 +      do {
 +              if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
 +                      ret = -EBUSY;
 +                      goto out;
 +              }
 +
 +              tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 +              tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +              if (!tm_doorbell && !tr_doorbell) {
 +                      timeout = false;
 +                      break;
 +              } else if (do_last_check) {
 +                      break;
 +              }
  
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              schedule();
 +              if (ktime_to_us(ktime_sub(ktime_get(), start)) >
 +                  wait_timeout_us) {
 +                      timeout = true;
 +                      /*
 +                       * We might have been scheduled out for a long time,
 +                       * so make sure to check whether the doorbells have
 +                       * cleared by now.
 +                       */
 +                      do_last_check = true;
 +              }
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +      } while (tm_doorbell || tr_doorbell);
 +
 +      if (timeout) {
 +              dev_err(hba->dev,
 +                      "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
 +                      __func__, tm_doorbell, tr_doorbell);
 +              ret = -EBUSY;
 +      }
 +out:
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufshcd_release_all(hba);
        return ret;
  }
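ufshcd_wait_for_doorbell_clr() above polls both doorbell registers, dropping the host lock and calling schedule() between samples; once the deadline passes it takes one last sample (do_last_check) before returning -EBUSY, since the thread may have been descheduled past the deadline while the doorbells actually cleared. A user-space sketch of the same poll-with-final-check pattern, with a fake busy source, is shown below (illustrative only).

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Poll read_busy() until it returns 0 or the timeout expires; take one
 * last sample after the deadline in case we were descheduled for a while. */
static int wait_idle(int (*read_busy)(void), long long timeout_us)
{
	long long start = now_us();
	bool last_check = false;
	int busy;

	for (;;) {
		busy = read_busy();
		if (!busy)
			return 0;
		if (last_check)
			return -EBUSY;
		if (now_us() - start > timeout_us)
			last_check = true;
	}
}

static int fake_busy(void) { static int n = 5; return n-- > 0; }

int main(void)
{
	printf("%d\n", wait_idle(fake_busy, 1000));
	return 0;
}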
  
   */
  static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
  {
 -      struct uic_command uic_cmd = {0};
 +      struct uic_command uic_cmd = {0};
 +      int ret;
 +
 +      if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
 +              ret = ufshcd_dme_set(hba,
 +                              UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
 +              if (ret) {
 +                      dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
 +                                              __func__, ret);
 +                      goto out;
 +              }
 +      }
 +
 +      uic_cmd.command = UIC_CMD_DME_SET;
 +      uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 +      uic_cmd.argument3 = mode;
 +      hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
 +      ufshcd_hold_all(hba);
 +      ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 +      hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
 +      ufshcd_release_all(hba);
 +out:
 +      return ret;
 +}
 +
 +static int ufshcd_link_recovery(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      unsigned long flags;
 +
 +      /*
 +       * Check if there is any race with fatal error handling.
 +       * If so, wait for it to complete. Even though fatal error
 +       * handling does reset and restore in some cases, don't assume
 +       * anything about its outcome; we are just avoiding the race here.
 +       */
 +      do {
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              if (!(work_pending(&hba->eh_work) ||
 +                              hba->ufshcd_state == UFSHCD_STATE_RESET))
 +                      break;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
 +              flush_work(&hba->eh_work);
 +      } while (1);
 +
 +
 +      /*
 +       * We don't know whether the previous reset really reset the host
 +       * controller, so force a reset here to be sure.
 +       */
 +      hba->ufshcd_state = UFSHCD_STATE_ERROR;
 +      hba->force_host_reset = true;
 +      schedule_work(&hba->eh_work);
 +
 +      /* wait for the reset work to finish */
 +      do {
 +              if (!(work_pending(&hba->eh_work) ||
 +                              hba->ufshcd_state == UFSHCD_STATE_RESET))
 +                      break;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
 +              flush_work(&hba->eh_work);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +      } while (1);
 +
 +      if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
 +            ufshcd_is_link_active(hba)))
 +              ret = -ENOLINK;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      return ret;
 +}
 +
 +static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 +{
        int ret;
 +      struct uic_command uic_cmd = {0};
 +      ktime_t start = ktime_get();
  
 -      if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
 -              ret = ufshcd_dme_set(hba,
 -                              UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
 -              if (ret) {
 -                      dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
 -                                              __func__, ret);
 -                      goto out;
 +      uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
 +      ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 +      trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
 +                           ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 +
 +      /*
 +       * Do full reinit if enter failed or if LINERESET was detected during
 +       * Hibern8 operation. After LINERESET, the link moves to the default
 +       * PWM-G1 mode, hence a full reinit is required to move the link back
 +       * to HS speeds.
 +       */
 +      if (ret || hba->full_init_linereset) {
 +              int err;
 +
 +              hba->full_init_linereset = false;
 +              ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
 +              dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
 +                      __func__, ret);
 +              /*
 +               * If link recovery fails then return the error code (-ENOLINK)
 +               * returned by ufshcd_link_recovery().
 +               * If link recovery succeeds then return -EAGAIN so that
 +               * hibern8 enter can be retried.
 +               */
 +              err = ufshcd_link_recovery(hba);
 +              if (err) {
 +                      dev_err(hba->dev, "%s: link recovery failed", __func__);
 +                      ret = err;
 +              } else {
 +                      ret = -EAGAIN;
                }
 +      } else {
 +              dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
 +                      ktime_to_us(ktime_get()));
        }
  
 -      uic_cmd.command = UIC_CMD_DME_SET;
 -      uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 -      uic_cmd.argument3 = mode;
 -      ufshcd_hold(hba, false);
 -      ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 -      ufshcd_release(hba);
 -
 -out:
        return ret;
  }
  
 -static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 +int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
  {
 -      struct uic_command uic_cmd = {0};
 -
 -      uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
 +      int ret = 0, retries;
  
 -      return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 +      for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
 +              ret = __ufshcd_uic_hibern8_enter(hba);
 +              if (!ret)
 +                      goto out;
 +              else if (ret != -EAGAIN)
 +                      /* Unable to recover the link, so no point proceeding */
 +                      BUG();
 +      }
 +out:
 +      return ret;
  }
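ufshcd_uic_hibern8_enter() above retries the enter attempt only when the inner call reports -EAGAIN (link recovery succeeded); any other failure means the link could not be recovered and the driver stops retrying. A compact sketch of retrying only on a designated retryable error code follows; the retry count of 3 and try_enter() are assumptions, not UIC_HIBERN8_ENTER_RETRIES or the real UIC path.

#include <errno.h>
#include <stdio.h>

#define ENTER_RETRIES 3	/* stands in for UIC_HIBERN8_ENTER_RETRIES (assumed) */

/* Stand-in for one hibern8-enter attempt: fails once, then succeeds. */
static int try_enter(void)
{
	static int calls;

	return (++calls < 2) ? -EAGAIN : 0;
}

static int enter_with_retries(void)
{
	int ret = 0;
	int retries;

	for (retries = ENTER_RETRIES; retries > 0; retries--) {
		ret = try_enter();
		if (!ret)
			break;		/* success */
		if (ret != -EAGAIN)
			break;		/* unrecoverable: stop retrying */
	}
	return ret;
}

int main(void)
{
	printf("%d\n", enter_with_retries());	/* prints 0 */
	return 0;
}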
  
 -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 +int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
  {
        struct uic_command uic_cmd = {0};
        int ret;
 +      ktime_t start = ktime_get();
  
        uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 +      trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
 +                           ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 +
 +      /* Do full reinit if exit failed */
        if (ret) {
 -              ufshcd_set_link_off(hba);
 -              ret = ufshcd_host_reset_and_restore(hba);
 +              ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
 +              dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
 +                      __func__, ret);
 +              ret = ufshcd_link_recovery(hba);
 +              /* Unable to recover the link, so no point proceeding */
 +              if (ret)
 +                      BUG();
 +      } else {
 +              dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
 +                      ktime_to_us(ktime_get()));
 +              hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
 +              hba->ufs_stats.hibern8_exit_cnt++;
        }
  
        return ret;
@@@ -4608,8 -2386,8 +4608,8 @@@ static int ufshcd_get_max_pwr_mode(stru
        if (hba->max_pwr_info.is_valid)
                return 0;
  
 -      pwr_info->pwr_tx = FASTAUTO_MODE;
 -      pwr_info->pwr_rx = FASTAUTO_MODE;
 +      pwr_info->pwr_tx = FAST_MODE;
 +      pwr_info->pwr_rx = FAST_MODE;
        pwr_info->hs_rate = PA_HS_MODE_B;
  
        /* Get the connected lane count */
                                __func__, pwr_info->gear_rx);
                        return -EINVAL;
                }
 -              pwr_info->pwr_rx = SLOWAUTO_MODE;
 +              pwr_info->pwr_rx = SLOW_MODE;
        }
  
        ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
                                __func__, pwr_info->gear_tx);
                        return -EINVAL;
                }
 -              pwr_info->pwr_tx = SLOWAUTO_MODE;
 +              pwr_info->pwr_tx = SLOW_MODE;
        }
  
        hba->max_pwr_info.is_valid = true;
        return 0;
  }
  
 -static int ufshcd_change_power_mode(struct ufs_hba *hba,
 +int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode)
  {
 -      int ret;
 +      int ret = 0;
  
        /* if already configured to the requested pwr_mode */
 -      if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
 -          pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
 +      if (!hba->restore_needed &&
 +              pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
 +              pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
            pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
            pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
            pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
                return 0;
        }
  
 +      ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
 +      if (ret)
 +              return ret;
 +
        /*
         * Configure attributes for power mode change with below.
         * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
                                                pwr_mode->hs_rate);
  
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
 +                      DL_FC0ProtectionTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
 +                      DL_TC0ReplayTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
 +                      DL_AFC0ReqTimeOutVal_Default);
 +
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
 +                      DL_FC0ProtectionTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
 +                      DL_TC0ReplayTimeOutVal_Default);
 +      ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
 +                      DL_AFC0ReqTimeOutVal_Default);
 +
        ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
                        | pwr_mode->pwr_tx);
  
        if (ret) {
 +              ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
                dev_err(hba->dev,
                        "%s: power mode change failed %d\n", __func__, ret);
        } else {
  
                memcpy(&hba->pwr_info, pwr_mode,
                        sizeof(struct ufs_pa_layer_attr));
 +              hba->ufs_stats.power_mode_change_cnt++;
        }
  
        return ret;
@@@ -4764,8 -2521,6 +4764,8 @@@ static int ufshcd_config_pwr_mode(struc
                memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
  
        ret = ufshcd_change_power_mode(hba, &final_params);
 +      if (!ret)
 +              ufshcd_print_pwr_info(hba);
  
        return ret;
  }
   */
  static int ufshcd_complete_dev_init(struct ufs_hba *hba)
  {
 -      int i, retries, err = 0;
 +      int i;
 +      int err;
        bool flag_res = 1;
  
 -      for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 -              /* Set the fDeviceInit flag */
 -              err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 -                                      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 -              if (!err || err == -ETIMEDOUT)
 -                      break;
 -              dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
 -      }
 +      err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 +              QUERY_FLAG_IDN_FDEVICEINIT, NULL);
        if (err) {
                dev_err(hba->dev,
                        "%s setting fDeviceInit flag failed with error %d\n",
                goto out;
        }
  
 -      /* poll for max. 100 iterations for fDeviceInit flag to clear */
 -      for (i = 0; i < 100 && !err && flag_res; i++) {
 -              for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 -                      err = ufshcd_query_flag(hba,
 -                                      UPIU_QUERY_OPCODE_READ_FLAG,
 -                                      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 -                      if (!err || err == -ETIMEDOUT)
 -                              break;
 -                      dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
 -                                      err);
 -              }
 -      }
 +      /* poll for max. 1000 iterations for fDeviceInit flag to clear */
 +      for (i = 0; i < 1000 && !err && flag_res; i++)
 +              err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 +                      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 +
        if (err)
                dev_err(hba->dev,
                        "%s reading fDeviceInit flag failed with error %d\n",
@@@ -4816,7 -2583,7 +4816,7 @@@ out
   * To bring UFS host controller to operational state,
   * 1. Enable required interrupts
   * 2. Configure interrupt aggregation
 - * 3. Program UTRL and UTMRL base addres
 + * 3. Program UTRL and UTMRL base address
   * 4. Configure run-stop-registers
   *
   * Returns 0 on success, non-zero value on failure
@@@ -4846,13 -2613,8 +4846,13 @@@ static int ufshcd_make_hba_operational(
                        REG_UTP_TASK_REQ_LIST_BASE_H);
  
        /*
 +       * Make sure base address and interrupt setup are updated before
 +       * enabling the run/stop registers below.
 +       */
 +      wmb();
 +
 +      /*
         * UCRDY, UTMRLDY and UTRLRDY bits must be 1
 -       * DEI, HEI bits must be 0
         */
        reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
        if (!(ufshcd_get_lists_status(reg))) {
  }
  
  /**
 + * ufshcd_hba_stop - Send controller to reset state
 + * @hba: per adapter instance
 + * @can_sleep: perform sleep or just spin
 + */
 +static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
 +{
 +      int err;
 +
 +      ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
 +      err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 +                                      CONTROLLER_ENABLE, CONTROLLER_DISABLE,
 +                                      10, 1, can_sleep);
 +      if (err)
 +              dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
 +}
 +
 +/**
   * ufshcd_hba_enable - initialize the controller
   * @hba: per adapter instance
   *
@@@ -4905,9 -2650,18 +4905,9 @@@ static int ufshcd_hba_enable(struct ufs
         * development and testing of this driver. msleep can be changed to
         * mdelay and retry count can be reduced based on the controller.
         */
 -      if (!ufshcd_is_hba_active(hba)) {
 -
 +      if (!ufshcd_is_hba_active(hba))
                /* change controller state to "reset state" */
 -              ufshcd_hba_stop(hba);
 -
 -              /*
 -               * This delay is based on the testing done with UFS host
 -               * controller FPGA. The delay can be changed based on the
 -               * host controller used.
 -               */
 -              msleep(5);
 -      }
 +              ufshcd_hba_stop(hba, true);
  
        /* UniPro link is disabled at this point */
        ufshcd_set_link_off(hba);
@@@ -4981,11 -2735,6 +4981,11 @@@ static int ufshcd_disable_tx_lcc(struc
        return err;
  }
  
 +static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
 +{
 +      return ufshcd_disable_tx_lcc(hba, false);
 +}
 +
  static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
  {
        return ufshcd_disable_tx_lcc(hba, true);
@@@ -5001,26 -2750,14 +5001,26 @@@ static int ufshcd_link_startup(struct u
  {
        int ret;
        int retries = DME_LINKSTARTUP_RETRIES;
 +      bool link_startup_again = false;
 +
 +      /*
 +       * If the UFS device isn't active then we will have to issue link
 +       * startup 2 times to make sure the device state moves to active.
 +       */
 +      if (!ufshcd_is_ufs_dev_active(hba))
 +              link_startup_again = true;
  
 +link_startup:
        do {
                ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
  
                ret = ufshcd_dme_link_startup(hba);
 +              if (ret)
 +                      ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
  
                /* check if device is detected by inter-connect layer */
                if (!ret && !ufshcd_is_device_present(hba)) {
 +                      ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
                        dev_err(hba->dev, "%s: Device not present\n", __func__);
                        ret = -ENXIO;
                        goto out;
                /* failed to get the link up... retire */
                goto out;
  
 +      if (link_startup_again) {
 +              link_startup_again = false;
 +              retries = DME_LINKSTARTUP_RETRIES;
 +              goto link_startup;
 +      }
 +
 +      /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
 +      ufshcd_init_pwr_info(hba);
 +      ufshcd_print_pwr_info(hba);
 +
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
                ret = ufshcd_disable_device_tx_lcc(hba);
                if (ret)
                        goto out;
        }
  
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
 +              ret = ufshcd_disable_host_tx_lcc(hba);
 +              if (ret)
 +                      goto out;
 +      }
 +
        /* Include any host controller configuration via UIC commands */
        ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
        if (ret)
  
        ret = ufshcd_make_hba_operational(hba);
  out:
 -      if (ret)
 +      if (ret) {
                dev_err(hba->dev, "link startup failed %d\n", ret);
 +              ufshcd_print_host_state(hba);
 +              ufshcd_print_pwr_info(hba);
 +              ufshcd_print_host_regs(hba);
 +      }
        return ret;
  }
  
@@@ -5092,7 -2809,7 +5092,7 @@@ static int ufshcd_verify_dev_init(struc
        int err = 0;
        int retries;
  
 -      ufshcd_hold(hba, false);
 +      ufshcd_hold_all(hba);
        mutex_lock(&hba->dev_cmd.lock);
        for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
                dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
        }
        mutex_unlock(&hba->dev_cmd.lock);
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
  
        if (err)
                dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@@ -5130,10 -2847,10 +5130,10 @@@ static void ufshcd_set_queue_depth(stru
  
        lun_qdepth = hba->nutrs;
        ret = ufshcd_read_unit_desc_param(hba,
 -                                        ufshcd_scsi_to_upiu_lun(sdev->lun),
 -                                        UNIT_DESC_PARAM_LU_Q_DEPTH,
 -                                        &lun_qdepth,
 -                                        sizeof(lun_qdepth));
 +                        ufshcd_scsi_to_upiu_lun(sdev->lun),
 +                        UNIT_DESC_PARAM_LU_Q_DEPTH,
 +                        &lun_qdepth,
 +                        sizeof(lun_qdepth));
  
        /* Some WLUN doesn't support unit descriptor */
        if (ret == -EOPNOTSUPP)
@@@ -5263,9 -2980,6 +5263,9 @@@ static int ufshcd_slave_configure(struc
        blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
        blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
  
 +      sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
 +      sdev->use_rpm_auto = 1;
 +
        return 0;
  }
  
@@@ -5375,7 -3089,6 +5375,7 @@@ ufshcd_transfer_rsp_status(struct ufs_h
        int result = 0;
        int scsi_status;
        int ocs;
 +      bool print_prdt;
  
        /* overall command status of utrd */
        ocs = ufshcd_get_tr_ocs(lrbp);
        switch (ocs) {
        case OCS_SUCCESS:
                result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
 -
 +              hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
                switch (result) {
                case UPIU_TRANSACTION_RESPONSE:
                        /*
                        scsi_status = result & MASK_SCSI_STATUS;
                        result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
  
 -                      if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
 -                              schedule_work(&hba->eeh_work);
 +                      /*
 +                       * Currently we only support BKOPS exception
 +                       * events, so we can ignore BKOPS exception events
 +                       * during power management callbacks. A BKOPS
 +                       * exception event is not expected to be raised in
 +                       * the runtime suspend callback as that allows
 +                       * urgent bkops. During system suspend we forcefully
 +                       * disable bkops anyway, and if urgent bkops is
 +                       * needed it will be enabled on system resume. A
 +                       * long term solution could be to abort the system
 +                       * suspend if the UFS device needs urgent BKOPS.
 +                       */
 +                      if (!hba->pm_op_in_progress &&
 +                          ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) {
 +                              /*
 +                               * Prevent suspend once eeh_work is scheduled
 +                               * to avoid deadlock between ufshcd_suspend
 +                               * and exception event handler.
 +                               */
 +                              if (schedule_work(&hba->eeh_work))
 +                                      pm_runtime_get_noresume(hba->dev);
 +                      }
                        break;
                case UPIU_TRANSACTION_REJECT_UPIU:
                        /* TODO: handle Reject UPIU Response */
        case OCS_MISMATCH_RESP_UPIU_SIZE:
        case OCS_PEER_COMM_FAILURE:
        case OCS_FATAL_ERROR:
 +      case OCS_DEVICE_FATAL_ERROR:
 +      case OCS_INVALID_CRYPTO_CONFIG:
 +      case OCS_GENERAL_CRYPTO_ERROR:
        default:
                result |= DID_ERROR << 16;
                dev_err(hba->dev,
 -              "OCS error from controller = %x\n", ocs);
 +                              "OCS error from controller = %x for tag %d\n",
 +                              ocs, lrbp->task_tag);
 +              /*
 +               * This is called in interrupt context, hence avoid sleep
 +               * while printing debug registers. Also print only the minimum
 +               * debug registers needed to debug OCS failure.
 +               */
 +              __ufshcd_print_host_regs(hba, true);
 +              ufshcd_print_host_state(hba);
                break;
        } /* end of switch */
  
 +      if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
 +              print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
 +                      ocs == OCS_MISMATCH_DATA_BUF_SIZE);
 +              ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
 +      }
 +
 +      if ((host_byte(result) == DID_ERROR) ||
 +          (host_byte(result) == DID_ABORT))
 +              ufsdbg_set_err_state(hba);
 +
        return result;
  }
  
   * ufshcd_uic_cmd_compl - handle completion of uic command
   * @hba: per adapter instance
   * @intr_status: interrupt status generated by the controller
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 +static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
  {
 +      irqreturn_t retval = IRQ_NONE;
 +
        if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
                hba->active_uic_cmd->argument2 |=
                        ufshcd_get_uic_cmd_result(hba);
                hba->active_uic_cmd->argument3 =
                        ufshcd_get_dme_attr_val(hba);
                complete(&hba->active_uic_cmd->done);
 +              retval = IRQ_HANDLED;
        }
  
 -      if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
 -              complete(hba->uic_async_done);
 +      if (intr_status & UFSHCD_UIC_PWR_MASK) {
 +              if (hba->uic_async_done) {
 +                      complete(hba->uic_async_done);
 +                      retval = IRQ_HANDLED;
 +              } else if (ufshcd_is_auto_hibern8_supported(hba)) {
 +                      /*
 +                       * If uic_async_done flag is not set then this
 +                       * is an Auto hibern8 err interrupt.
 +                       * Perform a host reset followed by a full
 +                       * link recovery.
 +                       */
 +                      hba->ufshcd_state = UFSHCD_STATE_ERROR;
 +                      hba->force_host_reset = true;
 +                      dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
 +                              __func__, (intr_status & UIC_HIBERNATE_ENTER) ?
 +                              "Enter" : "Exit",
 +                              intr_status, ufshcd_get_upmcrs(hba));
 +                      __ufshcd_print_host_regs(hba, true);
 +                      ufshcd_print_host_state(hba);
 +                      schedule_work(&hba->eh_work);
 +                      retval = IRQ_HANDLED;
 +              }
 +      }
 +      return retval;
  }
  
  /**
 - * ufshcd_transfer_req_compl - handle SCSI and query command completion
 + * ufshcd_abort_outstanding_requests - abort all outstanding transfer requests.
   * @hba: per adapter instance
 + * @result: error result to inform scsi layer about
   */
 -static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 +void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
  {
 +      u8 index;
        struct ufshcd_lrb *lrbp;
        struct scsi_cmnd *cmd;
 -      unsigned long completed_reqs;
 -      u32 tr_doorbell;
 -      int result;
 -      int index;
  
 -      /* Resetting interrupt aggregation counters first and reading the
 -       * DOOR_BELL afterward allows us to handle all the completed requests.
 -       * In order to prevent other interrupts starvation the DB is read once
 -       * after reset. The down side of this solution is the possibility of
 -       * false interrupt if device completes another request after resetting
 -       * aggregation and before reading the DB.
 -       */
 -      if (ufshcd_is_intr_aggr_allowed(hba))
 -              ufshcd_reset_intr_aggr(hba);
 +      if (!hba->outstanding_reqs)
 +              return;
  
 -      tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 -      completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
 +      for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
 +              lrbp = &hba->lrb[index];
 +              cmd = lrbp->cmd;
 +              if (cmd) {
 +                      ufshcd_cond_add_cmd_trace(hba, index, "failed");
 +                      ufshcd_update_error_stats(hba,
 +                                      UFS_ERR_INT_FATAL_ERRORS);
 +                      scsi_dma_unmap(cmd);
 +                      cmd->result = result;
 +                      /* Clear pending transfer requests */
 +                      ufshcd_clear_cmd(hba, index);
 +                      ufshcd_outstanding_req_clear(hba, index);
 +                      clear_bit_unlock(index, &hba->lrb_in_use);
 +                      lrbp->complete_time_stamp = ktime_get();
 +                      update_req_stats(hba, lrbp);
 +                      /* Mark completed command as NULL in LRB */
 +                      lrbp->cmd = NULL;
 +                      ufshcd_release_all(hba);
 +                      if (cmd->request) {
 +                              /*
 +                               * As we are accessing the "request" structure,
 +                               * this must be called before calling
 +                               * ->scsi_done() callback.
 +                               */
 +                              ufshcd_vops_pm_qos_req_end(hba, cmd->request,
 +                                      true);
 +                              ufshcd_vops_crypto_engine_cfg_end(hba,
 +                                              lrbp, cmd->request);
 +                      }
 +                      /* Do not touch lrbp after scsi done */
 +                      cmd->scsi_done(cmd);
 +              } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 +                      if (hba->dev_cmd.complete) {
 +                              ufshcd_cond_add_cmd_trace(hba, index,
 +                                                      "dev_failed");
 +                              ufshcd_outstanding_req_clear(hba, index);
 +                              complete(hba->dev_cmd.complete);
 +                      }
 +              }
 +              if (ufshcd_is_clkscaling_supported(hba))
 +                      hba->clk_scaling.active_reqs--;
 +      }
 +}
 +
 +/**
 + * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 + * @hba: per adapter instance
 + * @completed_reqs: requests to complete
 + */
 +static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 +                                      unsigned long completed_reqs)
 +{
 +      struct ufshcd_lrb *lrbp;
 +      struct scsi_cmnd *cmd;
 +      int result;
 +      int index;
 +      struct request *req;
  
        for_each_set_bit(index, &completed_reqs, hba->nutrs) {
                lrbp = &hba->lrb[index];
                cmd = lrbp->cmd;
                if (cmd) {
 +                      ufshcd_cond_add_cmd_trace(hba, index, "complete");
 +                      ufshcd_update_tag_stats_completion(hba, cmd);
                        result = ufshcd_transfer_rsp_status(hba, lrbp);
                        scsi_dma_unmap(cmd);
                        cmd->result = result;
 +                      clear_bit_unlock(index, &hba->lrb_in_use);
 +                      lrbp->complete_time_stamp = ktime_get();
 +                      update_req_stats(hba, lrbp);
                        /* Mark completed command as NULL in LRB */
                        lrbp->cmd = NULL;
 -                      clear_bit_unlock(index, &hba->lrb_in_use);
 +                      hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
 +                      __ufshcd_release(hba, false);
 +                      __ufshcd_hibern8_release(hba, false);
 +                      if (cmd->request) {
 +                              /*
 +                               * As we are accessing the "request" structure,
 +                               * this must be called before calling
 +                               * ->scsi_done() callback.
 +                               */
 +                              ufshcd_vops_pm_qos_req_end(hba, cmd->request,
 +                                      false);
 +                              ufshcd_vops_crypto_engine_cfg_end(hba,
 +                                      lrbp, cmd->request);
 +                      }
 +
 +                      req = cmd->request;
 +                      if (req) {
 +                              /* Update IO svc time latency histogram */
 +                              if (req->lat_hist_enabled) {
 +                                      ktime_t completion;
 +                                      u_int64_t delta_us;
 +
 +                                      completion = ktime_get();
 +                                      delta_us = ktime_us_delta(completion,
 +                                                req->lat_hist_io_start);
 +                                      blk_update_latency_hist(
 +                                              (rq_data_dir(req) == READ) ?
 +                                              &hba->io_lat_read :
 +                                              &hba->io_lat_write, delta_us);
 +                              }
 +                      }
                        /* Do not touch lrbp after scsi done */
                        cmd->scsi_done(cmd);
 -                      __ufshcd_release(hba);
                } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 -                      if (hba->dev_cmd.complete)
 +                      if (hba->dev_cmd.complete) {
 +                              ufshcd_cond_add_cmd_trace(hba, index,
 +                                              "dcmp");
                                complete(hba->dev_cmd.complete);
 +                      }
                }
 +              if (ufshcd_is_clkscaling_supported(hba))
 +                      hba->clk_scaling.active_reqs--;
        }
  
        /* clear corresponding bits of completed commands */
  }
  
  /**
 + * ufshcd_transfer_req_compl - handle SCSI and query command completion
 + * @hba: per adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
 + */
 +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 +{
 +      unsigned long completed_reqs;
 +      u32 tr_doorbell;
 +
 +      /* Resetting interrupt aggregation counters first and reading the
 +       * DOOR_BELL afterward allows us to handle all the completed requests.
 +       * In order to prevent other interrupts starvation the DB is read once
 +       * after reset. The down side of this solution is the possibility of
 +       * false interrupt if device completes another request after resetting
 +       * aggregation and before reading the DB.
 +       */
 +      if (ufshcd_is_intr_aggr_allowed(hba))
 +              ufshcd_reset_intr_aggr(hba);
 +
 +      tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 +      completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
 +
 +      if (completed_reqs) {
 +              __ufshcd_transfer_req_compl(hba, completed_reqs);
 +              return IRQ_HANDLED;
 +      } else {
 +              return IRQ_NONE;
 +      }
 +}
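ufshcd_transfer_req_compl() above computes the completed requests as tr_doorbell ^ hba->outstanding_reqs: because the hardware only clears doorbell bits the driver previously set, the XOR leaves exactly the tags that were outstanding but are no longer pending. A tiny standalone demonstration of that bitmask arithmetic is shown below (values are made up for the example).

#include <stdio.h>

int main(void)
{
	/* Requests the driver issued (tags 0, 1, 3, 5) ... */
	unsigned long outstanding = 0x2Bul;	/* 0b101011 */
	/* ... and what the doorbell still shows as pending (tags 1, 5). */
	unsigned long doorbell    = 0x22ul;	/* 0b100010 */

	/* The doorbell is a subset of outstanding, so XOR == outstanding & ~doorbell:
	 * bits set in outstanding but clear in the doorbell are completed. */
	unsigned long completed = doorbell ^ outstanding;
	int tag;

	for (tag = 0; tag < 8; tag++)
		if (completed & (1ul << tag))
			printf("tag %d completed\n", tag);	/* tags 0 and 3 */
	return 0;
}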
 +
 +/**
   * ufshcd_disable_ee - disable exception event
   * @hba: per-adapter instance
   * @mask: exception event to disable
@@@ -5720,7 -3244,7 +5720,7 @@@ static int ufshcd_disable_ee(struct ufs
  
        val = hba->ee_ctrl_mask & ~mask;
        val &= 0xFFFF; /* 2 bytes */
 -      err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 +      err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
        if (!err)
                hba->ee_ctrl_mask &= ~mask;
@@@ -5748,7 -3272,7 +5748,7 @@@ static int ufshcd_enable_ee(struct ufs_
  
        val = hba->ee_ctrl_mask | mask;
        val &= 0xFFFF; /* 2 bytes */
 -      err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 +      err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
        if (!err)
                hba->ee_ctrl_mask |= mask;
@@@ -5774,7 -3298,7 +5774,7 @@@ static int ufshcd_enable_auto_bkops(str
        if (hba->auto_bkops_enabled)
                goto out;
  
 -      err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 +      err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
                        QUERY_FLAG_IDN_BKOPS_EN, NULL);
        if (err) {
                dev_err(hba->dev, "%s: failed to enable bkops %d\n",
        }
  
        hba->auto_bkops_enabled = true;
 +      trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
  
        /* No need of URGENT_BKOPS exception from the device */
        err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@@ -5824,7 -3347,7 +5824,7 @@@ static int ufshcd_disable_auto_bkops(st
                goto out;
        }
  
 -      err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
 +      err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
                        QUERY_FLAG_IDN_BKOPS_EN, NULL);
        if (err) {
                dev_err(hba->dev, "%s: failed to disable bkops %d\n",
        }
  
        hba->auto_bkops_enabled = false;
 +      trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
  out:
        return err;
  }
@@@ -5863,7 -3385,7 +5863,7 @@@ static void ufshcd_force_reset_auto_bko
  
  static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
  {
 -      return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 +      return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
                        QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
  }
  
   */
  static int ufshcd_urgent_bkops(struct ufs_hba *hba)
  {
 -      return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 +      return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
 +}
 +
 +static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
 +{
 +      return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 +                      QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
 +}
 +
 +static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
 +{
 +      int err;
 +      u32 curr_status = 0;
 +
 +      if (hba->is_urgent_bkops_lvl_checked)
 +              goto enable_auto_bkops;
 +
 +      err = ufshcd_get_bkops_status(hba, &curr_status);
 +      if (err) {
 +              dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
 +                              __func__, err);
 +              goto out;
 +      }
 +
 +      /*
 +       * We are seeing that some devices raise the urgent bkops
 +       * exception event even when the BKOPS status doesn't indicate
 +       * performance impacted or critical. Handle such devices by
 +       * determining their urgent bkops status at runtime.
 +       */
 +      if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
 +              dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
 +                              __func__, curr_status);
 +              /* update the current status as the urgent bkops level */
 +              hba->urgent_bkops_lvl = curr_status;
 +              hba->is_urgent_bkops_lvl_checked = true;
 +      }
 +
 +enable_auto_bkops:
 +      err = ufshcd_enable_auto_bkops(hba);
 +out:
 +      if (err < 0)
 +              dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
 +                              __func__, err);
 +}
 +
 +/**
 + * ufshcd_exception_event_handler - handle exceptions raised by device
 + * @work: pointer to work data
 + *
 + * Read bExceptionEventStatus attribute from the device and handle the
 + * exception event accordingly.
 + */
 +static void ufshcd_exception_event_handler(struct work_struct *work)
 +{
 +      struct ufs_hba *hba;
 +      int err;
 +      u32 status = 0;
 +      hba = container_of(work, struct ufs_hba, eeh_work);
 +
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_scsi_block_requests(hba);
 +      err = ufshcd_get_ee_status(hba, &status);
 +      if (err) {
 +              dev_err(hba->dev, "%s: failed to get exception status %d\n",
 +                              __func__, err);
 +              goto out;
 +      }
 +
 +      status &= hba->ee_ctrl_mask;
 +
 +      if (status & MASK_EE_URGENT_BKOPS)
 +              ufshcd_bkops_exception_event_handler(hba);
 +
 +out:
 +      ufshcd_scsi_unblock_requests(hba);
 +      /*
 +       * pm_runtime_get_noresume is called while scheduling
 +       * eeh_work to avoid suspend racing with exception work.
 +       * Hence decrement usage counter using pm_runtime_put_noidle
 +       * to allow suspend on completion of exception event handler.
 +       */
 +      pm_runtime_put_noidle(hba->dev);
 +      pm_runtime_put(hba->dev);
 +      return;
  }
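The handler above pairs the pm_runtime_get_noresume() taken when eeh_work is scheduled (see the completion path earlier in this diff) with a pm_runtime_put_noidle() here, so runtime suspend stays blocked exactly while an exception event is in flight. A toy user-space sketch of that get-when-queued / put-when-done pairing follows; the function names are made up and this is not the kernel PM API.

#include <assert.h>
#include <stdio.h>

static int usage_count;		/* stands in for the PM usage counter */

static void get_noresume(void) { usage_count++; }	/* taken at queue time */
static void put_noidle(void)   { usage_count--; }	/* dropped by the worker */
static int  may_suspend(void)  { return usage_count == 0; }

static void queue_exception_work(void)
{
	get_noresume();		/* block suspend before the work runs */
	/* ... the work item would be queued here ... */
}

static void exception_worker(void)
{
	/* ... handle the exception event ... */
	put_noidle();		/* allow suspend again */
}

int main(void)
{
	assert(may_suspend());
	queue_exception_work();
	assert(!may_suspend());	/* suspend blocked while work is pending */
	exception_worker();
	assert(may_suspend());
	printf("ok\n");
	return 0;
}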
  
 -static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
 +/* Complete requests that have door-bell cleared */
 +static void ufshcd_complete_requests(struct ufs_hba *hba)
  {
 -      return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 -                      QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
 +      ufshcd_transfer_req_compl(hba);
 +      ufshcd_tmc_handler(hba);
  }
  
  /**
 - * ufshcd_exception_event_handler - handle exceptions raised by device
 - * @work: pointer to work data
 + * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
 + *                            to recover from the DL NAC errors or not.
 + * @hba: per-adapter instance
   *
 - * Read bExceptionEventStatus attribute from the device and handle the
 - * exception event accordingly.
 + * Returns true if error handling is required, false otherwise
   */
 -static void ufshcd_exception_event_handler(struct work_struct *work)
 +static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
  {
 -      struct ufs_hba *hba;
 -      int err;
 -      u32 status = 0;
 -      hba = container_of(work, struct ufs_hba, eeh_work);
 +      unsigned long flags;
 +      bool err_handling = true;
  
 -      pm_runtime_get_sync(hba->dev);
 -      scsi_block_requests(hba->host);
 -      err = ufshcd_get_ee_status(hba, &status);
 -      if (err) {
 -              dev_err(hba->dev, "%s: failed to get exception status %d\n",
 -                              __func__, err);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      /*
 +       * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
 +       * device fatal error and/or DL NAC & REPLAY timeout errors.
 +       */
 +      if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
 +              goto out;
 +
 +      if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
 +          ((hba->saved_err & UIC_ERROR) &&
 +           (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
 +              /*
 +               * we have to do error recovery, but at least silence the
 +               * error logs.
 +               */
 +              hba->silence_err_logs = true;
                goto out;
        }
  
 -      status &= hba->ee_ctrl_mask;
 -      if (status & MASK_EE_URGENT_BKOPS) {
 -              err = ufshcd_urgent_bkops(hba);
 -              if (err < 0)
 -                      dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
 -                                      __func__, err);
 +      if ((hba->saved_err & UIC_ERROR) &&
 +          (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
 +              int err;
 +              /*
 +               * wait for 50ms to see if we can get any other errors or not.
 +               */
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              msleep(50);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +
 +              /*
 +               * Now check whether we have got any severe errors other
 +               * than the DL NAC error.
 +               */
 +              if ((hba->saved_err & INT_FATAL_ERRORS) ||
 +                  ((hba->saved_err & UIC_ERROR) &&
 +                  (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
 +                      if (((hba->saved_err & INT_FATAL_ERRORS) ==
 +                              DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
 +                                      ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
 +                              hba->silence_err_logs = true;
 +                      goto out;
 +              }
 +
 +              /*
 +               * As DL NAC is the only error received so far, send out NOP
 +               * command to confirm if link is still active or not.
 +               *   - If we don't get any response then do error recovery.
 +               *   - If we get response then clear the DL NAC error bit.
 +               */
 +
 +              /* silence the error logs from NOP command */
 +              hba->silence_err_logs = true;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              err = ufshcd_verify_dev_init(hba);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              hba->silence_err_logs = false;
 +
 +              if (err) {
 +                      hba->silence_err_logs = true;
 +                      goto out;
 +              }
 +
 +              /* Link seems to be alive hence ignore the DL NAC errors */
 +              if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
 +                      hba->saved_err &= ~UIC_ERROR;
 +              /* clear NAC error */
 +              hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
 +              if (!hba->saved_uic_err) {
 +                      err_handling = false;
 +                      goto out;
 +              }
 +              /*
 +               * There seem to be some errors other than NAC, so do error
 +               * recovery.
 +               */
 +              hba->silence_err_logs = true;
        }
  out:
 -      scsi_unblock_requests(hba->host);
 -      pm_runtime_put_sync(hba->dev);
 -      return;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return err_handling;
  }
  
  /**
@@@ -6117,149 -3496,51 +6117,149 @@@ static void ufshcd_err_handler(struct w
  {
        struct ufs_hba *hba;
        unsigned long flags;
 -      u32 err_xfer = 0;
 -      u32 err_tm = 0;
 +      bool err_xfer = false, err_tm = false;
        int err = 0;
        int tag;
 +      bool needs_reset = false;
 +      bool clks_enabled = false;
  
        hba = container_of(work, struct ufs_hba, eh_work);
  
 -      pm_runtime_get_sync(hba->dev);
 -      ufshcd_hold(hba, false);
 -
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
 -              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufsdbg_set_err_state(hba);
 +
 +      if (hba->ufshcd_state == UFSHCD_STATE_RESET)
                goto out;
 +
 +      /*
 +       * Make sure the clocks are ON before we proceed with error
 +       * handling. In the majority of cases the error handler runs
 +       * with the clocks ON. However, it may have been scheduled by
 +       * the auto hibern8 error interrupt, in which case the clocks
 +       * could be gated, or in the process of being gated, when the
 +       * error handler runs.
 +       */
 +      if (unlikely((hba->clk_gating.state != CLKS_ON) &&
 +          ufshcd_is_auto_hibern8_supported(hba))) {
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
 +              ufshcd_hold(hba, false);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              clks_enabled = true;
        }
  
        hba->ufshcd_state = UFSHCD_STATE_RESET;
        ufshcd_set_eh_in_progress(hba);
  
        /* Complete requests that have door-bell cleared by h/w */
 -      ufshcd_transfer_req_compl(hba);
 -      ufshcd_tmc_handler(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      ufshcd_complete_requests(hba);
 +
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
 +              bool ret;
 +
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
 +              ret = ufshcd_quirk_dl_nac_errors(hba);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +              if (!ret)
 +                      goto skip_err_handling;
 +      }
 +
 +      /*
 +       * Dump the controller state before resetting. The transfer request
 +       * state will be dumped as part of request completion.
 +       */
 +      if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
 +              dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
 +                      __func__, hba->saved_err, hba->saved_uic_err);
 +              if (!hba->silence_err_logs) {
 +                      /* release the lock as printing the host regs may sleep */
 +                      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +                      ufshcd_print_host_regs(hba);
 +                      ufshcd_print_host_state(hba);
 +                      ufshcd_print_pwr_info(hba);
 +                      ufshcd_print_tmrs(hba, hba->outstanding_tasks);
 +                      ufshcd_print_cmd_log(hba);
 +                      spin_lock_irqsave(hba->host->host_lock, flags);
 +              }
 +      }
 +
 +      if ((hba->saved_err & INT_FATAL_ERRORS)
 +          || hba->saved_ce_err || hba->force_host_reset ||
 +          ((hba->saved_err & UIC_ERROR) &&
 +          (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
 +                                 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
 +                                 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
 +              needs_reset = true;
 +
 +      /*
 +       * If a host reset is required, skip forcefully clearing the
 +       * pending transfers; they will automatically get cleared after
 +       * link startup.
 +       */
 +      if (needs_reset)
 +              goto skip_pending_xfer_clear;
  
 +      /* release lock as clear command might sleep */
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
        /* Clear pending transfer requests */
 -      for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
 -              if (ufshcd_clear_cmd(hba, tag))
 -                      err_xfer |= 1 << tag;
 +      for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
 +              if (ufshcd_clear_cmd(hba, tag)) {
 +                      err_xfer = true;
 +                      goto lock_skip_pending_xfer_clear;
 +              }
 +      }
  
        /* Clear pending task management requests */
 -      for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
 -              if (ufshcd_clear_tm_cmd(hba, tag))
 -                      err_tm |= 1 << tag;
 +      for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
 +              if (ufshcd_clear_tm_cmd(hba, tag)) {
 +                      err_tm = true;
 +                      goto lock_skip_pending_xfer_clear;
 +              }
 +      }
  
 -      /* Complete the requests that are cleared by s/w */
 +lock_skip_pending_xfer_clear:
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_transfer_req_compl(hba);
 -      ufshcd_tmc_handler(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 +      /* Complete the requests that are cleared by s/w */
 +      ufshcd_complete_requests(hba);
 +
 +      if (err_xfer || err_tm)
 +              needs_reset = true;
 +
 +skip_pending_xfer_clear:
        /* Fatal errors need reset */
 -      if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
 -                      ((hba->saved_err & UIC_ERROR) &&
 -                       (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
 +      if (needs_reset) {
 +              unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
 +
 +              if (hba->saved_err & INT_FATAL_ERRORS)
 +                      ufshcd_update_error_stats(hba,
 +                                                UFS_ERR_INT_FATAL_ERRORS);
 +              if (hba->saved_ce_err)
 +                      ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
 +
 +              if (hba->saved_err & UIC_ERROR)
 +                      ufshcd_update_error_stats(hba,
 +                                                UFS_ERR_INT_UIC_ERROR);
 +
 +              if (err_xfer || err_tm)
 +                      ufshcd_update_error_stats(hba,
 +                                                UFS_ERR_CLEAR_PEND_XFER_TM);
 +
 +              /*
 +               * ufshcd_reset_and_restore() does the link reinitialization,
 +               * which needs at least one empty doorbell slot to send the
 +               * device management commands (NOP and query commands).
 +               * If no slot is empty at this moment, forcefully free up the
 +               * last slot.
 +               */
 +              if (hba->outstanding_reqs == max_doorbells)
 +                      __ufshcd_transfer_req_compl(hba,
 +                                                  (1UL << (hba->nutrs - 1)));
 +
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
                err = ufshcd_reset_and_restore(hba);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
                if (err) {
                        dev_err(hba->dev, "%s: reset and restore failed\n",
                                        __func__);
                scsi_report_bus_reset(hba->host, 0);
                hba->saved_err = 0;
                hba->saved_uic_err = 0;
 +              hba->saved_ce_err = 0;
 +              hba->force_host_reset = false;
 +      }
 +
 +skip_err_handling:
 +      if (!needs_reset) {
 +              hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 +              if (hba->saved_err || hba->saved_uic_err)
 +                      dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
 +                          __func__, hba->saved_err, hba->saved_uic_err);
 +      }
 +
 +      hba->silence_err_logs = false;
 +
 +      if (clks_enabled) {
 +              __ufshcd_release(hba, false);
 +              hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
        }
 +out:
        ufshcd_clear_eh_in_progress(hba);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +
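 +/*
 + * Record a UIC error register value, with a timestamp, in the driver's
 + * fixed-size circular error history buffer.
 + */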
 +static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
 +              u32 reg)
 +{
 +      reg_hist->reg[reg_hist->pos] = reg;
 +      reg_hist->tstamp[reg_hist->pos] = ktime_get();
 +      reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
 +}
 +
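 +/*
 + * Work handler scheduled after a PHY LINERESET indication: re-read the
 + * current power mode and gears over DME and, if they no longer match the
 + * saved pwr_info, re-apply the previous power mode settings.
 + */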
 +static void ufshcd_rls_handler(struct work_struct *work)
 +{
 +      struct ufs_hba *hba;
 +      int ret = 0;
 +      u32 mode;
 +
 +      hba = container_of(work, struct ufs_hba, rls_work);
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_scsi_block_requests(hba);
 +      down_write(&hba->lock);
 +      ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 +      if (ret) {
 +              dev_err(hba->dev,
 +                      "Timed out (%d) waiting for DB to clear\n",
 +                      ret);
 +              goto out;
 +      }
 +
 +      ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
 +      if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
 +              hba->restore_needed = true;
 +
 +      if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
 +              hba->restore_needed = true;
 +
 +      ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
 +      if (hba->pwr_info.gear_rx != mode)
 +              hba->restore_needed = true;
 +
 +      ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
 +      if (hba->pwr_info.gear_tx != mode)
 +              hba->restore_needed = true;
 +
 +      if (hba->restore_needed)
 +              ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
 +
 +      if (ret)
 +              dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
 +                      __func__, ret);
 +      else
 +              hba->restore_needed = false;
  
  out:
 -      scsi_unblock_requests(hba->host);
 -      ufshcd_release(hba);
 +      up_write(&hba->lock);
 +      ufshcd_scsi_unblock_requests(hba);
        pm_runtime_put_sync(hba->dev);
  }
  
  /**
   * ufshcd_update_uic_error - check and set fatal UIC error flags.
   * @hba: per-adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_update_uic_error(struct ufs_hba *hba)
 +static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
  {
        u32 reg;
 +      irqreturn_t retval = IRQ_NONE;
 +
 +      /* PHY layer lane error */
 +      reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
 +      if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
 +          (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
 +              /*
 +               * To know whether this error is fatal, the DB timeout
 +               * must be checked, but that error is handled separately.
 +               */
 +              dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
 +                              __func__, reg);
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
 +
 +              /*
 +               * Don't ignore LINERESET indication during hibern8
 +               * enter operation.
 +               */
 +              if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
 +                      struct uic_command *cmd = hba->active_uic_cmd;
 +
 +                      if (cmd) {
 +                              if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
 +                                      dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
 +                                              __func__, reg);
 +                                      hba->full_init_linereset = true;
 +                              }
 +                      }
 +                      if (!hba->full_init_linereset)
 +                              schedule_work(&hba->rls_work);
 +              }
 +              retval |= IRQ_HANDLED;
 +      }
  
        /* PA_INIT_ERROR is fatal and needs UIC reset */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
 -      if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
 -              hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
 +      if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
 +          (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
 +
 +              if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
 +                      hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
 +              } else if (hba->dev_quirks &
 +                         UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
 +                      if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
 +                              hba->uic_error |=
 +                                      UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
 +                      else if (reg &
 +                               UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
 +                              hba->uic_error |=
 +                                      UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
 +              }
 +              retval |= IRQ_HANDLED;
 +      }
  
        /* UIC NL/TL/DME errors needs software retry */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
 -      if (reg)
 +      if ((reg & UIC_NETWORK_LAYER_ERROR) &&
 +          (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
                hba->uic_error |= UFSHCD_UIC_NL_ERROR;
 +              retval |= IRQ_HANDLED;
 +      }
  
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
 -      if (reg)
 +      if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
 +          (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
                hba->uic_error |= UFSHCD_UIC_TL_ERROR;
 +              retval |= IRQ_HANDLED;
 +      }
  
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
 -      if (reg)
 +      if ((reg & UIC_DME_ERROR) &&
 +          (reg & UIC_DME_ERROR_CODE_MASK)) {
 +              ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
                hba->uic_error |= UFSHCD_UIC_DME_ERROR;
 +              retval |= IRQ_HANDLED;
 +      }
  
        dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
                        __func__, hba->uic_error);
 +      return retval;
  }
  
  /**
   * ufshcd_check_errors - Check for errors that need s/w attention
   * @hba: per-adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_check_errors(struct ufs_hba *hba)
 +static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
  {
        bool queue_eh_work = false;
 +      irqreturn_t retval = IRQ_NONE;
  
 -      if (hba->errors & INT_FATAL_ERRORS)
 +      if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
                queue_eh_work = true;
  
        if (hba->errors & UIC_ERROR) {
                hba->uic_error = 0;
 -              ufshcd_update_uic_error(hba);
 +              retval = ufshcd_update_uic_error(hba);
                if (hba->uic_error)
                        queue_eh_work = true;
        }
  
        if (queue_eh_work) {
 +              /*
 +               * Update the transfer error masks to sticky bits; do this
 +               * irrespective of the current ufshcd_state.
 +               */
 +              hba->saved_err |= hba->errors;
 +              hba->saved_uic_err |= hba->uic_error;
 +              hba->saved_ce_err |= hba->ce_error;
 +
                /* handle fatal errors only when link is functional */
                if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
 -                      /* block commands from scsi mid-layer */
 -                      scsi_block_requests(hba->host);
 -
 -                      /* transfer error masks to sticky bits */
 -                      hba->saved_err |= hba->errors;
 -                      hba->saved_uic_err |= hba->uic_error;
 +                      /*
 +                       * Set the error-handling-in-progress flag early so
 +                       * that we don't issue any new requests.
 +                       */
 +                      ufshcd_set_eh_in_progress(hba);
  
                        hba->ufshcd_state = UFSHCD_STATE_ERROR;
                        schedule_work(&hba->eh_work);
                }
 +              retval |= IRQ_HANDLED;
        }
        /*
         * if (!queue_eh_work) -
         * itself without s/w intervention or errors that will be
         * handled by the SCSI core layer.
         */
 +      return retval;
  }
  
  /**
   * ufshcd_tmc_handler - handle task management function completion
   * @hba: per adapter instance
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_tmc_handler(struct ufs_hba *hba)
 +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
  {
        u32 tm_doorbell;
  
        tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
        hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
 -      wake_up(&hba->tm_wq);
 +      if (hba->tm_condition) {
 +              wake_up(&hba->tm_wq);
 +              return IRQ_HANDLED;
 +      } else {
 +              return IRQ_NONE;
 +      }
  }
  
  /**
   * ufshcd_sl_intr - Interrupt service routine
   * @hba: per adapter instance
   * @intr_status: contains interrupts generated by the controller
 + *
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
 -static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 +static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  {
 +      irqreturn_t retval = IRQ_NONE;
 +
 +      ufsdbg_error_inject_dispatcher(hba,
 +              ERR_INJECT_INTR, intr_status, &intr_status);
 +
 +      ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
 +
        hba->errors = UFSHCD_ERROR_MASK & intr_status;
 -      if (hba->errors)
 -              ufshcd_check_errors(hba);
 +      if (hba->errors || hba->ce_error)
 +              retval |= ufshcd_check_errors(hba);
  
        if (intr_status & UFSHCD_UIC_MASK)
 -              ufshcd_uic_cmd_compl(hba, intr_status);
 +              retval |= ufshcd_uic_cmd_compl(hba, intr_status);
  
        if (intr_status & UTP_TASK_REQ_COMPL)
 -              ufshcd_tmc_handler(hba);
 +              retval |= ufshcd_tmc_handler(hba);
  
        if (intr_status & UTP_TRANSFER_REQ_COMPL)
 -              ufshcd_transfer_req_compl(hba);
 +              retval |= ufshcd_transfer_req_compl(hba);
 +
 +      return retval;
  }
  
  /**
   * @irq: irq number
   * @__hba: pointer to adapter instance
   *
 - * Returns IRQ_HANDLED - If interrupt is valid
 - *            IRQ_NONE - If invalid interrupt
 + * Returns
 + *  IRQ_HANDLED - If interrupt is valid
 + *  IRQ_NONE    - If invalid interrupt
   */
  static irqreturn_t ufshcd_intr(int irq, void *__hba)
  {
 -      u32 intr_status;
 +      u32 intr_status, enabled_intr_status;
        irqreturn_t retval = IRQ_NONE;
        struct ufs_hba *hba = __hba;
 +      int retries = hba->nutrs;
  
        spin_lock(hba->host->host_lock);
        intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 +      hba->ufs_stats.last_intr_status = intr_status;
 +      hba->ufs_stats.last_intr_ts = ktime_get();
 +      /*
 +       * There could be at most hba->nutrs requests in flight, and in the
 +       * worst case they finish one by one after the interrupt status is
 +       * read. Make sure we handle them all by re-reading the interrupt
 +       * status in a loop until every request is processed before returning.
 +       */
 +      do {
 +              enabled_intr_status =
 +                      intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 +              if (intr_status)
 +                      ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
 +              if (enabled_intr_status)
 +                      retval |= ufshcd_sl_intr(hba, enabled_intr_status);
  
 -      if (intr_status) {
 -              ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
 -              ufshcd_sl_intr(hba, intr_status);
 -              retval = IRQ_HANDLED;
 +              intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 +      } while (intr_status && --retries);
 +
 +      if (retval == IRQ_NONE) {
 +              dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
 +                                      __func__, intr_status);
 +              ufshcd_hex_dump("host regs: ", hba->mmio_base,
 +                                      UFSHCI_REG_SPACE_SIZE);
        }
 +
        spin_unlock(hba->host->host_lock);
        return retval;
  }
@@@ -6621,7 -3708,7 +6621,7 @@@ static int ufshcd_clear_tm_cmd(struct u
        /* poll for max. 1 sec to clear door bell register by h/w */
        err = ufshcd_wait_for_register(hba,
                        REG_UTP_TASK_REQ_DOOR_BELL,
 -                      mask, 0, 1000, 1000);
 +                      mask, 0, 1000, 1000, true);
  out:
        return err;
  }
@@@ -6655,8 -3742,7 +6655,8 @@@ static int ufshcd_issue_tm_cmd(struct u
         * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
         */
        wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 -      ufshcd_hold(hba, false);
 +      hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
 +      ufshcd_hold_all(hba);
  
        spin_lock_irqsave(host->host_lock, flags);
        task_req_descp = hba->utmrdl_base_addr;
  
        /* send command to the controller */
        __set_bit(free_slot, &hba->outstanding_tasks);
 +
 +      /* Make sure descriptors are ready before ringing the task doorbell */
 +      wmb();
 +
        ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
 +      /* Make sure that doorbell is committed immediately */
 +      wmb();
  
        spin_unlock_irqrestore(host->host_lock, flags);
  
        clear_bit(free_slot, &hba->tm_condition);
        ufshcd_put_tm_slot(hba, free_slot);
        wake_up(&hba->tm_tag_wq);
 +      hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
  
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
  
@@@ -6741,7 -3820,6 +6741,7 @@@ static int ufshcd_eh_device_reset_handl
        hba = shost_priv(host);
        tag = cmd->request->tag;
  
 +      ufshcd_print_cmd_log(hba);
        lrbp = &hba->lrb[tag];
        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
        spin_lock_irqsave(host->host_lock, flags);
        ufshcd_transfer_req_compl(hba);
        spin_unlock_irqrestore(host->host_lock, flags);
 +
  out:
 +      hba->req_abort_count = 0;
        if (!err) {
                err = SUCCESS;
        } else {
        return err;
  }
  
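 +/*
 + * Mark every request in @bitmap so that subsequent abort attempts on it
 + * are skipped.
 + */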
 +static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
 +{
 +      struct ufshcd_lrb *lrbp;
 +      int tag;
 +
 +      for_each_set_bit(tag, &bitmap, hba->nutrs) {
 +              lrbp = &hba->lrb[tag];
 +              lrbp->req_abort_skip = true;
 +      }
 +}
 +
  /**
   * ufshcd_abort - abort a specific command
   * @cmd: SCSI command pointer
@@@ -6811,87 -3876,31 +6811,87 @@@ static int ufshcd_abort(struct scsi_cmn
        host = cmd->device->host;
        hba = shost_priv(host);
        tag = cmd->request->tag;
 +      if (!ufshcd_valid_tag(hba, tag)) {
 +              dev_err(hba->dev,
 +                      "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
 +                      __func__, tag, cmd, cmd->request);
 +              BUG();
 +      }
  
 -      ufshcd_hold(hba, false);
 +      lrbp = &hba->lrb[tag];
 +
 +      ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
 +
 +      /*
 +       * Task abort to the device W-LUN is illegal. When this command
 +       * fails due to the spec violation, the next SCSI error-handling
 +       * step will be to send a LU reset which, again, is a spec violation.
 +       * To avoid these unnecessary/illegal steps we skip to the last
 +       * error handling stage: reset and restore.
 +       */
 +      if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
 +              return ufshcd_eh_host_reset_handler(cmd);
 +
 +      ufshcd_hold_all(hba);
 +      reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        /* If command is already aborted/completed, return SUCCESS */
 -      if (!(test_bit(tag, &hba->outstanding_reqs)))
 +      if (!(test_bit(tag, &hba->outstanding_reqs))) {
 +              dev_err(hba->dev,
 +                      "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
 +                      __func__, tag, hba->outstanding_reqs, reg);
                goto out;
 +      }
  
 -      reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        if (!(reg & (1 << tag))) {
                dev_err(hba->dev,
                "%s: cmd was completed, but without a notifying intr, tag = %d",
                __func__, tag);
        }
  
 -      lrbp = &hba->lrb[tag];
 +      /* Print Transfer Request of aborted task */
 +      dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
 +
 +      /*
 +       * Print detailed info about the aborted request.
 +       * As more than one request might get aborted at the same time,
 +       * print full information only for the first aborted request in order
 +       * to reduce repeated printouts. For other aborted requests only print
 +       * basic details.
 +       */
 +      scsi_print_command(cmd);
 +      if (!hba->req_abort_count) {
 +              ufshcd_print_fsm_state(hba);
 +              ufshcd_print_host_regs(hba);
 +              ufshcd_print_host_state(hba);
 +              ufshcd_print_pwr_info(hba);
 +              ufshcd_print_trs(hba, 1 << tag, true);
 +      } else {
 +              ufshcd_print_trs(hba, 1 << tag, false);
 +      }
 +      hba->req_abort_count++;
 +
 +      /* Skip task abort in case previous aborts failed and report failure */
 +      if (lrbp->req_abort_skip) {
 +              err = -EIO;
 +              goto out;
 +      }
 +
        for (poll_cnt = 100; poll_cnt; poll_cnt--) {
                err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
                                UFS_QUERY_TASK, &resp);
                if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
                        /* cmd pending in the device */
 +                      dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
 +                              __func__, tag);
                        break;
                } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
                        /*
                         * cmd not pending in the device, check if it is
                         * in transition.
                         */
 +                      dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
 +                              __func__, tag);
                        reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
                        if (reg & (1 << tag)) {
                                /* sleep for max. 200us to stabilize */
                                continue;
                        }
                        /* command completed already */
 +                      dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
 +                              __func__, tag);
                        goto out;
                } else {
 +                      dev_err(hba->dev,
 +                              "%s: no response from device. tag = %d, err %d",
 +                              __func__, tag, err);
                        if (!err)
                                err = resp; /* service response error */
                        goto out;
        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
                        UFS_ABORT_TASK, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
 -              if (!err)
 +              if (!err) {
                        err = resp; /* service response error */
 +                      dev_err(hba->dev, "%s: issued. tag = %d, err %d",
 +                              __func__, tag, err);
 +              }
                goto out;
        }
  
        err = ufshcd_clear_cmd(hba, tag);
 -      if (err)
 +      if (err) {
 +              dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
 +                      __func__, tag, err);
                goto out;
 +      }
  
        scsi_dma_unmap(cmd);
  
        spin_lock_irqsave(host->host_lock, flags);
 -      __clear_bit(tag, &hba->outstanding_reqs);
 +      ufshcd_outstanding_req_clear(hba, tag);
        hba->lrb[tag].cmd = NULL;
        spin_unlock_irqrestore(host->host_lock, flags);
  
                err = SUCCESS;
        } else {
                dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
 +              ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
                err = FAILED;
        }
  
        /*
 -       * This ufshcd_release() corresponds to the original scsi cmd that got
 -       * aborted here (as we won't get any IRQ for it).
 +       * This ufshcd_release_all() corresponds to the original scsi cmd that
 +       * got aborted here (as we won't get any IRQ for it).
         */
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        return err;
  }
  
@@@ -6979,12 -3976,9 +6979,12 @@@ static int ufshcd_host_reset_and_restor
  
        /* Reset the host controller */
        spin_lock_irqsave(hba->host->host_lock, flags);
 -      ufshcd_hba_stop(hba);
 +      ufshcd_hba_stop(hba, false);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 +      /* scale up clocks to max frequency before full reinitialization */
 +      ufshcd_set_clk_freq(hba, true);
 +
        err = ufshcd_hba_enable(hba);
        if (err)
                goto out;
        /* Establish the link again and restore the device */
        err = ufshcd_probe_hba(hba);
  
 -      if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
 +      if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
                err = -EIO;
 +              goto out;
 +      }
 +
 +      if (!err) {
 +              err = ufshcd_vops_crypto_engine_reset(hba);
 +              if (err) {
 +                      dev_err(hba->dev,
 +                              "%s: failed to reset crypto engine %d\n",
 +                              __func__, err);
 +                      goto out;
 +              }
 +      }
 +
  out:
        if (err)
                dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@@ -7030,26 -4011,10 +7030,26 @@@ static int ufshcd_reset_and_restore(str
        int retries = MAX_HOST_RESET_RETRIES;
  
        do {
 +              err = ufshcd_vops_full_reset(hba);
 +              if (err)
 +                      dev_warn(hba->dev, "%s: full reset returned %d\n",
 +                               __func__, err);
 +
 +              err = ufshcd_reset_device(hba);
 +              if (err)
 +                      dev_warn(hba->dev, "%s: device reset failed. err %d\n",
 +                               __func__, err);
 +
                err = ufshcd_host_reset_and_restore(hba);
        } while (err && --retries);
  
        /*
 +       * There is no point proceeding even after failing
 +       * to recover after multiple retries.
 +       */
 +      if (err)
 +              BUG();
 +      /*
         * After reset the door-bell might be cleared, complete
         * outstanding requests in s/w here.
         */
   */
  static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
  {
 -      int err;
 +      int err = SUCCESS;
        unsigned long flags;
        struct ufs_hba *hba;
  
        hba = shost_priv(cmd->device->host);
  
 -      ufshcd_hold(hba, false);
        /*
         * Check if there is any race with fatal error handling.
         * If so, wait for it to complete. Even though fatal error
                                hba->ufshcd_state == UFSHCD_STATE_RESET))
                        break;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
 -              dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
 +              dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
                flush_work(&hba->eh_work);
        } while (1);
  
 -      hba->ufshcd_state = UFSHCD_STATE_RESET;
 -      ufshcd_set_eh_in_progress(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      /*
 +       * We don't know whether the previous reset actually reset the host
 +       * controller, so force a reset here to be sure.
 +       */
 +      hba->ufshcd_state = UFSHCD_STATE_ERROR;
 +      hba->force_host_reset = true;
 +      schedule_work(&hba->eh_work);
  
 -      err = ufshcd_reset_and_restore(hba);
 +      /* wait for the reset work to finish */
 +      do {
 +              if (!(work_pending(&hba->eh_work) ||
 +                              hba->ufshcd_state == UFSHCD_STATE_RESET))
 +                      break;
 +              spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
 +              flush_work(&hba->eh_work);
 +              spin_lock_irqsave(hba->host->host_lock, flags);
 +      } while (1);
  
 -      spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (!err) {
 -              err = SUCCESS;
 -              hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 -      } else {
 +      if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
 +            ufshcd_is_link_active(hba))) {
                err = FAILED;
                hba->ufshcd_state = UFSHCD_STATE_ERROR;
        }
 -      ufshcd_clear_eh_in_progress(hba);
 +
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
 -      ufshcd_release(hba);
        return err;
  }
  
@@@ -7229,9 -4186,9 +7229,9 @@@ static void ufshcd_init_icc_levels(stru
        dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
                        __func__, hba->init_prefetch_data.icc_level);
  
 -      ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 -                      QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 -                      &hba->init_prefetch_data.icc_level);
 +      ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 +              QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 +              &hba->init_prefetch_data.icc_level);
  
        if (ret)
                dev_err(hba->dev,
  }
  
  /**
 + * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 + * @hba: per-adapter instance
 + *
 + * The PA_TActivate parameter can be tuned manually if the UniPro version is
 + * less than 1.61. PA_TActivate needs to be greater than or equal to the peer
 + * M-PHY's RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can
 + * help reduce the hibern8 exit latency.
 + *
 + * Returns zero on success, non-zero error value on failure.
 + */
 +static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
 +
 +      if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
 +              return 0;
 +
 +      ret = ufshcd_dme_peer_get(hba,
 +                                UIC_ARG_MIB_SEL(
 +                                      RX_MIN_ACTIVATETIME_CAPABILITY,
 +                                      UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
 +                                &peer_rx_min_activatetime);
 +      if (ret)
 +              goto out;
 +
 +      /* make sure proper unit conversion is applied */
 +      tuned_pa_tactivate =
 +              ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
 +               / PA_TACTIVATE_TIME_UNIT_US);
 +      ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
 +                           tuned_pa_tactivate);
 +
 +out:
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 + * @hba: per-adapter instance
 + *
 + * The PA_Hibern8Time parameter can be tuned manually if the UniPro version is
 + * less than 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
 + * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 + * This optimal value can help reduce the hibern8 exit latency.
 + *
 + * Returns zero on success, non-zero error value on failure.
 + */
 +static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
 +      u32 max_hibern8_time, tuned_pa_hibern8time;
 +
 +      ret = ufshcd_dme_get(hba,
 +                           UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
 +                                      UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
 +                                &local_tx_hibern8_time_cap);
 +      if (ret)
 +              goto out;
 +
 +      ret = ufshcd_dme_peer_get(hba,
 +                                UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
 +                                      UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
 +                                &peer_rx_hibern8_time_cap);
 +      if (ret)
 +              goto out;
 +
 +      max_hibern8_time = max(local_tx_hibern8_time_cap,
 +                             peer_rx_hibern8_time_cap);
 +      /* make sure proper unit conversion is applied */
 +      tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
 +                              / PA_HIBERN8_TIME_UNIT_US);
 +      ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
 +                           tuned_pa_hibern8time);
 +out:
 +      return ret;
 +}
 +
 +/**
 + * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 + * less than device PA_TACTIVATE time.
 + * @hba: per-adapter instance
 + *
 + * Some UFS devices require the host PA_TACTIVATE to be lower than the device
 + * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk is enabled for
 + * such devices.
 + *
 + * Returns zero on success, non-zero error value on failure.
 + */
 +static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
 +{
 +      int ret = 0;
 +      u32 granularity, peer_granularity;
 +      u32 pa_tactivate, peer_pa_tactivate;
 +      u32 pa_tactivate_us, peer_pa_tactivate_us;
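 +      /* PA_GRANULARITY value (1..6) to microseconds step size */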
 +      u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
 +
 +      ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
 +                                &granularity);
 +      if (ret)
 +              goto out;
 +
 +      ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
 +                                &peer_granularity);
 +      if (ret)
 +              goto out;
 +
 +      if ((granularity < PA_GRANULARITY_MIN_VAL) ||
 +          (granularity > PA_GRANULARITY_MAX_VAL)) {
 +              dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
 +                      __func__, granularity);
 +              return -EINVAL;
 +      }
 +
 +      if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
 +          (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
 +              dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
 +                      __func__, peer_granularity);
 +              return -EINVAL;
 +      }
 +
 +      ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
 +      if (ret)
 +              goto out;
 +
 +      ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
 +                                &peer_pa_tactivate);
 +      if (ret)
 +              goto out;
 +
 +      pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
 +      peer_pa_tactivate_us = peer_pa_tactivate *
 +                           gran_to_us_table[peer_granularity - 1];
 +
 +      if (pa_tactivate_us > peer_pa_tactivate_us) {
 +              u32 new_peer_pa_tactivate;
 +
 +              new_peer_pa_tactivate = pa_tactivate_us /
 +                                    gran_to_us_table[peer_granularity - 1];
 +              new_peer_pa_tactivate++;
 +              ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
 +                                        new_peer_pa_tactivate);
 +      }
 +
 +out:
 +      return ret;
 +}
 +
 +static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 +{
 +      if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
 +              ufshcd_tune_pa_tactivate(hba);
 +              ufshcd_tune_pa_hibern8time(hba);
 +      }
 +
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
 +              /* set 1ms timeout for PA_TACTIVATE */
 +              ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
 +
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
 +              ufshcd_quirk_tune_host_pa_tactivate(hba);
 +
 +      ufshcd_vops_apply_dev_quirks(hba);
 +}
 +
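 +/* Reset the UIC error history buffers and the request abort counter */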
 +static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
 +{
 +      int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
 +
 +      memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
 +      memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
 +
 +      hba->req_abort_count = 0;
 +}
 +
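 +/*
 + * For devices with the UFS_DEVICE_QUIRK_NO_LINK_OFF quirk, adjust any
 + * runtime/system PM level that would power the link off so that it keeps
 + * the link in hibern8 instead.
 + */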
 +static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
 +{
 +      if (hba->dev_quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
 +              if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
 +                  UIC_LINK_OFF_STATE) {
 +                      hba->rpm_lvl =
 +                              ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                              UFS_SLEEP_PWR_MODE,
 +                                              UIC_LINK_HIBERN8_STATE);
 +                      dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
 +                              hba->rpm_lvl);
 +              }
 +              if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
 +                  UIC_LINK_OFF_STATE) {
 +                      hba->spm_lvl =
 +                              ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                              UFS_SLEEP_PWR_MODE,
 +                                              UIC_LINK_HIBERN8_STATE);
 +                      dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
 +                              hba->spm_lvl);
 +              }
 +      }
 +}
 +
 +/**
   * ufshcd_probe_hba - probe hba to detect device and initialize
   * @hba: per-adapter instance
   *
  static int ufshcd_probe_hba(struct ufs_hba *hba)
  {
        int ret;
 +      ktime_t start = ktime_get();
  
        ret = ufshcd_link_startup(hba);
        if (ret)
                goto out;
  
 -      ufshcd_init_pwr_info(hba);
 +      /* Debug counters initialization */
 +      ufshcd_clear_dbg_ufs_stats(hba);
 +      /* set the default level for urgent bkops */
 +      hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
 +      hba->is_urgent_bkops_lvl_checked = false;
  
        /* UniPro link is active now */
        ufshcd_set_link_active(hba);
        if (ret)
                goto out;
  
 +      ufs_advertise_fixup_device(hba);
 +      ufshcd_tune_unipro_params(hba);
 +
 +      ufshcd_apply_pm_quirks(hba);
 +      ret = ufshcd_set_vccq_rail_unused(hba,
 +              (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
 +      if (ret)
 +              goto out;
 +
        /* UFS device is also active now */
        ufshcd_set_ufs_dev_active(hba);
        ufshcd_force_reset_auto_bkops(hba);
 -      hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
        hba->wlun_dev_clr_ua = true;
  
        if (ufshcd_get_max_pwr_mode(hba)) {
                        __func__);
        } else {
                ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 -              if (ret)
 +              if (ret) {
                        dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
                                        __func__, ret);
 +                      goto out;
 +              }
        }
  
 +      /* set the state as operational after switching to desired gear */
 +      hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
        /*
         * If we are in error handling context or in power management callbacks
         * context, no need to scan the host
  
                /* clear any previous UFS device information */
                memset(&hba->dev_info, 0, sizeof(hba->dev_info));
 -              if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 -                                     QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
 +              if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 +                              QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
                        hba->dev_info.f_power_on_wp_en = flag;
  
                if (!hba->is_init_prefetch)
                        ufshcd_init_icc_levels(hba);
  
                /* Add required well known logical units to scsi mid layer */
-               if (ufshcd_scsi_add_wlus(hba))
+               ret = ufshcd_scsi_add_wlus(hba);
+               if (ret)
                        goto out;
  
 +              /* Initialize devfreq after UFS device is detected */
 +              if (ufshcd_is_clkscaling_supported(hba)) {
 +                      memcpy(&hba->clk_scaling.saved_pwr_info.info,
 +                          &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
 +                      hba->clk_scaling.saved_pwr_info.is_valid = true;
 +                      hba->clk_scaling.is_scaled_up = true;
 +                      if (!hba->devfreq) {
 +                              hba->devfreq = devfreq_add_device(hba->dev,
 +                                                      &ufs_devfreq_profile,
 +                                                      "simple_ondemand",
 +                                                      gov_data);
 +                              if (IS_ERR(hba->devfreq)) {
 +                                      ret = PTR_ERR(hba->devfreq);
 +                                      dev_err(hba->dev, "Unable to register with devfreq %d\n",
 +                                              ret);
 +                                      goto out;
 +                              }
 +                      }
 +                      hba->clk_scaling.is_allowed = true;
 +              }
 +
                scsi_scan_host(hba->host);
                pm_runtime_put_sync(hba->dev);
        }
        if (!hba->is_init_prefetch)
                hba->is_init_prefetch = true;
  
 -      /* Resume devfreq after UFS device is detected */
 -      if (ufshcd_is_clkscaling_enabled(hba))
 -              devfreq_resume_device(hba->devfreq);
 -
 +      /*
 +       * Enable auto hibern8 if supported, after full host and
 +       * device initialization.
 +       */
 +      if (ufshcd_is_auto_hibern8_supported(hba))
 +              ufshcd_set_auto_hibern8_timer(hba,
 +                                    hba->hibern8_on_idle.delay_ms);
  out:
        /*
         * If we failed to initialize the device or the device is not
                ufshcd_hba_exit(hba);
        }
  
 +      trace_ufshcd_init(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
        return ret;
  }
  
@@@ -7651,296 -4361,7 +7652,296 @@@ static void ufshcd_async_scan(void *dat
  {
        struct ufs_hba *hba = (struct ufs_hba *)data;
  
 +      /*
 +       * Don't allow clock gating and hibern8 entry, so that device
 +       * detection is faster.
 +       */
 +      ufshcd_hold_all(hba);
        ufshcd_probe_hba(hba);
 +      ufshcd_release_all(hba);
 +}
 +
 +/**
 + * ufshcd_query_ioctl - perform user read queries
 + * @hba: per-adapter instance
 + * @lun: used for lun specific queries
 + * @buffer: user space buffer for reading and submitting query data and params
 + * @return: 0 for success, negative error code otherwise
 + *
 + * The expected/submitted buffer structure is struct ufs_ioctl_query_data.
 + * It reads the opcode, idn and buf_length parameters, and puts the
 + * response in the buffer field while updating the used size in buf_length.
 + */
 +static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
 +{
 +      struct ufs_ioctl_query_data *ioctl_data;
 +      int err = 0;
 +      int length = 0;
 +      void *data_ptr;
 +      bool flag;
 +      u32 att;
 +      u8 index;
 +      u8 *desc = NULL;
 +
 +      ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
 +      if (!ioctl_data) {
 +              dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
 +                              sizeof(struct ufs_ioctl_query_data));
 +              err = -ENOMEM;
 +              goto out;
 +      }
 +
 +      /* extract params from user buffer */
 +      err = copy_from_user(ioctl_data, buffer,
 +                      sizeof(struct ufs_ioctl_query_data));
 +      if (err) {
 +              dev_err(hba->dev,
 +                      "%s: Failed copying buffer from user, err %d\n",
 +                      __func__, err);
 +              goto out_release_mem;
 +      }
 +
 +      /* verify legal parameters & send query */
 +      switch (ioctl_data->opcode) {
 +      case UPIU_QUERY_OPCODE_READ_DESC:
 +              switch (ioctl_data->idn) {
 +              case QUERY_DESC_IDN_DEVICE:
 +              case QUERY_DESC_IDN_CONFIGURAION:
 +              case QUERY_DESC_IDN_INTERCONNECT:
 +              case QUERY_DESC_IDN_GEOMETRY:
 +              case QUERY_DESC_IDN_POWER:
 +                      index = 0;
 +                      break;
 +              case QUERY_DESC_IDN_UNIT:
 +                      if (!ufs_is_valid_unit_desc_lun(lun)) {
 +                              dev_err(hba->dev,
 +                                      "%s: No unit descriptor for lun 0x%x\n",
 +                                      __func__, lun);
 +                              err = -EINVAL;
 +                              goto out_release_mem;
 +                      }
 +                      index = lun;
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              length = min_t(int, QUERY_DESC_MAX_SIZE,
 +                              ioctl_data->buf_size);
 +              desc = kzalloc(length, GFP_KERNEL);
 +              if (!desc) {
 +                      dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
 +                                      __func__, length);
 +                      err = -ENOMEM;
 +                      goto out_release_mem;
 +              }
 +              err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
 +                              ioctl_data->idn, index, 0, desc, &length);
 +              break;
 +      case UPIU_QUERY_OPCODE_READ_ATTR:
 +              switch (ioctl_data->idn) {
 +              case QUERY_ATTR_IDN_BOOT_LU_EN:
 +              case QUERY_ATTR_IDN_POWER_MODE:
 +              case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
 +              case QUERY_ATTR_IDN_OOO_DATA_EN:
 +              case QUERY_ATTR_IDN_BKOPS_STATUS:
 +              case QUERY_ATTR_IDN_PURGE_STATUS:
 +              case QUERY_ATTR_IDN_MAX_DATA_IN:
 +              case QUERY_ATTR_IDN_MAX_DATA_OUT:
 +              case QUERY_ATTR_IDN_REF_CLK_FREQ:
 +              case QUERY_ATTR_IDN_CONF_DESC_LOCK:
 +              case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
 +              case QUERY_ATTR_IDN_EE_CONTROL:
 +              case QUERY_ATTR_IDN_EE_STATUS:
 +              case QUERY_ATTR_IDN_SECONDS_PASSED:
 +                      index = 0;
 +                      break;
 +              case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
 +              case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
 +                      index = lun;
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
 +                                      index, 0, &att);
 +              break;
 +
 +      case UPIU_QUERY_OPCODE_WRITE_ATTR:
 +              err = copy_from_user(&att,
 +                              buffer + sizeof(struct ufs_ioctl_query_data),
 +                              sizeof(u32));
 +              if (err) {
 +                      dev_err(hba->dev,
 +                              "%s: Failed copying buffer from user, err %d\n",
 +                              __func__, err);
 +                      goto out_release_mem;
 +              }
 +
 +              switch (ioctl_data->idn) {
 +              case QUERY_ATTR_IDN_BOOT_LU_EN:
 +                      index = 0;
 +                      if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
 +                              dev_err(hba->dev,
 +                                      "%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
 +                                      __func__, ioctl_data->opcode,
 +                                      (unsigned int)ioctl_data->idn, att);
 +                              err = -EINVAL;
 +                              goto out_release_mem;
 +                      }
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              err = ufshcd_query_attr(hba, ioctl_data->opcode,
 +                                      ioctl_data->idn, index, 0, &att);
 +              break;
 +
 +      case UPIU_QUERY_OPCODE_READ_FLAG:
 +              switch (ioctl_data->idn) {
 +              case QUERY_FLAG_IDN_FDEVICEINIT:
 +              case QUERY_FLAG_IDN_PERMANENT_WPE:
 +              case QUERY_FLAG_IDN_PWR_ON_WPE:
 +              case QUERY_FLAG_IDN_BKOPS_EN:
 +              case QUERY_FLAG_IDN_PURGE_ENABLE:
 +              case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
 +              case QUERY_FLAG_IDN_BUSY_RTC:
 +                      break;
 +              default:
 +                      goto out_einval;
 +              }
 +              err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
 +                              ioctl_data->idn, &flag);
 +              break;
 +      default:
 +              goto out_einval;
 +      }
 +
 +      if (err) {
 +              dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
 +                              ioctl_data->idn);
 +              goto out_release_mem;
 +      }
 +
 +      /*
 +       * Copy the response data.
 +       * We might end up reading less data than what is specified in
 +       * "ioctl_data->buf_size", so update "ioctl_data->buf_size" to
 +       * the amount of data actually read.
 +       */
 +      switch (ioctl_data->opcode) {
 +      case UPIU_QUERY_OPCODE_READ_DESC:
 +              ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
 +              data_ptr = desc;
 +              break;
 +      case UPIU_QUERY_OPCODE_READ_ATTR:
 +              ioctl_data->buf_size = sizeof(u32);
 +              data_ptr = &att;
 +              break;
 +      case UPIU_QUERY_OPCODE_READ_FLAG:
 +              ioctl_data->buf_size = 1;
 +              data_ptr = &flag;
 +              break;
 +      case UPIU_QUERY_OPCODE_WRITE_ATTR:
 +              goto out_release_mem;
 +      default:
 +              goto out_einval;
 +      }
 +
 +      /* copy to user */
 +      err = copy_to_user(buffer, ioctl_data,
 +                      sizeof(struct ufs_ioctl_query_data));
 +      if (err)
 +              dev_err(hba->dev, "%s: Failed copying back to user.\n",
 +                      __func__);
 +      err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
 +                      data_ptr, ioctl_data->buf_size);
 +      if (err)
 +              dev_err(hba->dev, "%s: err %d copying back to user.\n",
 +                              __func__, err);
 +      goto out_release_mem;
 +
 +out_einval:
 +      dev_err(hba->dev,
 +              "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
 +              __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
 +      err = -EINVAL;
 +out_release_mem:
 +      kfree(ioctl_data);
 +      kfree(desc);
 +out:
 +      return err;
 +}
 +
 +/**
 + * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
 + * @dev: scsi device required for per LUN queries
 + * @cmd: command opcode
 + * @buffer: user space buffer for transferring data
 + *
 + * Supported commands:
 + * UFS_IOCTL_QUERY
 + */
 +static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
 +{
 +      struct ufs_hba *hba = shost_priv(dev->host);
 +      int err = 0;
 +
 +      BUG_ON(!hba);
 +      if (!buffer) {
 +              dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
 +              return -EINVAL;
 +      }
 +
 +      switch (cmd) {
 +      case UFS_IOCTL_QUERY:
 +              pm_runtime_get_sync(hba->dev);
 +              err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
 +                              buffer);
 +              pm_runtime_put_sync(hba->dev);
 +              break;
 +      default:
 +              err = -ENOIOCTLCMD;
 +              dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
 +                      cmd);
 +              break;
 +      }
 +
 +      return err;
 +}
 +
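 +/*
 + * Block layer timeout handler: hand the command over to SCSI error handling
 + * only if it is actually outstanding in this driver; otherwise just restart
 + * the block layer timer.
 + */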
 +static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
 +{
 +      unsigned long flags;
 +      struct Scsi_Host *host;
 +      struct ufs_hba *hba;
 +      int index;
 +      bool found = false;
 +
 +      if (!scmd || !scmd->device || !scmd->device->host)
 +              return BLK_EH_NOT_HANDLED;
 +
 +      host = scmd->device->host;
 +      hba = shost_priv(host);
 +      if (!hba)
 +              return BLK_EH_NOT_HANDLED;
 +
 +      spin_lock_irqsave(host->host_lock, flags);
 +
 +      for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
 +              if (hba->lrb[index].cmd == scmd) {
 +                      found = true;
 +                      break;
 +              }
 +      }
 +
 +      spin_unlock_irqrestore(host->host_lock, flags);
 +
 +      /*
 +       * Bypass SCSI error handling and reset the block layer timer if this
 +       * SCSI command was not actually dispatched to the UFS driver;
 +       * otherwise let the SCSI layer handle the error as usual.
 +       */
 +      return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
  }
  
  static struct scsi_host_template ufshcd_driver_template = {
        .eh_abort_handler       = ufshcd_abort,
        .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
        .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
 +      .eh_timed_out           = ufshcd_eh_timed_out,
 +      .ioctl                  = ufshcd_ioctl,
 +#ifdef CONFIG_COMPAT
 +      .compat_ioctl           = ufshcd_ioctl,
 +#endif
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
@@@ -7997,13 -4413,7 +7998,13 @@@ static int ufshcd_config_vreg_load(stru
  static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
  {
 -      return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
 +      if (!vreg)
 +              return 0;
 +      else if (vreg->unused)
 +              return 0;
 +      else
 +              return ufshcd_config_vreg_load(hba->dev, vreg,
 +                                             UFS_VREG_LPM_LOAD_UA);
  }
  
  static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
  {
        if (!vreg)
                return 0;
 -
 -      return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 +      else if (vreg->unused)
 +              return 0;
 +      else
 +              return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
  }
  
  static int ufshcd_config_vreg(struct device *dev,
        name = vreg->name;
  
        if (regulator_count_voltages(reg) > 0) {
 +              uA_load = on ? vreg->max_uA : 0;
 +              ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
 +              if (ret)
 +                      goto out;
 +
                if (vreg->min_uV && vreg->max_uV) {
                        min_uV = on ? vreg->min_uV : 0;
                        ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
                        if (ret) {
                                dev_err(dev,
                                        "%s: %s set voltage failed, err=%d\n",
 -                                      __func__, name, ret);
 -                              goto out;
 -                      }
 -              }
 -
 -              uA_load = on ? vreg->max_uA : 0;
 -              ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
 -              if (ret)
 -                      goto out;
 +                                      __func__, name, ret);
 +                              goto out;
 +                      }
 +              }
        }
  out:
        return ret;
@@@ -8055,9 -4463,7 +8056,9 @@@ static int ufshcd_enable_vreg(struct de
  {
        int ret = 0;
  
 -      if (!vreg || vreg->enabled)
 +      if (!vreg)
 +              goto out;
 +      else if (vreg->enabled || vreg->unused)
                goto out;
  
        ret = ufshcd_config_vreg(dev, vreg, true);
@@@ -8077,9 -4483,7 +8078,9 @@@ static int ufshcd_disable_vreg(struct d
  {
        int ret = 0;
  
 -      if (!vreg || !vreg->enabled)
 +      if (!vreg)
 +              goto out;
 +      else if (!vreg->enabled || vreg->unused)
                goto out;
  
        ret = regulator_disable(vreg->reg);
  static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
  {
        struct ufs_vreg_info *info = &hba->vreg_info;
 +      int ret = 0;
  
 -      if (info)
 -              return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 +      if (info->vdd_hba) {
 +              ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
  
 -      return 0;
 +              if (!ret)
 +                      ufshcd_vops_update_sec_cfg(hba, on);
 +      }
 +
 +      return ret;
  }
  
  static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
@@@ -8190,73 -4589,22 +8191,73 @@@ static int ufshcd_init_hba_vreg(struct 
        return 0;
  }
  
 -static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 -                                      bool skip_ref_clk)
 +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
 +{
 +      int ret = 0;
 +      struct ufs_vreg_info *info = &hba->vreg_info;
 +
 +      if (!info)
 +              goto out;
 +      else if (!info->vccq)
 +              goto out;
 +
 +      if (unused) {
 +              /* shut off the rail here */
 +              ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
 +              /*
 +               * Mark this rail as no longer used, so it doesn't get enabled
 +               * later by mistake
 +               */
 +              if (!ret)
 +                      info->vccq->unused = true;
 +      } else {
 +              /*
 +               * The rail should already be enabled, so just make sure the
 +               * unused flag is cleared.
 +               */
 +              info->vccq->unused = false;
 +      }
 +out:
 +      return ret;
 +}
 +
 +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 +                             bool skip_ref_clk, bool is_gating_context)
  {
        int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
        unsigned long flags;
 +      ktime_t start = ktime_get();
 +      bool clk_state_changed = false;
  
        if (!head || list_empty(head))
                goto out;
  
 +      /* call vendor specific bus vote before enabling the clocks */
 +      if (on) {
 +              ret = ufshcd_vops_set_bus_vote(hba, on);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      /*
 +       * Vendor-specific setup_clocks ops may depend on clocks managed by
 +       * this standard driver, so call the vendor-specific setup_clocks
 +       * before disabling the clocks managed here.
 +       */
 +      if (!on) {
 +              ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
 +              if (ret)
 +                      return ret;
 +      }
 +
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
                                continue;
  
 +                      clk_state_changed = on ^ clki->enabled;
                        if (on && !clki->enabled) {
                                ret = clk_prepare_enable(clki->clk);
                                if (ret) {
                }
        }
  
 -      ret = ufshcd_vops_setup_clocks(hba, on);
 +      /*
 +       * Vendor-specific setup_clocks ops may depend on clocks managed by
 +       * this standard driver, so call the vendor-specific setup_clocks
 +       * after enabling the clocks managed here.
 +       */
 +      if (on) {
 +              ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
 +              if (ret)
 +                      goto out;
 +      }
 +
 +      /*
 +       * call vendor specific bus vote to remove the vote after
 +       * disabling the clocks.
 +       */
 +      if (!on)
 +              ret = ufshcd_vops_set_bus_vote(hba, on);
 +
  out:
        if (ret) {
 +              if (on)
 +                      /* Can't do much if this fails */
 +                      (void) ufshcd_vops_set_bus_vote(hba, false);
                list_for_each_entry(clki, head, list) {
                        if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
                                clk_disable_unprepare(clki->clk);
                }
 -      } else if (on) {
 +      } else if (!ret && on) {
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.state = CLKS_ON;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                      hba->clk_gating.state);
                spin_unlock_irqrestore(hba->host->host_lock, flags);
 +              /* restore the secure configuration as clocks are enabled */
 +              ufshcd_vops_update_sec_cfg(hba, true);
        }
 +
 +      if (clk_state_changed)
 +              trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
 +                      (on ? "on" : "off"),
 +                      ktime_to_us(ktime_sub(ktime_get(), start)), ret);
        return ret;
  }
  
 -static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 +static int ufshcd_enable_clocks(struct ufs_hba *hba)
 +{
 +      return  ufshcd_setup_clocks(hba, true, false, false);
 +}
 +
 +static int ufshcd_disable_clocks(struct ufs_hba *hba,
 +                               bool is_gating_context)
 +{
 +      return  ufshcd_setup_clocks(hba, false, false, is_gating_context);
 +}
 +
 +static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
 +                                            bool is_gating_context)
  {
 -      return  __ufshcd_setup_clocks(hba, on, false);
 +      return  ufshcd_setup_clocks(hba, false, true, is_gating_context);
  }
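
As a reading aid, here is a minimal sketch of the clock on/off ordering that ufshcd_setup_clocks() enforces above, assuming the vendor ops are populated; error handling and the skip_ref_clk case are omitted:

/*
 * Sketch of the ordering enforced by ufshcd_setup_clocks() above, assuming
 * the vendor ops are populated; error handling and skip_ref_clk omitted.
 */
static void clock_toggle_order_sketch(struct ufs_hba *hba, bool on,
                                      bool is_gating_context)
{
        if (on) {
                ufshcd_vops_set_bus_vote(hba, true);    /* bus vote first */
                /* clk_prepare_enable() each clock on hba->clk_list_head */
                ufshcd_vops_setup_clocks(hba, true, is_gating_context);
        } else {
                ufshcd_vops_setup_clocks(hba, false, is_gating_context);
                /* clk_disable_unprepare() each clock on hba->clk_list_head */
                ufshcd_vops_set_bus_vote(hba, false);   /* drop the vote last */
        }
}

The ordering is symmetric: the bus vote and the vendor setup_clocks bracket the clk_prepare_enable()/clk_disable_unprepare() loop on both paths.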
  
  static int ufshcd_init_clocks(struct ufs_hba *hba)
@@@ -8377,7 -4684,7 +8378,7 @@@ static int ufshcd_variant_hba_init(stru
  {
        int err = 0;
  
 -      if (!hba->vops)
 +      if (!hba->var || !hba->var->vops)
                goto out;
  
        err = ufshcd_vops_init(hba);
@@@ -8401,9 -4708,11 +8402,9 @@@ out
  
  static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
  {
 -      if (!hba->vops)
 +      if (!hba->var || !hba->var->vops)
                return;
  
 -      ufshcd_vops_setup_clocks(hba, false);
 -
        ufshcd_vops_setup_regulators(hba, false);
  
        ufshcd_vops_exit(hba);
@@@ -8432,7 -4741,7 +8433,7 @@@ static int ufshcd_hba_init(struct ufs_h
        if (err)
                goto out_disable_hba_vreg;
  
 -      err = ufshcd_setup_clocks(hba, true);
 +      err = ufshcd_enable_clocks(hba);
        if (err)
                goto out_disable_hba_vreg;
  
  out_disable_vreg:
        ufshcd_setup_vreg(hba, false);
  out_disable_clks:
 -      ufshcd_setup_clocks(hba, false);
 +      ufshcd_disable_clocks(hba, false);
  out_disable_hba_vreg:
        ufshcd_setup_hba_vreg(hba, false);
  out:
@@@ -8466,13 -4775,7 +8467,13 @@@ static void ufshcd_hba_exit(struct ufs_
        if (hba->is_powered) {
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
 -              ufshcd_setup_clocks(hba, false);
 +              if (ufshcd_is_clkscaling_supported(hba)) {
 +                      if (hba->devfreq)
 +                              ufshcd_suspend_clkscaling(hba);
 +                      if (hba->clk_scaling.workq)
 +                              destroy_workqueue(hba->clk_scaling.workq);
 +              }
 +              ufshcd_disable_clocks(hba, false);
                ufshcd_setup_hba_vreg(hba, false);
                hba->is_powered = false;
        }
@@@ -8485,19 -4788,19 +8486,19 @@@ ufshcd_send_request_sense(struct ufs_hb
                                0,
                                0,
                                0,
 -                              SCSI_SENSE_BUFFERSIZE,
 +                              UFSHCD_REQ_SENSE_SIZE,
                                0};
        char *buffer;
        int ret;
  
 -      buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
 +      buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                goto out;
        }
  
        ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
 -                              SCSI_SENSE_BUFFERSIZE, NULL,
 +                              UFSHCD_REQ_SENSE_SIZE, NULL,
                                msecs_to_jiffies(1000), 3, NULL, REQ_PM);
        if (ret)
                pr_err("%s: failed with err %d\n", __func__, ret);
@@@ -8605,20 -4908,10 +8606,20 @@@ static int ufshcd_link_state_transition
                   (!check_for_bkops || (check_for_bkops &&
                    !hba->auto_bkops_enabled))) {
                /*
 +               * Make sure the link is in low power mode; currently we do this
 +               * by putting the link in Hibern8. Another way to put the link in
 +               * low power mode is to send a DME end point reset to the device
 +               * and then a DME reset command to the local UniPro, but putting
 +               * the link in Hibern8 is much faster.
 +               */
 +              ret = ufshcd_uic_hibern8_enter(hba);
 +              if (ret)
 +                      goto out;
 +              /*
                 * Change controller state to "reset state" which
                 * should also put the link in off/reset state
                 */
 -              ufshcd_hba_stop(hba);
 +              ufshcd_hba_stop(hba, true);
                /*
                 * TODO: Check if we need any delay to make sure that
                 * controller is reset
  static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
  {
        /*
 +       * It seems some UFS devices may keep drawing more than the sleep
 +       * current (at least for 500us) from the UFS rails (especially from the
 +       * VCCQ rail). To avoid this, add a 2ms delay before putting these UFS
 +       * rails into LPM mode.
 +       */
 +      if (!ufshcd_is_link_active(hba))
 +              usleep_range(2000, 2100);
 +
 +      /*
         * If UFS device is either in UFS_Sleep turn off VCC rail to save some
         * power.
         *
@@@ -8673,6 -4957,7 +8674,6 @@@ static int ufshcd_vreg_set_hpm(struct u
            !hba->dev_info.is_lu_power_on_wp) {
                ret = ufshcd_setup_vreg(hba, true);
        } else if (!ufshcd_is_ufs_dev_active(hba)) {
 -              ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
                if (!ret && !ufshcd_is_link_active(hba)) {
                        ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
                        if (ret)
                        if (ret)
                                goto vccq_lpm;
                }
 +              ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
        }
        goto out;
  
  
  static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
  {
 -      if (ufshcd_is_link_off(hba))
 +      if (ufshcd_is_link_off(hba) ||
 +          (ufshcd_is_link_hibern8(hba)
 +           && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
                ufshcd_setup_hba_vreg(hba, false);
  }
  
  static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
  {
 -      if (ufshcd_is_link_off(hba))
 +      if (ufshcd_is_link_off(hba) ||
 +          (ufshcd_is_link_hibern8(hba)
 +           && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
                ufshcd_setup_hba_vreg(hba, true);
  }
  
@@@ -8747,17 -5027,8 +8748,17 @@@ static int ufshcd_suspend(struct ufs_hb
         * If we can't transition into any of the low power modes
         * just gate the clocks.
         */
 -      ufshcd_hold(hba, false);
 +      WARN_ON(hba->hibern8_on_idle.is_enabled &&
 +              hba->hibern8_on_idle.active_reqs);
 +      ufshcd_hold_all(hba);
        hba->clk_gating.is_suspended = true;
 +      hba->hibern8_on_idle.is_suspended = true;
 +
 +      if (hba->clk_scaling.is_allowed) {
 +              cancel_work_sync(&hba->clk_scaling.suspend_work);
 +              cancel_work_sync(&hba->clk_scaling.resume_work);
 +              ufshcd_suspend_clkscaling(hba);
 +      }
  
        if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
                        req_link_state == UIC_LINK_ACTIVE_STATE) {
  
        if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
            (req_link_state == hba->uic_link_state))
 -              goto out;
 +              goto enable_gating;
  
        /* UFS device & link must be active before we enter in this function */
        if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
                ret = -EINVAL;
 -              goto out;
 +              goto enable_gating;
        }
  
        if (ufshcd_is_runtime_pm(pm_op)) {
        if (ret)
                goto set_dev_active;
  
 +      if (ufshcd_is_link_hibern8(hba) &&
 +          ufshcd_is_hibern8_on_idle_allowed(hba))
 +              hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 +
        ufshcd_vreg_set_lpm(hba);
  
  disable_clks:
        /*
 -       * The clock scaling needs access to controller registers. Hence, Wait
 -       * for pending clock scaling work to be done before clocks are
 -       * turned off.
 -       */
 -      if (ufshcd_is_clkscaling_enabled(hba)) {
 -              devfreq_suspend_device(hba->devfreq);
 -              hba->clk_scaling.window_start_t = 0;
 -      }
 -      /*
         * Call vendor specific suspend callback. As these callbacks may access
         * vendor specific host controller register space call them before the
         * host clocks are ON.
        if (ret)
                goto set_link_active;
  
 -      ret = ufshcd_vops_setup_clocks(hba, false);
 -      if (ret)
 -              goto vops_resume;
 -
        if (!ufshcd_is_link_active(hba))
 -              ufshcd_setup_clocks(hba, false);
 +              ret = ufshcd_disable_clocks(hba, false);
        else
                /* If link is active, device ref_clk can't be switched off */
 -              __ufshcd_setup_clocks(hba, false, true);
 +              ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
 +      if (ret)
 +              goto set_link_active;
  
 -      hba->clk_gating.state = CLKS_OFF;
 +      if (ufshcd_is_clkgating_allowed(hba)) {
 +              hba->clk_gating.state = CLKS_OFF;
 +              trace_ufshcd_clk_gating(dev_name(hba->dev),
 +                                      hba->clk_gating.state);
 +      }
        /*
         * Disable the host irq as host controller as there won't be any
         * host controller transaction expected till resume.
        ufshcd_hba_vreg_set_lpm(hba);
        goto out;
  
 -vops_resume:
 -      ufshcd_vops_resume(hba, pm_op);
  set_link_active:
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_resume_clkscaling(hba);
        ufshcd_vreg_set_hpm(hba);
 -      if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
 +      if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
                ufshcd_set_link_active(hba);
 -      else if (ufshcd_is_link_off(hba))
 +      } else if (ufshcd_is_link_off(hba)) {
 +              ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
                ufshcd_host_reset_and_restore(hba);
 +      }
  set_dev_active:
        if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
                ufshcd_disable_auto_bkops(hba);
  enable_gating:
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_resume_clkscaling(hba);
 +      hba->hibern8_on_idle.is_suspended = false;
        hba->clk_gating.is_suspended = false;
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
  out:
        hba->pm_op_in_progress = 0;
 +
 +      if (ret)
 +              ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
 +
        return ret;
  }
  
@@@ -8890,12 -5155,14 +8891,12 @@@ static int ufshcd_resume(struct ufs_hb
  
        ufshcd_hba_vreg_set_hpm(hba);
        /* Make sure clocks are enabled before accessing controller */
 -      ret = ufshcd_setup_clocks(hba, true);
 +      ret = ufshcd_enable_clocks(hba);
        if (ret)
                goto out;
  
        /* enable the host irq as host controller would be active soon */
 -      ret = ufshcd_enable_irq(hba);
 -      if (ret)
 -              goto disable_irq_and_vops_clks;
 +      ufshcd_enable_irq(hba);
  
        ret = ufshcd_vreg_set_hpm(hba);
        if (ret)
  
        if (ufshcd_is_link_hibern8(hba)) {
                ret = ufshcd_uic_hibern8_exit(hba);
 -              if (!ret)
 +              if (!ret) {
                        ufshcd_set_link_active(hba);
 -              else
 +                      if (ufshcd_is_hibern8_on_idle_allowed(hba))
 +                              hba->hibern8_on_idle.state = HIBERN8_EXITED;
 +              } else {
                        goto vendor_suspend;
 +              }
        } else if (ufshcd_is_link_off(hba)) {
 -              ret = ufshcd_host_reset_and_restore(hba);
                /*
 -               * ufshcd_host_reset_and_restore() should have already
 +               * A full initialization of the host and the device is required
 +               * since the link was put to off during suspend.
 +               */
 +              ret = ufshcd_reset_and_restore(hba);
 +              /*
 +               * ufshcd_reset_and_restore() should have already
                 * set the link state as active
                 */
                if (ret || !ufshcd_is_link_active(hba))
                        goto vendor_suspend;
 +              /* mark link state as hibern8 exited */
 +              if (ufshcd_is_hibern8_on_idle_allowed(hba))
 +                      hba->hibern8_on_idle.state = HIBERN8_EXITED;
        }
  
        if (!ufshcd_is_ufs_dev_active(hba)) {
                ufshcd_urgent_bkops(hba);
  
        hba->clk_gating.is_suspended = false;
 +      hba->hibern8_on_idle.is_suspended = false;
  
 -      if (ufshcd_is_clkscaling_enabled(hba))
 -              devfreq_resume_device(hba->devfreq);
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_resume_clkscaling(hba);
  
        /* Schedule clock gating in case of no access to UFS device yet */
 -      ufshcd_release(hba);
 +      ufshcd_release_all(hba);
        goto out;
  
  set_old_link_state:
        ufshcd_link_state_transition(hba, old_link_state, 0);
 +      if (ufshcd_is_link_hibern8(hba) &&
 +          ufshcd_is_hibern8_on_idle_allowed(hba))
 +              hba->hibern8_on_idle.state = HIBERN8_ENTERED;
  vendor_suspend:
        ufshcd_vops_suspend(hba, pm_op);
  disable_vreg:
        ufshcd_vreg_set_lpm(hba);
  disable_irq_and_vops_clks:
        ufshcd_disable_irq(hba);
 -      ufshcd_setup_clocks(hba, false);
 +      if (hba->clk_scaling.is_allowed)
 +              ufshcd_suspend_clkscaling(hba);
 +      ufshcd_disable_clocks(hba, false);
 +      if (ufshcd_is_clkgating_allowed(hba))
 +              hba->clk_gating.state = CLKS_OFF;
  out:
        hba->pm_op_in_progress = 0;
 +
 +      if (ret)
 +              ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
 +
        return ret;
  }
  
  int ufshcd_system_suspend(struct ufs_hba *hba)
  {
        int ret = 0;
 +      ktime_t start = ktime_get();
  
        if (!hba || !hba->is_powered)
                return 0;
  
 -      if (pm_runtime_suspended(hba->dev)) {
 -              if (hba->rpm_lvl == hba->spm_lvl)
 -                      /*
 -                       * There is possibility that device may still be in
 -                       * active state during the runtime suspend.
 -                       */
 -                      if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
 -                          hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
 -                              goto out;
 +      if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
 +           hba->curr_dev_pwr_mode) &&
 +          (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
 +           hba->uic_link_state))
 +              goto out;
  
 +      if (pm_runtime_suspended(hba->dev)) {
                /*
                 * UFS device and/or UFS link low power states during runtime
                 * suspend seems to be different than what is expected during
  
        ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
  out:
 +      trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
        if (!ret)
                hba->is_sys_suspended = true;
        return ret;
@@@ -9043,9 -5287,6 +9044,9 @@@ EXPORT_SYMBOL(ufshcd_system_suspend)
  
  int ufshcd_system_resume(struct ufs_hba *hba)
  {
 +      int ret = 0;
 +      ktime_t start = ktime_get();
 +
        if (!hba)
                return -EINVAL;
  
                 * Let the runtime resume take care of resuming
                 * if runtime suspended.
                 */
 -              return 0;
 -
 -      return ufshcd_resume(hba, UFS_SYSTEM_PM);
 +              goto out;
 +      else
 +              ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
 +out:
 +      trace_ufshcd_system_resume(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode, hba->uic_link_state);
 +      return ret;
  }
  EXPORT_SYMBOL(ufshcd_system_resume);
  
   */
  int ufshcd_runtime_suspend(struct ufs_hba *hba)
  {
 +      int ret = 0;
 +      ktime_t start = ktime_get();
 +
        if (!hba)
                return -EINVAL;
  
        if (!hba->is_powered)
 -              return 0;
 +              goto out;
 +      else
 +              ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
 +out:
 +      trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode,
 +              hba->uic_link_state);
 +      return ret;
  
 -      return ufshcd_suspend(hba, UFS_RUNTIME_PM);
  }
  EXPORT_SYMBOL(ufshcd_runtime_suspend);
  
   */
  int ufshcd_runtime_resume(struct ufs_hba *hba)
  {
 +      int ret = 0;
 +      ktime_t start = ktime_get();
 +
        if (!hba)
                return -EINVAL;
  
        if (!hba->is_powered)
 -              return 0;
 -
 -      return ufshcd_resume(hba, UFS_RUNTIME_PM);
 +              goto out;
 +      else
 +              ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
 +out:
 +      trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
 +              ktime_to_us(ktime_sub(ktime_get(), start)),
 +              hba->curr_dev_pwr_mode,
 +              hba->uic_link_state);
 +      return ret;
  }
  EXPORT_SYMBOL(ufshcd_runtime_resume);
  
@@@ -9143,246 -5360,6 +9144,246 @@@ int ufshcd_runtime_idle(struct ufs_hba 
  }
  EXPORT_SYMBOL(ufshcd_runtime_idle);
  
 +static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
 +                                         struct device_attribute *attr,
 +                                         const char *buf, size_t count,
 +                                         bool rpm)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      unsigned long flags, value;
 +
 +      if (kstrtoul(buf, 0, &value))
 +              return -EINVAL;
 +
 +      if (value >= UFS_PM_LVL_MAX)
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (rpm)
 +              hba->rpm_lvl = value;
 +      else
 +              hba->spm_lvl = value;
 +      ufshcd_apply_pm_quirks(hba);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +      return count;
 +}
 +
 +static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      int curr_len;
 +      u8 lvl;
 +
 +      curr_len = snprintf(buf, PAGE_SIZE,
 +                          "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                          hba->rpm_lvl,
 +                          ufschd_ufs_dev_pwr_mode_to_string(
 +                              ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
 +                          ufschd_uic_link_state_to_string(
 +                              ufs_pm_lvl_states[hba->rpm_lvl].link_state));
 +
 +      curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                           "\nAll available Runtime PM levels info:\n");
 +      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
 +              curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                                   "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                                  lvl,
 +                                  ufschd_ufs_dev_pwr_mode_to_string(
 +                                      ufs_pm_lvl_states[lvl].dev_state),
 +                                  ufschd_uic_link_state_to_string(
 +                                      ufs_pm_lvl_states[lvl].link_state));
 +
 +      return curr_len;
 +}
 +
 +static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
 +}
 +
 +static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
 +{
 +      hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
 +      hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
 +      sysfs_attr_init(&hba->rpm_lvl_attr.attr);
 +      hba->rpm_lvl_attr.attr.name = "rpm_lvl";
 +      hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
 +}
 +
 +static ssize_t ufshcd_spm_lvl_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      int curr_len;
 +      u8 lvl;
 +
 +      curr_len = snprintf(buf, PAGE_SIZE,
 +                          "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                          hba->spm_lvl,
 +                          ufschd_ufs_dev_pwr_mode_to_string(
 +                              ufs_pm_lvl_states[hba->spm_lvl].dev_state),
 +                          ufschd_uic_link_state_to_string(
 +                              ufs_pm_lvl_states[hba->spm_lvl].link_state));
 +
 +      curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                           "\nAll available System PM levels info:\n");
 +      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
 +              curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 +                                   "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
 +                                  lvl,
 +                                  ufschd_ufs_dev_pwr_mode_to_string(
 +                                      ufs_pm_lvl_states[lvl].dev_state),
 +                                  ufschd_uic_link_state_to_string(
 +                                      ufs_pm_lvl_states[lvl].link_state));
 +
 +      return curr_len;
 +}
 +
 +static ssize_t ufshcd_spm_lvl_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
 +}
 +
 +static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
 +{
 +      hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
 +      hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
 +      sysfs_attr_init(&hba->spm_lvl_attr.attr);
 +      hba->spm_lvl_attr.attr.name = "spm_lvl";
 +      hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->spm_lvl_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
 +}
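
A minimal user-space sketch of exercising these nodes follows, assuming a platform-specific sysfs directory for the UFS host device (the path is not defined by this patch); the accepted values are indices below UFS_PM_LVL_MAX, as validated in ufshcd_pm_lvl_store() above:

/*
 * Illustration only: write a runtime PM level to the rpm_lvl node created
 * above. The sysfs directory is platform dependent and assumed here.
 */
#include <stdio.h>

static int set_rpm_lvl(const char *host_sysfs_dir, unsigned int lvl)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "%s/rpm_lvl", host_sysfs_dir);
        f = fopen(path, "w");
        if (!f)
                return -1;
        /* parsed by kstrtoul() and bounds-checked against UFS_PM_LVL_MAX */
        fprintf(f, "%u\n", lvl);
        return fclose(f);
}

Writing to spm_lvl works the same way through ufshcd_spm_lvl_store().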
 +
 +static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
 +                                enum desc_idn desc_id,
 +                                u8 desc_index,
 +                                u8 param_offset,
 +                                u8 *sysfs_buf,
 +                                u8 param_size)
 +{
 +      u8 desc_buf[8] = {0};
 +      int ret;
 +
 +      if (param_size > 8)
 +              return -EINVAL;
 +
 +      pm_runtime_get_sync(hba->dev);
 +      ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
 +                              param_offset, desc_buf, param_size);
 +      pm_runtime_put_sync(hba->dev);
 +
 +      if (ret)
 +              return -EINVAL;
 +      switch (param_size) {
 +      case 1:
 +              ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%02X\n", *desc_buf);
 +              break;
 +      case 2:
 +              ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%04X\n",
 +                      get_unaligned_be16(desc_buf));
 +              break;
 +      case 4:
 +              ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%08X\n",
 +                      get_unaligned_be32(desc_buf));
 +              break;
 +      case 8:
 +              ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%016llX\n",
 +                      get_unaligned_be64(desc_buf));
 +              break;
 +      }
 +
 +      return ret;
 +}
 +
 +#define UFS_DESC_PARAM(_name, _puname, _duname, _size)                        \
 +      static ssize_t _name##_show(struct device *dev,                 \
 +              struct device_attribute *attr, char *buf)                       \
 +{                                                                     \
 +      struct ufs_hba *hba = dev_get_drvdata(dev);             \
 +      return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
 +              0, _duname##_DESC_PARAM##_puname, buf, _size);          \
 +}                                                                     \
 +static DEVICE_ATTR_RO(_name)
 +
 +#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size)                   \
 +              UFS_DESC_PARAM(_name, _uname, HEALTH, _size)
 +
 +UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
 +UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
 +UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);
 +
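For readability, this is the mechanical expansion of one instantiation above; it is shown only as an aid and adds no code beyond what the macro already generates:

/* UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1) expands to: */
static ssize_t eol_info_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_HEALTH,
                0, HEALTH_DESC_PARAM_EOL_INFO, buf, 1);
}
static DEVICE_ATTR_RO(eol_info);

The DEVICE_ATTR_RO() line is what defines the dev_attr_eol_info referenced in the health descriptor attribute group below.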
 +static struct attribute *ufs_sysfs_health_descriptor[] = {
 +      &dev_attr_eol_info.attr,
 +      &dev_attr_life_time_estimation_a.attr,
 +      &dev_attr_life_time_estimation_b.attr,
 +      NULL,
 +};
 +
 +static const struct attribute_group ufs_sysfs_health_descriptor_group = {
 +      .name = "health_descriptor",
 +      .attrs = ufs_sysfs_health_descriptor,
 +};
 +
 +static const struct attribute_group *ufs_sysfs_groups[] = {
 +      &ufs_sysfs_health_descriptor_group,
 +      NULL,
 +};
 +
 +static void ufshcd_add_desc_sysfs_nodes(struct device *dev)
 +{
 +      int ret;
 +
 +      ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
 +      if (ret)
 +              dev_err(dev,
 +                      "%s: sysfs groups creation failed (err = %d)\n",
 +                      __func__, ret);
 +}
 +
 +static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
 +{
 +      ufshcd_add_rpm_lvl_sysfs_nodes(hba);
 +      ufshcd_add_spm_lvl_sysfs_nodes(hba);
 +      ufshcd_add_desc_sysfs_nodes(hba->dev);
 +}
 +
 +static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
 +{
 +      bool suspend = false;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_scaling.is_allowed) {
 +              hba->clk_scaling.is_allowed = false;
 +              suspend = true;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      /*
 +       * Clock scaling work may already have been scheduled, so make sure it
 +       * doesn't race with shutdown.
 +       */
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
 +              cancel_work_sync(&hba->clk_scaling.suspend_work);
 +              cancel_work_sync(&hba->clk_scaling.resume_work);
 +              if (suspend)
 +                      ufshcd_suspend_clkscaling(hba);
 +      }
 +
 +      /* Unregister so that devfreq_monitor can't race with shutdown */
 +      if (hba->devfreq)
 +              devfreq_remove_device(hba->devfreq);
 +}
 +
  /**
   * ufshcd_shutdown - shutdown routine
   * @hba: per adapter instance
@@@ -9401,88 -5378,20 +9402,88 @@@ int ufshcd_shutdown(struct ufs_hba *hba
        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
                goto out;
  
 -      if (pm_runtime_suspended(hba->dev)) {
 -              ret = ufshcd_runtime_resume(hba);
 -              if (ret)
 -                      goto out;
 -      }
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_hold_all(hba);
 +      ufshcd_mark_shutdown_ongoing(hba);
 +      ufshcd_shutdown_clkscaling(hba);
 +      /*
 +       * (1) Acquire the lock to stop any more requests
 +       * (2) Wait for all issued requests to complete
 +       */
 +      ufshcd_get_write_lock(hba);
 +      ufshcd_scsi_block_requests(hba);
 +      ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 +      if (ret)
 +              dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
 +                      __func__, ret);
 +      /* Requests may have errored out above; let them be handled */
 +      flush_work(&hba->eh_work);
 +      /* Requests issued from contexts other than shutdown will fail from now on */
 +      ufshcd_scsi_unblock_requests(hba);
 +      ufshcd_release_all(hba);
 +      ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
 +out:
 +      if (ret)
 +              dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
 +      /* allow force shutdown even in case of errors */
 +      return 0;
 +}
 +EXPORT_SYMBOL(ufshcd_shutdown);
 +
 +/*
 + * Permitted values: 0, 1, 2.
 + * 0 -> Disable IO latency histograms (default)
 + * 1 -> Enable IO latency histograms
 + * 2 -> Zero out IO latency histograms
 + */
 +static ssize_t
 +latency_hist_store(struct device *dev, struct device_attribute *attr,
 +                 const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      long value;
 +
 +      if (kstrtol(buf, 0, &value))
 +              return -EINVAL;
 +      if (value == BLK_IO_LAT_HIST_ZERO) {
 +              memset(&hba->io_lat_read, 0, sizeof(hba->io_lat_read));
 +              memset(&hba->io_lat_write, 0, sizeof(hba->io_lat_write));
 +      } else if (value == BLK_IO_LAT_HIST_ENABLE ||
 +               value == BLK_IO_LAT_HIST_DISABLE)
 +              hba->latency_hist_enabled = value;
 +      return count;
 +}
 +
 +ssize_t
 +latency_hist_show(struct device *dev, struct device_attribute *attr,
 +                char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      size_t written_bytes;
 +
 +      written_bytes = blk_latency_hist_show("Read", &hba->io_lat_read,
 +                      buf, PAGE_SIZE);
 +      written_bytes += blk_latency_hist_show("Write", &hba->io_lat_write,
 +                      buf + written_bytes, PAGE_SIZE - written_bytes);
 +
 +      return written_bytes;
 +}
 +
 +static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
 +                 latency_hist_show, latency_hist_store);
 +
 +static void
 +ufshcd_init_latency_hist(struct ufs_hba *hba)
 +{
 +      if (device_create_file(hba->dev, &dev_attr_latency_hist))
 +              dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
 +}
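
A minimal user-space sketch of driving the latency_hist node, assuming a platform-specific sysfs path (not defined by this patch) and the 0/1/2 value mapping documented above:

/*
 * Illustration only: enable the histograms and dump the accumulated
 * read/write buckets. The sysfs directory is platform dependent and
 * assumed here; 1 means "enable" per the mapping documented above.
 */
#include <stdio.h>

static void dump_latency_hist(const char *host_sysfs_dir)
{
        char path[256], line[256];
        FILE *f;

        snprintf(path, sizeof(path), "%s/latency_hist", host_sysfs_dir);

        f = fopen(path, "w");
        if (f) {
                fputs("1\n", f);        /* latency_hist_store(): enable */
                fclose(f);
        }

        f = fopen(path, "r");
        if (!f)
                return;
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* output of blk_latency_hist_show() */
        fclose(f);
}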
  
 -      ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
 -out:
 -      if (ret)
 -              dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
 -      /* allow force shutdown even in case of errors */
 -      return 0;
 +static void
 +ufshcd_exit_latency_hist(struct ufs_hba *hba)
 +{
 +      device_remove_file(hba->dev, &dev_attr_latency_hist);
  }
 -EXPORT_SYMBOL(ufshcd_shutdown);
  
  /**
   * ufshcd_remove - de-allocate SCSI host and host memory space
@@@ -9494,17 -5403,12 +9495,17 @@@ void ufshcd_remove(struct ufs_hba *hba
        scsi_remove_host(hba->host);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
 -      ufshcd_hba_stop(hba);
 +      ufshcd_hba_stop(hba, true);
  
        ufshcd_exit_clk_gating(hba);
 -      if (ufshcd_is_clkscaling_enabled(hba))
 +      ufshcd_exit_hibern8_on_idle(hba);
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
 +              ufshcd_exit_latency_hist(hba);
                devfreq_remove_device(hba->devfreq);
 +      }
        ufshcd_hba_exit(hba);
 +      ufsdbg_remove_debugfs(hba);
  }
  EXPORT_SYMBOL_GPL(ufshcd_remove);
  
@@@ -9570,370 -5474,66 +9571,370 @@@ out_error
  }
  EXPORT_SYMBOL(ufshcd_alloc_host);
  
 -static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 +/**
 + * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 + * @hba: per adapter instance
 + * @scale_up: True if scaling up and false if scaling down
 + *
 + * Returns true if scaling is required, false otherwise.
 + */
 +static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
 +                                             bool scale_up)
  {
 -      int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
  
        if (!head || list_empty(head))
 -              goto out;
 -
 -      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
 -      if (ret)
 -              return ret;
 +              return false;
  
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (scale_up && clki->max_freq) {
                                if (clki->curr_freq == clki->max_freq)
                                        continue;
 -                              ret = clk_set_rate(clki->clk, clki->max_freq);
 -                              if (ret) {
 -                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 -                                              __func__, clki->name,
 -                                              clki->max_freq, ret);
 -                                      break;
 -                              }
 -                              clki->curr_freq = clki->max_freq;
 -
 +                              return true;
                        } else if (!scale_up && clki->min_freq) {
                                if (clki->curr_freq == clki->min_freq)
                                        continue;
 -                              ret = clk_set_rate(clki->clk, clki->min_freq);
 -                              if (ret) {
 -                                      dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
 -                                              __func__, clki->name,
 -                                              clki->min_freq, ret);
 -                                      break;
 -                              }
 -                              clki->curr_freq = clki->min_freq;
 +                              return true;
                        }
                }
 -              dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
 -                              clki->name, clk_get_rate(clki->clk));
        }
  
 -      ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
 +      return false;
 +}
 +
 +/**
 + * ufshcd_scale_gear - scale up/down UFS gear
 + * @hba: per adapter instance
 + * @scale_up: True for scaling up gear and false for scaling down
 + *
 + * Returns 0 for success,
 + * Returns -EBUSY if scaling can't happen at this time
 + * Returns non-zero for any other errors
 + */
 +static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +      struct ufs_pa_layer_attr new_pwr_info;
 +      u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
 +
 +      BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
 +
 +      if (scale_up) {
 +              memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
 +                     sizeof(struct ufs_pa_layer_attr));
 +              /*
 +               * Some UFS devices may stop responding after switching from
 +               * HS-G1 to HS-G3. These devices have been found to work fine
 +               * with a two-step switch: HS-G1 to HS-G2 followed by HS-G2 to
 +               * HS-G3. If the UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH quirk is
 +               * enabled for such devices, this two-step gear switch
 +               * workaround is applied.
 +               */
 +              if ((hba->dev_quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
 +                  && (hba->pwr_info.gear_tx == UFS_HS_G1)
 +                  && (new_pwr_info.gear_tx == UFS_HS_G3)) {
 +                      /* scale up to G2 first */
 +                      new_pwr_info.gear_tx = UFS_HS_G2;
 +                      new_pwr_info.gear_rx = UFS_HS_G2;
 +                      ret = ufshcd_change_power_mode(hba, &new_pwr_info);
 +                      if (ret)
 +                              goto out;
 +
 +                      /* scale up to G3 now */
 +                      new_pwr_info.gear_tx = UFS_HS_G3;
 +                      new_pwr_info.gear_rx = UFS_HS_G3;
 +                      /* now, fall through to set the HS-G3 */
 +              }
 +              ret = ufshcd_change_power_mode(hba, &new_pwr_info);
 +              if (ret)
 +                      goto out;
 +      } else {
 +              memcpy(&new_pwr_info, &hba->pwr_info,
 +                     sizeof(struct ufs_pa_layer_attr));
 +
 +              if (hba->pwr_info.gear_tx > scale_down_gear
 +                  || hba->pwr_info.gear_rx > scale_down_gear) {
 +                      /* save the current power mode */
 +                      memcpy(&hba->clk_scaling.saved_pwr_info.info,
 +                              &hba->pwr_info,
 +                              sizeof(struct ufs_pa_layer_attr));
 +
 +                      /* scale down gear */
 +                      new_pwr_info.gear_tx = scale_down_gear;
 +                      new_pwr_info.gear_rx = scale_down_gear;
 +                      if (!(hba->dev_quirks & UFS_DEVICE_NO_FASTAUTO)) {
 +                              new_pwr_info.pwr_tx = FASTAUTO_MODE;
 +                              new_pwr_info.pwr_rx = FASTAUTO_MODE;
 +                      }
 +              }
 +              ret = ufshcd_change_power_mode(hba, &new_pwr_info);
 +      }
  
  out:
 +      if (ret)
 +              dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
 +                      __func__, ret,
 +                      hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
 +                      new_pwr_info.gear_tx, new_pwr_info.gear_rx,
 +                      scale_up);
 +
 +      return ret;
 +}
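
A minimal sketch of how a platform driver might opt into the two-step workaround described above; the hook name here is hypothetical, while hba->dev_quirks and the quirk flag are the names used in this hunk:

/*
 * Hypothetical vendor hook (name is illustrative): request the
 * HS-G1 -> HS-G2 -> HS-G3 stepping applied by ufshcd_scale_gear() above.
 */
static void example_vendor_fixup_dev_quirks(struct ufs_hba *hba)
{
        hba->dev_quirks |= UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH;
}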
 +
 +static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 +{
 +      #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
 +      int ret = 0;
 +      /*
 +       * make sure that there are no outstanding requests when
 +       * clock scaling is in progress
 +       */
 +      ufshcd_scsi_block_requests(hba);
 +      down_write(&hba->lock);
 +      if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 +              ret = -EBUSY;
 +              up_write(&hba->lock);
 +              ufshcd_scsi_unblock_requests(hba);
 +      }
 +
 +      return ret;
 +}
 +
 +static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
 +{
 +      up_write(&hba->lock);
 +      ufshcd_scsi_unblock_requests(hba);
 +}
 +
 +/**
 + * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 + * @hba: per adapter instance
 + * @scale_up: True for scaling up and false for scaling down
 + *
 + * Returns 0 for success,
 + * Returns -EBUSY if scaling can't happen at this time
 + * Returns non-zero for any other errors
 + */
 +static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 +{
 +      int ret = 0;
 +
 +      /* let's not get into low power until clock scaling is completed */
 +      hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
 +      ufshcd_hold_all(hba);
 +
 +      ret = ufshcd_clock_scaling_prepare(hba);
 +      if (ret)
 +              goto out;
 +
 +      /* scale down the gear before scaling down clocks */
 +      if (!scale_up) {
 +              ret = ufshcd_scale_gear(hba, false);
 +              if (ret)
 +                      goto clk_scaling_unprepare;
 +      }
 +
 +      /*
 +       * If auto-hibern8 is supported, put the link into Hibern8
 +       * manually to avoid auto-hibern8 racing with the clock
 +       * frequency scaling sequence.
 +       */
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              ret = ufshcd_uic_hibern8_enter(hba);
 +              if (ret)
 +                      /* link will be in a bad state; no need to scale up the gear */
 +                      return ret;
 +      }
 +
 +      ret = ufshcd_scale_clks(hba, scale_up);
 +      if (ret)
 +              goto scale_up_gear;
 +
 +      if (ufshcd_is_auto_hibern8_supported(hba)) {
 +              ret = ufshcd_uic_hibern8_exit(hba);
 +              if (ret)
 +                      /* link will be in a bad state; no need to scale up the gear */
 +                      return ret;
 +      }
 +
 +      /* scale up the gear after scaling up clocks */
 +      if (scale_up) {
 +              ret = ufshcd_scale_gear(hba, true);
 +              if (ret) {
 +                      ufshcd_scale_clks(hba, false);
 +                      goto clk_scaling_unprepare;
 +              }
 +      }
 +
 +      if (!ret) {
 +              hba->clk_scaling.is_scaled_up = scale_up;
 +              if (scale_up)
 +                      hba->clk_gating.delay_ms =
 +                              hba->clk_gating.delay_ms_perf;
 +              else
 +                      hba->clk_gating.delay_ms =
 +                              hba->clk_gating.delay_ms_pwr_save;
 +      }
 +
 +      goto clk_scaling_unprepare;
 +
 +scale_up_gear:
 +      if (!scale_up)
 +              ufshcd_scale_gear(hba, true);
 +clk_scaling_unprepare:
 +      ufshcd_clock_scaling_unprepare(hba);
 +out:
 +      hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
 +      ufshcd_release_all(hba);
        return ret;
  }
  
 +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +
 +      devfreq_suspend_device(hba->devfreq);
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      hba->clk_scaling.window_start_t = 0;
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +
 +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +      bool suspend = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (!hba->clk_scaling.is_suspended) {
 +              suspend = true;
 +              hba->clk_scaling.is_suspended = true;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      if (suspend)
 +              __ufshcd_suspend_clkscaling(hba);
 +}
 +
 +static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +      bool resume = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      if (hba->clk_scaling.is_suspended) {
 +              resume = true;
 +              hba->clk_scaling.is_suspended = false;
 +      }
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +
 +      if (resume)
 +              devfreq_resume_device(hba->devfreq);
 +}
 +
 +static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
 +}
 +
 +static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 +              struct device_attribute *attr, const char *buf, size_t count)
 +{
 +      struct ufs_hba *hba = dev_get_drvdata(dev);
 +      u32 value;
 +      int err;
 +
 +      if (kstrtou32(buf, 0, &value))
 +              return -EINVAL;
 +
 +      value = !!value;
 +      if (value == hba->clk_scaling.is_allowed)
 +              goto out;
 +
 +      pm_runtime_get_sync(hba->dev);
 +      ufshcd_hold(hba, false);
 +
 +      cancel_work_sync(&hba->clk_scaling.suspend_work);
 +      cancel_work_sync(&hba->clk_scaling.resume_work);
 +
 +      hba->clk_scaling.is_allowed = value;
 +
 +      if (value) {
 +              ufshcd_resume_clkscaling(hba);
 +      } else {
 +              ufshcd_suspend_clkscaling(hba);
 +              err = ufshcd_devfreq_scale(hba, true);
 +              if (err)
 +                      dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
 +                                      __func__, err);
 +      }
 +
 +      ufshcd_release(hba, false);
 +      pm_runtime_put_sync(hba->dev);
 +out:
 +      return count;
 +}
 +
 +static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
 +{
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                         clk_scaling.suspend_work);
 +      unsigned long irq_flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, irq_flags);
 +      if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
 +              spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +              return;
 +      }
 +      hba->clk_scaling.is_suspended = true;
 +      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +
 +      __ufshcd_suspend_clkscaling(hba);
 +}
 +
 +static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
 +{
 +      struct ufs_hba *hba = container_of(work, struct ufs_hba,
 +                                         clk_scaling.resume_work);
 +      unsigned long irq_flags;
 +
 +      spin_lock_irqsave(hba->host->host_lock, irq_flags);
 +      if (!hba->clk_scaling.is_suspended) {
 +              spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +              return;
 +      }
 +      hba->clk_scaling.is_suspended = false;
 +      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +
 +      devfreq_resume_device(hba->devfreq);
 +}
 +
  static int ufshcd_devfreq_target(struct device *dev,
                                unsigned long *freq, u32 flags)
  {
 -      int err = 0;
 +      int ret = 0;
        struct ufs_hba *hba = dev_get_drvdata(dev);
 -      bool release_clk_hold = false;
        unsigned long irq_flags;
 +      ktime_t start;
 +      bool scale_up, sched_clk_scaling_suspend_work = false;
 +
 +      if (!ufshcd_is_clkscaling_supported(hba))
 +              return -EINVAL;
  
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      if ((*freq > 0) && (*freq < UINT_MAX)) {
 +              dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
                return -EINVAL;
 +      }
  
        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (ufshcd_eh_in_progress(hba)) {
                return 0;
        }
  
 -      if (ufshcd_is_clkgating_allowed(hba) &&
 -          (hba->clk_gating.state != CLKS_ON)) {
 -              if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
 -                      /* hold the vote until the scaling work is completed */
 -                      hba->clk_gating.active_reqs++;
 -                      release_clk_hold = true;
 -                      hba->clk_gating.state = CLKS_ON;
 -              } else {
 -                      /*
 -                       * Clock gating work seems to be running in parallel
 -                       * hence skip scaling work to avoid deadlock between
 -                       * current scaling work and gating work.
 -                       */
 -                      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 -                      return 0;
 -              }
 +      if (!hba->clk_scaling.active_reqs)
 +              sched_clk_scaling_suspend_work = true;
 +
 +      scale_up = (*freq == UINT_MAX) ? true : false;
 +      if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
 +              spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +              ret = 0;
 +              goto out; /* no state change required */
        }
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
  
 -      if (*freq == UINT_MAX)
 -              err = ufshcd_scale_clks(hba, true);
 -      else if (*freq == 0)
 -              err = ufshcd_scale_clks(hba, false);
 +      start = ktime_get();
 +      ret = ufshcd_devfreq_scale(hba, scale_up);
 +      trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
 +              (scale_up ? "up" : "down"),
 +              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
  
 -      spin_lock_irqsave(hba->host->host_lock, irq_flags);
 -      if (release_clk_hold)
 -              __ufshcd_release(hba);
 -      spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 +out:
 +      if (sched_clk_scaling_suspend_work)
 +              queue_work(hba->clk_scaling.workq,
 +                         &hba->clk_scaling.suspend_work);
  
 -      return err;
 +      return ret;
  }
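
For orientation, a target callback like the one above is handed to devfreq through a profile; the block being removed further down in this hunk shows the old static profile, and the sketch below rewires the same pieces using only calls that appear in this diff (devfreq_add_device(), devfreq_suspend_device()). Where the reworked driver actually performs this registration is not shown here.

	static struct devfreq_dev_profile ufs_devfreq_profile_sketch = {
		.polling_ms	= 100,
		.target		= ufshcd_devfreq_target,
		.get_dev_status	= ufshcd_devfreq_get_dev_status,
	};

	static int ufshcd_devfreq_register_sketch(struct ufs_hba *hba)
	{
		hba->devfreq = devfreq_add_device(hba->dev,
						  &ufs_devfreq_profile_sketch,
						  "simple_ondemand", NULL);
		if (IS_ERR(hba->devfreq))
			return PTR_ERR(hba->devfreq);

		/* keep devfreq quiet until the UFS device has been detected */
		devfreq_suspend_device(hba->devfreq);
		return 0;
	}
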
  
  static int ufshcd_devfreq_get_dev_status(struct device *dev,
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
        unsigned long flags;
  
 -      if (!ufshcd_is_clkscaling_enabled(hba))
 +      if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;
  
        memset(stat, 0, sizeof(*stat));
@@@ -10004,31 -5611,12 +10005,31 @@@ start_window
        return 0;
  }
  
 -static struct devfreq_dev_profile ufs_devfreq_profile = {
 -      .polling_ms     = 100,
 -      .target         = ufshcd_devfreq_target,
 -      .get_dev_status = ufshcd_devfreq_get_dev_status,
 -};
 +static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
 +{
 +      hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
 +      hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
 +      sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
 +      hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
 +      hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
 +      if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
 +              dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
 +}
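
The clkscale_enable show/store handlers assigned above live outside this hunk; a hedged sketch of the prototypes that device_create_file() expects is given below. The handler bodies, and the is_allowed field used as backing state, are illustrative assumptions.

	static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
			struct device_attribute *attr, char *buf)
	{
		struct ufs_hba *hba = dev_get_drvdata(dev);

		/* backing field name is an assumption, not from this diff */
		return snprintf(buf, PAGE_SIZE, "%d\n",
				hba->clk_scaling.is_allowed);
	}

	static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
			struct device_attribute *attr, const char *buf,
			size_t count)
	{
		u32 value;

		if (kstrtou32(buf, 0, &value))
			return -EINVAL;
		/* ... enable or disable clock scaling based on value ... */
		return count;
	}
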
  
 +static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
 +{
 +      struct device *dev = hba->dev;
 +      int ret;
 +
 +      ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
 +              &hba->lanes_per_direction);
 +      if (ret) {
 +              dev_dbg(hba->dev,
 +                      "%s: failed to read lanes-per-direction, ret=%d\n",
 +                      __func__, ret);
 +              hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
 +      }
 +}
  /**
   * ufshcd_init - Driver initialization routine
   * @hba: per-adapter instance
@@@ -10052,8 -5640,6 +10053,8 @@@ int ufshcd_init(struct ufs_hba *hba, vo
        hba->mmio_base = mmio_base;
        hba->irq = irq;
  
 +      ufshcd_init_lanes_per_dir(hba);
 +
        err = ufshcd_hba_init(hba);
        if (err)
                goto out_error;
        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);
  
 +      /* print error message if ufs_version is not valid */
 +      if ((hba->ufs_version != UFSHCI_VERSION_10) &&
 +          (hba->ufs_version != UFSHCI_VERSION_11) &&
 +          (hba->ufs_version != UFSHCI_VERSION_20) &&
 +          (hba->ufs_version != UFSHCI_VERSION_21))
 +              dev_err(hba->dev, "invalid UFS version 0x%x\n",
 +                      hba->ufs_version);
 +
        /* Get Interrupt bit mask per version */
        hba->intr_mask = ufshcd_get_intr_mask(hba);
  
 +      /* Enable debug prints */
 +      hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
 +
        err = ufshcd_set_dma_mask(hba);
        if (err) {
                dev_err(hba->dev, "set dma mask failed\n");
        host->max_channel = UFSHCD_MAX_CHANNEL;
        host->unique_id = host->host_no;
        host->max_cmd_len = MAX_CDB_SIZE;
 +      host->set_dbd_for_caching = 1;
  
        hba->max_pwr_info.is_valid = false;
  
        /* Initialize work queues */
        INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 +      INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
  
        /* Initialize UIC command mutex */
        mutex_init(&hba->uic_cmd_mutex);
        /* Initialize mutex for device management commands */
        mutex_init(&hba->dev_cmd.lock);
  
 +      init_rwsem(&hba->lock);
 +
        /* Initialize device management tag acquire wait queue */
        init_waitqueue_head(&hba->dev_cmd.tag_wq);
  
        ufshcd_init_clk_gating(hba);
 +      ufshcd_init_hibern8_on_idle(hba);
 +
 +      /*
 +       * In order to avoid any spurious interrupt immediately after
 +       * registering UFS controller interrupt handler, clear any pending UFS
 +       * interrupt status and disable all the UFS interrupts.
 +       */
 +      ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
 +                    REG_INTERRUPT_STATUS);
 +      ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
 +      /*
 +       * Make sure that UFS interrupts are disabled and any pending interrupt
 +       * status is cleared before registering UFS interrupt handler.
 +       */
 +      mb();
 +
        /* IRQ registration */
        err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
        if (err) {
                goto exit_gating;
        }
  
 +      /* Reset controller to power on reset (POR) state */
 +      ufshcd_vops_full_reset(hba);
 +
 +      /* reset connected UFS device */
 +      err = ufshcd_reset_device(hba);
 +      if (err)
 +              dev_warn(hba->dev, "%s: device reset failed. err %d\n",
 +                       __func__, err);
 +
        /* Host controller enable */
        err = ufshcd_hba_enable(hba);
        if (err) {
                dev_err(hba->dev, "Host controller enable failed\n");
 +              ufshcd_print_host_regs(hba);
 +              ufshcd_print_host_state(hba);
                goto out_remove_scsi_host;
        }
  
 -      if (ufshcd_is_clkscaling_enabled(hba)) {
 -              hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
 -                                                 "simple_ondemand", NULL);
 -              if (IS_ERR(hba->devfreq)) {
 -                      dev_err(hba->dev, "Unable to register with devfreq %ld\n",
 -                                      PTR_ERR(hba->devfreq));
 -                      goto out_remove_scsi_host;
 -              }
 -              /* Suspend devfreq until the UFS device is detected */
 -              devfreq_suspend_device(hba->devfreq);
 -              hba->clk_scaling.window_start_t = 0;
 +      if (ufshcd_is_clkscaling_supported(hba)) {
 +              char wq_name[sizeof("ufs_clkscaling_00")];
 +
 +              INIT_WORK(&hba->clk_scaling.suspend_work,
 +                        ufshcd_clk_scaling_suspend_work);
 +              INIT_WORK(&hba->clk_scaling.resume_work,
 +                        ufshcd_clk_scaling_resume_work);
 +
 +              snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
 +                       host->host_no);
 +              hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
 +
 +              ufshcd_clkscaling_init_sysfs(hba);
        }
  
 +      /*
 +       * If rpm_lvl and spm_lvl are not already set to valid levels,
 +       * set the default power management level for UFS runtime and system
 +       * suspend. Default power saving mode selected is keeping UFS link in
 +       * Hibern8 state and UFS device in sleep.
 +       */
 +      if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
 +              hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                                      UFS_SLEEP_PWR_MODE,
 +                                                      UIC_LINK_HIBERN8_STATE);
 +      if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
 +              hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
 +                                                      UFS_SLEEP_PWR_MODE,
 +                                                      UIC_LINK_HIBERN8_STATE);
 +
        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
  
 +      ufshcd_init_latency_hist(hba);
 +
        /*
 -       * The device-initialize-sequence hasn't been invoked yet.
 -       * Set the device to power-off state
 +       * We assume that the device was not put into the sleep/power-down
 +       * state during the boot stage before the kernel started.
 +       * This assumption helps avoid doing link startup twice during
 +       * ufshcd_probe_hba().
         */
 -      ufshcd_set_ufs_dev_poweroff(hba);
 +      ufshcd_set_ufs_dev_active(hba);
 +
 +      ufshcd_cmd_log_init(hba);
  
        async_schedule(ufshcd_async_scan, hba);
  
 +      ufsdbg_add_debugfs(hba);
 +
 +      ufshcd_add_sysfs_nodes(hba);
 +
        return 0;
  
  out_remove_scsi_host:
        scsi_remove_host(hba->host);
  exit_gating:
        ufshcd_exit_clk_gating(hba);
 +      ufshcd_exit_latency_hist(hba);
  out_disable:
        hba->is_irq_enabled = false;
        ufshcd_hba_exit(hba);
@@@ -101,7 -101,7 +101,7 @@@ static int vnt_int_report_rate(struct v
                else if (context->fb_option == AUTO_FB_1)
                        tx_rate = fallback_rate1[tx_rate][retry];
  
 -              if (info->band == IEEE80211_BAND_5GHZ)
 +              if (info->band == NL80211_BAND_5GHZ)
                        idx = tx_rate - RATE_6M;
                else
                        idx = tx_rate;
  
        info->status.rates[0].count = tx_retry;
  
-       if (!(tsr & (TSR_TMO | TSR_RETRYTMO))) {
+       if (!(tsr & TSR_TMO)) {
                info->status.rates[0].idx = idx;
-               info->flags |= IEEE80211_TX_STAT_ACK;
+               if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+                       info->flags |= IEEE80211_TX_STAT_ACK;
        }
  
        ieee80211_tx_status_irqsafe(priv->hw, context->skb);
@@@ -666,7 -666,7 +666,7 @@@ static int vnt_config(struct ieee80211_
                        (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
                vnt_set_channel(priv, conf->chandef.chan->hw_value);
  
 -              if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
 +              if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
                        bb_type = BB_TYPE_11A;
                else
                        bb_type = BB_TYPE_11G;
@@@ -1002,6 -1002,7 +1002,7 @@@ vt6656_probe(struct usb_interface *intf
        ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
        ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
        ieee80211_hw_set(priv->hw, SUPPORTS_PS);
+       ieee80211_hw_set(priv->hw, PS_NULLFUNC_STACK);
  
        priv->hw->max_signal = 100;
  
@@@ -280,11 -280,9 +280,9 @@@ static u16 vnt_rxtx_datahead_g(struct v
                                                        PK_TYPE_11B, &buf->b);
  
        /* Get Duration and TimeStamp */
-       if (ieee80211_is_pspoll(hdr->frame_control)) {
-               __le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15));
-               buf->duration_a = dur;
-               buf->duration_b = dur;
+       if (ieee80211_is_nullfunc(hdr->frame_control)) {
+               buf->duration_a = hdr->duration_id;
+               buf->duration_b = hdr->duration_id;
        } else {
                buf->duration_a = vnt_get_duration_le(priv,
                                                tx_context->pkt_type, need_ack);
@@@ -373,10 -371,8 +371,8 @@@ static u16 vnt_rxtx_datahead_ab(struct 
                          tx_context->pkt_type, &buf->ab);
  
        /* Get Duration and TimeStampOff */
-       if (ieee80211_is_pspoll(hdr->frame_control)) {
-               __le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15));
-               buf->duration = dur;
+       if (ieee80211_is_nullfunc(hdr->frame_control)) {
+               buf->duration = hdr->duration_id;
        } else {
                buf->duration = vnt_get_duration_le(priv, tx_context->pkt_type,
                                                    need_ack);
@@@ -812,13 -808,17 +808,17 @@@ int vnt_tx_packet(struct vnt_private *p
        }
  
        if (current_rate > RATE_11M) {
 -              if (info->band == IEEE80211_BAND_5GHZ) {
 +              if (info->band == NL80211_BAND_5GHZ) {
                        pkt_type = PK_TYPE_11A;
                } else {
-                       if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
-                               pkt_type = PK_TYPE_11GB;
-                       else
-                               pkt_type = PK_TYPE_11GA;
+                       if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+                               if (priv->basic_rates & VNT_B_RATES)
+                                       pkt_type = PK_TYPE_11GB;
+                               else
+                                       pkt_type = PK_TYPE_11GA;
+                       } else {
+                               pkt_type = PK_TYPE_11A;
+                       }
                }
        } else {
                pkt_type = PK_TYPE_11B;
diff --combined drivers/usb/dwc3/core.c
@@@ -35,7 -35,6 +35,7 @@@
  #include <linux/of.h>
  #include <linux/acpi.h>
  #include <linux/pinctrl/consumer.h>
 +#include <linux/irq.h>
  
  #include <linux/usb/ch9.h>
  #include <linux/usb/gadget.h>
  
  /* -------------------------------------------------------------------------- */
  
 +void dwc3_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
 +{
 +      u32                     reg;
 +
 +      reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
 +
 +      if (suspend)
 +              reg |= DWC3_GUSB3PIPECTL_SUSPHY;
 +      else
 +              reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
 +
 +      dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
 +}
 +
  void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
  {
        u32 reg;
        reg = dwc3_readl(dwc->regs, DWC3_GCTL);
        reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
        reg |= DWC3_GCTL_PRTCAPDIR(mode);
 +      /*
 +       * Set this bit so that the device attempts SuperSpeed three more
 +       * times, even if it previously failed to operate in SS mode.
 +       */
 +      reg |= DWC3_GCTL_U2RSTECN;
 +      reg &= ~(DWC3_GCTL_SOFITPSYNC);
 +      reg &= ~(DWC3_GCTL_PWRDNSCALEMASK);
 +      reg |= DWC3_GCTL_PWRDNSCALE(2);
 +      reg |= DWC3_GCTL_U2EXIT_LFPS;
        dwc3_writel(dwc->regs, DWC3_GCTL, reg);
 +
 +      if (mode == DWC3_GCTL_PRTCAP_OTG || mode == DWC3_GCTL_PRTCAP_HOST) {
 +              /*
 +               * Allow the ITP to be generated off the ref clk based
 +               * counter instead of the UTMI/ULPI clk based counter when
 +               * only SuperSpeed is active, so that the UTMI/ULPI PHY can
 +               * be suspended.
 +               *
 +               * Starting with revision 2.50A, GFLADJ_REFCLK_LPM_SEL is used
 +               * instead.
 +               */
 +              if (dwc->revision < DWC3_REVISION_250A) {
 +                      reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 +                      reg |= DWC3_GCTL_SOFITPSYNC;
 +                      dwc3_writel(dwc->regs, DWC3_GCTL, reg);
 +              } else {
 +                      reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
 +                      reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
 +                      dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
 +              }
 +      }
  }
  
  /**
 - * dwc3_core_soft_reset - Issues core soft reset and PHY reset
 + * Performs initialization of the HS and SS PHYs.
 + * When used as part of a POR or init sequence, it is recommended to hard
 + * reset the PHYs before invoking this function.
   * @dwc: pointer to our context structure
 - */
 -static int dwc3_core_soft_reset(struct dwc3 *dwc)
 +*/
 +static int dwc3_init_usb_phys(struct dwc3 *dwc)
  {
 -      u32             reg;
        int             ret;
  
 -      /* Before Resetting PHY, put Core in Reset */
 -      reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 -      reg |= DWC3_GCTL_CORESOFTRESET;
 -      dwc3_writel(dwc->regs, DWC3_GCTL, reg);
 +      /* Bring up PHYs */
 +      ret = usb_phy_init(dwc->usb2_phy);
 +      if (ret) {
 +              pr_err("%s: usb_phy_init(dwc->usb2_phy) returned %d\n",
 +                              __func__, ret);
 +              return ret;
 +      }
  
 -      /* Assert USB3 PHY reset */
 -      reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
 -      reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
 -      dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
 +      if (dwc->maximum_speed == USB_SPEED_HIGH)
 +              goto generic_phy_init;
  
 -      /* Assert USB2 PHY reset */
 -      reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
 -      reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
 -      dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
 +      ret = usb_phy_init(dwc->usb3_phy);
 +      if (ret == -EBUSY) {
 +              /*
 +               * Set the maximum speed to high when USB3 PHY initialization
 +               * fails and USB SuperSpeed cannot be supported.
 +               */
 +              dwc->maximum_speed = USB_SPEED_HIGH;
 +      } else if (ret) {
 +              pr_err("%s: usb_phy_init(dwc->usb3_phy) returned %d\n",
 +                              __func__, ret);
 +              return ret;
 +      }
  
 -      usb_phy_init(dwc->usb2_phy);
 -      usb_phy_init(dwc->usb3_phy);
 +generic_phy_init:
        ret = phy_init(dwc->usb2_generic_phy);
        if (ret < 0)
                return ret;
                phy_exit(dwc->usb2_generic_phy);
                return ret;
        }
 -      mdelay(100);
  
 -      /* Clear USB3 PHY reset */
 +      return 0;
 +}
 +
 +/**
 + * dwc3_core_reset - Issues core soft reset and PHY reset
 + * @dwc: pointer to our context structure
 + */
 +static int dwc3_core_reset(struct dwc3 *dwc)
 +{
 +      int             ret;
 +      u32     reg;
 +
 +      /* Reset PHYs */
 +      usb_phy_reset(dwc->usb2_phy);
 +
 +      if (dwc->maximum_speed == USB_SPEED_SUPER)
 +              usb_phy_reset(dwc->usb3_phy);
 +
 +      /* Initialize PHYs */
 +      ret = dwc3_init_usb_phys(dwc);
 +      if (ret) {
 +              pr_err("%s: dwc3_init_phys returned %d\n",
 +                              __func__, ret);
 +              return ret;
 +      }
 +
        reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
 -      reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
 -      dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
 +      reg &= ~DWC3_GUSB3PIPECTL_DELAYP1TRANS;
  
 -      /* Clear USB2 PHY reset */
 -      reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
 -      reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
 -      dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
 +      /* core exits U1/U2/U3 only in PHY power state P1/P2/P3 respectively */
 +      if (dwc->revision <= DWC3_REVISION_310A)
 +              reg |= DWC3_GUSB3PIPECTL_UX_EXIT_IN_PX;
  
 -      mdelay(100);
 +      dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
  
 -      /* After PHYs are stable we can take Core out of reset state */
 -      reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 -      reg &= ~DWC3_GCTL_CORESOFTRESET;
 -      dwc3_writel(dwc->regs, DWC3_GCTL, reg);
 +      dwc3_notify_event(dwc, DWC3_CONTROLLER_RESET_EVENT, 0);
 +
 +      dwc3_notify_event(dwc, DWC3_CONTROLLER_POST_RESET_EVENT, 0);
  
        return 0;
  }
@@@ -265,7 -190,7 +265,7 @@@ static void dwc3_free_one_event_buffer(
   * otherwise ERR_PTR(errno).
   */
  static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
 -              unsigned length)
 +              unsigned length, enum event_buf_type type)
  {
        struct dwc3_event_buffer        *evt;
  
  
        evt->dwc        = dwc;
        evt->length     = length;
 +      evt->type       = type;
        evt->buf        = dma_alloc_coherent(dwc->dev, length,
                        &evt->dma, GFP_KERNEL);
        if (!evt->buf)
@@@ -310,40 -234,26 +310,40 @@@ static void dwc3_free_event_buffers(str
   */
  static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
  {
 -      int                     num;
 -      int                     i;
 +      int     i;
 +      int     j = 0;
  
 -      num = DWC3_NUM_INT(dwc->hwparams.hwparams1);
 -      dwc->num_event_buffers = num;
 +      dwc->num_event_buffers = dwc->num_normal_event_buffers +
 +              dwc->num_gsi_event_buffers;
  
 -      dwc->ev_buffs = devm_kzalloc(dwc->dev, sizeof(*dwc->ev_buffs) * num,
 +      dwc->ev_buffs = devm_kzalloc(dwc->dev,
 +                      sizeof(*dwc->ev_buffs) * dwc->num_event_buffers,
                        GFP_KERNEL);
        if (!dwc->ev_buffs)
                return -ENOMEM;
  
 -      for (i = 0; i < num; i++) {
 +      for (i = 0; i < dwc->num_normal_event_buffers; i++) {
                struct dwc3_event_buffer        *evt;
  
 -              evt = dwc3_alloc_one_event_buffer(dwc, length);
 +              evt = dwc3_alloc_one_event_buffer(dwc, length,
 +                              EVT_BUF_TYPE_NORMAL);
                if (IS_ERR(evt)) {
                        dev_err(dwc->dev, "can't allocate event buffer\n");
                        return PTR_ERR(evt);
                }
 -              dwc->ev_buffs[i] = evt;
 +              dwc->ev_buffs[j++] = evt;
 +      }
 +
 +      for (i = 0; i < dwc->num_gsi_event_buffers; i++) {
 +              struct dwc3_event_buffer        *evt;
 +
 +              evt = dwc3_alloc_one_event_buffer(dwc, length,
 +                              EVT_BUF_TYPE_GSI);
 +              if (IS_ERR(evt)) {
 +                      dev_err(dwc->dev, "can't allocate event buffer\n");
 +                      return PTR_ERR(evt);
 +              }
 +              dwc->ev_buffs[j++] = evt;
        }
  
        return 0;
   *
   * Returns 0 on success otherwise negative errno.
   */
 -static int dwc3_event_buffers_setup(struct dwc3 *dwc)
 +int dwc3_event_buffers_setup(struct dwc3 *dwc)
  {
        struct dwc3_event_buffer        *evt;
        int                             n;
  
        for (n = 0; n < dwc->num_event_buffers; n++) {
                evt = dwc->ev_buffs[n];
 -              dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n",
 +              dev_dbg(dwc->dev, "Event buf %pK dma %08llx length %d\n",
                                evt->buf, (unsigned long long) evt->dma,
                                evt->length);
  
 +              memset(evt->buf, 0, evt->length);
 +
                evt->lpos = 0;
  
                dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n),
                                lower_32_bits(evt->dma));
 -              dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
 -                              upper_32_bits(evt->dma));
 -              dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
 -                              DWC3_GEVNTSIZ_SIZE(evt->length));
 +
 +              if (evt->type == EVT_BUF_TYPE_NORMAL) {
 +                      dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
 +                                      upper_32_bits(evt->dma));
 +                      dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
 +                                      DWC3_GEVNTSIZ_SIZE(evt->length));
 +              } else {
 +                      dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
 +                              DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
 +                                      DWC3_GEVENT_TYPE_GSI) |
 +                              DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n));
 +
 +                      dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
 +                              DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
 +                              ((evt->length) & 0xffff));
 +              }
 +
                dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
        }
  
@@@ -634,7 -529,7 +634,7 @@@ static int dwc3_phy_setup(struct dwc3 *
   *
   * Returns 0 on success otherwise negative errno.
   */
 -static int dwc3_core_init(struct dwc3 *dwc)
 +int dwc3_core_init(struct dwc3 *dwc)
  {
        u32                     hwparams4 = dwc->hwparams.hwparams4;
        u32                     reg;
        /* Handle USB2.0-only core configuration */
        if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
                        DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
 -              if (dwc->maximum_speed == USB_SPEED_SUPER)
 -                      dwc->maximum_speed = USB_SPEED_HIGH;
 +              if (dwc->max_hw_supp_speed == USB_SPEED_SUPER) {
 +                      dwc->max_hw_supp_speed = USB_SPEED_HIGH;
 +                      dwc->maximum_speed = dwc->max_hw_supp_speed;
 +              }
        }
  
 -      /* issue device SoftReset too */
 -      ret = dwc3_soft_reset(dwc);
 +      /*
 +       * Workaround for STAR 9000961433 which affects only version
 +       * 3.00a of the DWC_usb3 core. This prevents the controller
 +       * interrupt from being masked while handling events. IMOD
 +       * allows us to work around this issue. Enable it for the
 +       * affected version.
 +       */
 +      if (!dwc->imod_interval && (dwc->revision == DWC3_REVISION_300A))
 +              dwc->imod_interval = 1;
 +
 +      ret = dwc3_core_reset(dwc);
        if (ret)
                goto err0;
  
 -      ret = dwc3_core_soft_reset(dwc);
 +      /* issue device SoftReset too */
 +      ret = dwc3_soft_reset(dwc);
        if (ret)
                goto err0;
  
  
        dwc3_core_num_eps(dwc);
  
 +      /*
 +       * Disable clock gating to work around a known HW bug that causes the
 +       * internal RAM clock to get stuck when entering low power modes.
 +       */
 +      if (dwc->disable_clk_gating) {
 +              dev_dbg(dwc->dev, "Disabling controller clock gating.\n");
 +              reg |= DWC3_GCTL_DSBLCLKGTNG;
 +      }
 +
        dwc3_writel(dwc->regs, DWC3_GCTL, reg);
  
        ret = dwc3_alloc_scratch_buffers(dwc);
        if (ret)
                goto err2;
  
 +      /*
 +       * Clear elastic buffer mode in the GUSB3PIPECTL(0) register;
 +       * otherwise it results in high link error rates and can cause SS
 +       * mode transfer failures.
 +       */
 +      if (!dwc->nominal_elastic_buffer) {
 +              reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
 +              reg &= ~DWC3_GUSB3PIPECTL_ELASTIC_BUF_MODE;
 +              dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
 +      }
 +
        return 0;
  
  err2:
@@@ -880,16 -743,38 +880,16 @@@ static int dwc3_core_get_phy(struct dwc
  static int dwc3_core_init_mode(struct dwc3 *dwc)
  {
        struct device *dev = dwc->dev;
 -      int ret;
  
        switch (dwc->dr_mode) {
        case USB_DR_MODE_PERIPHERAL:
                dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
 -              ret = dwc3_gadget_init(dwc);
 -              if (ret) {
 -                      dev_err(dev, "failed to initialize gadget\n");
 -                      return ret;
 -              }
                break;
        case USB_DR_MODE_HOST:
                dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
 -              ret = dwc3_host_init(dwc);
 -              if (ret) {
 -                      dev_err(dev, "failed to initialize host\n");
 -                      return ret;
 -              }
                break;
        case USB_DR_MODE_OTG:
                dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
 -              ret = dwc3_host_init(dwc);
 -              if (ret) {
 -                      dev_err(dev, "failed to initialize host\n");
 -                      return ret;
 -              }
 -
 -              ret = dwc3_gadget_init(dwc);
 -              if (ret) {
 -                      dev_err(dev, "failed to initialize gadget\n");
 -                      return ret;
 -              }
                break;
        default:
                dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
@@@ -916,109 -801,13 +916,112 @@@ static void dwc3_core_exit_mode(struct 
                /* do nothing */
                break;
        }
+       /* de-assert DRVVBUS for HOST and OTG mode */
+       dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
  }
  
 +/* An xHCI reset also resets other core registers; re-initialize them. */
 +void dwc3_post_host_reset_core_init(struct dwc3 *dwc)
 +{
 +      dwc3_core_init(dwc);
 +      dwc3_gadget_restart(dwc);
 +}
 +
 +static void (*notify_event)(struct dwc3 *, unsigned, unsigned);
 +void dwc3_set_notifier(void (*notify)(struct dwc3 *, unsigned, unsigned))
 +{
 +      notify_event = notify;
 +}
 +EXPORT_SYMBOL(dwc3_set_notifier);
 +
 +int dwc3_notify_event(struct dwc3 *dwc, unsigned event, unsigned value)
 +{
 +      int ret = 0;
 +
 +      if (dwc->notify_event)
 +              dwc->notify_event(dwc, event, value);
 +      else
 +              ret = -ENODEV;
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL(dwc3_notify_event);
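
A hedged sketch of how a platform glue driver could consume this notifier interface is shown below; the callback and probe function names are hypothetical, and only dwc3_set_notifier() plus the event codes visible in this diff are relied on.

	static void example_glue_notify(struct dwc3 *dwc, unsigned event,
					unsigned value)
	{
		switch (event) {
		case DWC3_CONTROLLER_RESET_EVENT:
			/* re-apply glue/PHY settings lost across the reset */
			break;
		case DWC3_CONTROLLER_POST_RESET_EVENT:
			/* finish restoring state after the reset sequence */
			break;
		default:
			break;
		}
	}

	static int example_glue_probe(struct platform_device *pdev)
	{
		/* must run before the dwc3 core probes, so that
		 * dwc->notify_event gets populated in dwc3_probe() */
		dwc3_set_notifier(example_glue_notify);
		/* ... remaining glue setup ... */
		return 0;
	}
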
 +
 +int dwc3_core_pre_init(struct dwc3 *dwc)
 +{
 +      int ret;
 +
 +      dwc3_cache_hwparams(dwc);
 +
 +      ret = dwc3_phy_setup(dwc);
 +      if (ret)
 +              goto err0;
 +
 +      if (!dwc->ev_buffs) {
 +              ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
 +              if (ret) {
 +                      dev_err(dwc->dev, "failed to allocate event buffers\n");
 +                      ret = -ENOMEM;
 +                      goto err1;
 +              }
 +      }
 +
 +      ret = dwc3_core_init(dwc);
 +      if (ret) {
 +              dev_err(dwc->dev, "failed to initialize core\n");
 +              goto err2;
 +      }
 +
 +      ret = phy_power_on(dwc->usb2_generic_phy);
 +      if (ret < 0)
 +              goto err3;
 +
 +      ret = phy_power_on(dwc->usb3_generic_phy);
 +      if (ret < 0)
 +              goto err4;
 +
 +      ret = dwc3_event_buffers_setup(dwc);
 +      if (ret) {
 +              dev_err(dwc->dev, "failed to setup event buffers\n");
 +              goto err5;
 +      }
 +
 +      ret = dwc3_core_init_mode(dwc);
 +      if (ret) {
 +              dev_err(dwc->dev, "failed to set mode with dwc3 core\n");
 +              goto err6;
 +      }
 +
 +      return ret;
 +
 +err6:
 +      dwc3_event_buffers_cleanup(dwc);
 +err5:
 +      phy_power_off(dwc->usb3_generic_phy);
 +err4:
 +      phy_power_off(dwc->usb2_generic_phy);
 +err3:
 +      dwc3_core_exit(dwc);
 +err2:
 +      dwc3_free_event_buffers(dwc);
 +err1:
 +      dwc3_ulpi_exit(dwc);
 +err0:
 +      return ret;
 +}
 +
  #define DWC3_ALIGN_MASK               (16 - 1)
  
 +/* check whether the core supports IMOD */
 +bool dwc3_has_imod(struct dwc3 *dwc)
 +{
 +      return ((dwc3_is_usb3(dwc) &&
 +              dwc->revision >= DWC3_REVISION_300A) ||
 +              (dwc3_is_usb31(dwc) &&
 +              dwc->revision >= DWC3_USB31_REVISION_120A));
 +}
 +
  static int dwc3_probe(struct platform_device *pdev)
  {
        struct device           *dev = &pdev->dev;
        u8                      tx_de_emphasis;
        u8                      hird_threshold;
        u32                     fladj = 0;
 -
 +      u32                     num_evt_buffs;
 +      int                     irq;
        int                     ret;
  
        void __iomem            *regs;
        dwc->mem = mem;
        dwc->dev = dev;
  
 +      dwc->notify_event = notify_event;
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(dev, "missing IRQ\n");
        dwc->xhci_resources[1].flags = res->flags;
        dwc->xhci_resources[1].name = res->name;
  
 +      irq = platform_get_irq(to_platform_device(dwc->dev), 0);
 +
 +      /* will be enabled in dwc3_msm_resume() */
 +      irq_set_status_flags(irq, IRQ_NOAUTOEN);
 +      ret = devm_request_irq(dev, irq, dwc3_interrupt, IRQF_SHARED, "dwc3",
 +                      dwc);
 +      if (ret) {
 +              dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
 +                              irq, ret);
 +              return -ENODEV;
 +      }
 +
 +      dwc->irq = irq;
 +
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "missing memory resource\n");
                return -ENODEV;
        }
  
 +      dwc->reg_phys = res->start;
        dwc->xhci_resources[0].start = res->start;
        dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
                                        DWC3_XHCI_REGS_END;
        hird_threshold = 12;
  
        dwc->maximum_speed = usb_get_maximum_speed(dev);
 +      dwc->max_hw_supp_speed = dwc->maximum_speed;
        dwc->dr_mode = usb_get_dr_mode(dev);
  
        dwc->has_lpm_erratum = device_property_read_bool(dev,
        device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
                                 &fladj);
  
 +      dwc->nominal_elastic_buffer = device_property_read_bool(dev,
 +                              "snps,nominal-elastic-buffer");
 +      dwc->usb3_u1u2_disable = device_property_read_bool(dev,
 +                              "snps,usb3-u1u2-disable");
 +      dwc->disable_clk_gating = device_property_read_bool(dev,
 +                              "snps,disable-clk-gating");
 +      dwc->enable_bus_suspend = device_property_read_bool(dev,
 +                              "snps,bus-suspend-enable");
 +
 +      dwc->num_normal_event_buffers = 1;
 +      ret = device_property_read_u32(dev,
 +              "snps,num-normal-evt-buffs", &num_evt_buffs);
 +      if (!ret)
 +              dwc->num_normal_event_buffers = num_evt_buffs;
 +
 +      ret = device_property_read_u32(dev,
 +              "snps,num-gsi-evt-buffs", &dwc->num_gsi_event_buffers);
 +
 +      if (dwc->enable_bus_suspend) {
 +              pm_runtime_set_autosuspend_delay(dev, 500);
 +              pm_runtime_use_autosuspend(dev);
 +      }
 +
        if (pdata) {
                dwc->maximum_speed = pdata->maximum_speed;
 +              dwc->max_hw_supp_speed = dwc->maximum_speed;
                dwc->has_lpm_erratum = pdata->has_lpm_erratum;
                if (pdata->lpm_nyet_threshold)
                        lpm_nyet_threshold = pdata->lpm_nyet_threshold;
  
        /* default to superspeed if no maximum_speed passed */
        if (dwc->maximum_speed == USB_SPEED_UNKNOWN)
 -              dwc->maximum_speed = USB_SPEED_SUPER;
 +              dwc->max_hw_supp_speed = dwc->maximum_speed = USB_SPEED_SUPER;
  
        dwc->lpm_nyet_threshold = lpm_nyet_threshold;
        dwc->tx_de_emphasis = tx_de_emphasis;
        dwc->hird_threshold = hird_threshold
                | (dwc->is_utmi_l1_suspend << 4);
  
 +      init_waitqueue_head(&dwc->wait_linkstate);
        platform_set_drvdata(pdev, dwc);
 -      dwc3_cache_hwparams(dwc);
 -
 -      ret = dwc3_phy_setup(dwc);
 -      if (ret)
 -              goto err0;
 -
        ret = dwc3_core_get_phy(dwc);
        if (ret)
                goto err0;
  
        spin_lock_init(&dwc->lock);
  
 -      if (!dev->dma_mask) {
 -              dev->dma_mask = dev->parent->dma_mask;
 -              dev->dma_parms = dev->parent->dma_parms;
 -              dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
 +      dev->dma_mask   = dev->parent->dma_mask;
 +      dev->dma_parms  = dev->parent->dma_parms;
 +      dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
 +
 +      dwc->dwc_wq = alloc_ordered_workqueue("dwc_wq", WQ_HIGHPRI);
 +      if (!dwc->dwc_wq) {
 +              pr_err("%s: Unable to create workqueue dwc_wq\n", __func__);
 +              return -ENOMEM;
        }
  
 +      INIT_WORK(&dwc->bh_work, dwc3_bh_work);
 +
 +      pm_runtime_no_callbacks(dev);
 +      pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 -      pm_runtime_get_sync(dev);
        pm_runtime_forbid(dev);
  
 -      ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
 -      if (ret) {
 -              dev_err(dwc->dev, "failed to allocate event buffers\n");
 -              ret = -ENOMEM;
 -              goto err1;
 -      }
 -
        if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
                dwc->dr_mode = USB_DR_MODE_HOST;
        else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
                dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
  
 -      if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
 +      if (dwc->dr_mode == USB_DR_MODE_UNKNOWN) {
                dwc->dr_mode = USB_DR_MODE_OTG;
 -
 -      ret = dwc3_core_init(dwc);
 -      if (ret) {
 -              dev_err(dev, "failed to initialize core\n");
 -              goto err1;
 +              dwc->is_drd = true;
        }
  
        /* Adjust Frame Length */
        dwc3_frame_length_adjustment(dwc, fladj);
  
 -      usb_phy_set_suspend(dwc->usb2_phy, 0);
 -      usb_phy_set_suspend(dwc->usb3_phy, 0);
 -      ret = phy_power_on(dwc->usb2_generic_phy);
 -      if (ret < 0)
 -              goto err2;
 -
 -      ret = phy_power_on(dwc->usb3_generic_phy);
 -      if (ret < 0)
 -              goto err3;
 +      /* Hardcode number of eps */
 +      dwc->num_in_eps = 16;
 +      dwc->num_out_eps = 16;
  
 -      ret = dwc3_event_buffers_setup(dwc);
 -      if (ret) {
 -              dev_err(dwc->dev, "failed to setup event buffers\n");
 -              goto err4;
 +      if (dwc->dr_mode == USB_DR_MODE_OTG ||
 +              dwc->dr_mode == USB_DR_MODE_PERIPHERAL) {
 +              ret = dwc3_gadget_init(dwc);
 +              if (ret) {
 +                      dev_err(dev, "failed to initialize gadget\n");
 +                      goto err0;
 +              }
        }
  
 -      ret = dwc3_core_init_mode(dwc);
 -      if (ret)
 -              goto err5;
 +      if (dwc->dr_mode == USB_DR_MODE_OTG ||
 +              dwc->dr_mode ==  USB_DR_MODE_HOST) {
 +              ret = dwc3_host_init(dwc);
 +              if (ret) {
 +                      dev_err(dev, "failed to initialize host\n");
 +                      goto err_gadget;
 +              }
 +      }
  
        ret = dwc3_debugfs_init(dwc);
        if (ret) {
                dev_err(dev, "failed to initialize debugfs\n");
 -              goto err6;
 +              goto err_host;
        }
  
        pm_runtime_allow(dev);
  
        return 0;
  
 -err6:
 -      dwc3_core_exit_mode(dwc);
 -
 -err5:
 -      dwc3_event_buffers_cleanup(dwc);
 -
 -err4:
 -      phy_power_off(dwc->usb3_generic_phy);
 -
 -err3:
 -      phy_power_off(dwc->usb2_generic_phy);
 -
 -err2:
 -      usb_phy_set_suspend(dwc->usb2_phy, 1);
 -      usb_phy_set_suspend(dwc->usb3_phy, 1);
 -      dwc3_core_exit(dwc);
 -
 -err1:
 -      dwc3_free_event_buffers(dwc);
 -      dwc3_ulpi_exit(dwc);
 -
 +err_host:
 +      if (dwc->dr_mode == USB_DR_MODE_OTG ||
 +              dwc->dr_mode ==  USB_DR_MODE_HOST)
 +              dwc3_host_exit(dwc);
 +err_gadget:
 +      if (dwc->dr_mode == USB_DR_MODE_OTG ||
 +              dwc->dr_mode == USB_DR_MODE_PERIPHERAL)
 +              dwc3_gadget_exit(dwc);
  err0:
        /*
         * restore res->start back to its original value so that, in case the
         * memory region the next time probe is called.
         */
        res->start -= DWC3_GLOBALS_REGS_START;
 +      destroy_workqueue(dwc->dwc_wq);
  
        return ret;
  }
@@@ -1333,14 -1099,14 +1336,14 @@@ static int dwc3_remove(struct platform_
        dwc3_event_buffers_cleanup(dwc);
        dwc3_free_event_buffers(dwc);
  
 -      usb_phy_set_suspend(dwc->usb2_phy, 1);
 -      usb_phy_set_suspend(dwc->usb3_phy, 1);
        phy_power_off(dwc->usb2_generic_phy);
        phy_power_off(dwc->usb3_generic_phy);
  
        dwc3_core_exit(dwc);
        dwc3_ulpi_exit(dwc);
  
 +      destroy_workqueue(dwc->dwc_wq);
 +
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
  
@@@ -1353,10 -1119,6 +1356,10 @@@ static int dwc3_suspend(struct device *
        struct dwc3     *dwc = dev_get_drvdata(dev);
        unsigned long   flags;
  
 +      /* Check if platform glue driver handling PM, if not then handle here */
 +      if (!dwc3_notify_event(dwc, DWC3_CORE_PM_SUSPEND_EVENT, 0))
 +              return 0;
 +
        spin_lock_irqsave(&dwc->lock, flags);
  
        switch (dwc->dr_mode) {
@@@ -1389,10 -1151,6 +1392,10 @@@ static int dwc3_resume(struct device *d
        unsigned long   flags;
        int             ret;
  
 +      /* Check if platform glue driver handling PM, if not then handle here */
 +      if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT, 0))
 +              return 0;
 +
        pinctrl_pm_select_default_state(dev);
  
        usb_phy_init(dwc->usb3_phy);
@@@ -1435,26 -1193,8 +1438,26 @@@ err_usb2phy_init
        return ret;
  }
  
 +static int dwc3_pm_restore(struct device *dev)
 +{
 +      /*
 +       * Set the core as runtime active to prevent the runtime
 +       * PM ops being called before the PM restore is completed.
 +       */
 +      pm_runtime_disable(dev);
 +      pm_runtime_set_active(dev);
 +      pm_runtime_enable(dev);
 +
 +      return 0;
 +}
 +
  static const struct dev_pm_ops dwc3_dev_pm_ops = {
 -      SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
 +      .suspend        = dwc3_suspend,
 +      .resume         = dwc3_resume,
 +      .freeze         = dwc3_suspend,
 +      .thaw           = dwc3_pm_restore,
 +      .poweroff       = dwc3_suspend,
 +      .restore        = dwc3_pm_restore,
  };
  
  #define DWC3_PM_OPS   &(dwc3_dev_pm_ops)
@@@ -57,6 -57,7 +57,7 @@@ struct f_ncm 
        struct usb_ep                   *notify;
        struct usb_request              *notify_req;
        u8                              notify_state;
+       atomic_t                        notify_count;
        bool                            is_open;
  
        const struct ndp_parser_opts    *parser_opts;
@@@ -333,77 -334,6 +334,77 @@@ static struct usb_descriptor_header *nc
        NULL,
  };
  
 +/* Super Speed Support */
 +static struct usb_endpoint_descriptor ncm_ss_notify_desc = {
 +      .bLength =              USB_DT_ENDPOINT_SIZE,
 +      .bDescriptorType =      USB_DT_ENDPOINT,
 +      .bEndpointAddress =     USB_DIR_IN,
 +      .bmAttributes =         USB_ENDPOINT_XFER_INT,
 +      .wMaxPacketSize =       cpu_to_le16(NCM_STATUS_BYTECOUNT),
 +      .bInterval =            USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS),
 +};
 +
 +static struct usb_ss_ep_comp_descriptor ncm_ss_notify_comp_desc = {
 +      .bLength =              sizeof(ncm_ss_notify_comp_desc),
 +      .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
 +      /* the following 3 values can be tweaked if necessary */
 +      /* .bMaxBurst =         0, */
 +      /* .bmAttributes =      0, */
 +      .wBytesPerInterval =    cpu_to_le16(NCM_STATUS_BYTECOUNT),
 +};
 +
 +static struct usb_endpoint_descriptor ncm_ss_in_desc = {
 +      .bLength =              USB_DT_ENDPOINT_SIZE,
 +      .bDescriptorType =      USB_DT_ENDPOINT,
 +      .bEndpointAddress =     USB_DIR_IN,
 +      .bmAttributes =         USB_ENDPOINT_XFER_BULK,
 +      .wMaxPacketSize =       cpu_to_le16(1024),
 +};
 +
 +static struct usb_ss_ep_comp_descriptor ncm_ss_in_comp_desc = {
 +      .bLength =              sizeof(ncm_ss_in_comp_desc),
 +      .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
 +      /* the following 2 values can be tweaked if necessary */
 +      /* .bMaxBurst =         0, */
 +      /* .bmAttributes =      0, */
 +};
 +
 +static struct usb_endpoint_descriptor ncm_ss_out_desc = {
 +      .bLength =              USB_DT_ENDPOINT_SIZE,
 +      .bDescriptorType =      USB_DT_ENDPOINT,
 +      .bEndpointAddress =     USB_DIR_OUT,
 +      .bmAttributes =         USB_ENDPOINT_XFER_BULK,
 +      .wMaxPacketSize =       cpu_to_le16(1024),
 +};
 +
 +static struct usb_ss_ep_comp_descriptor ncm_ss_out_comp_desc = {
 +      .bLength =              sizeof(ncm_ss_out_comp_desc),
 +      .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
 +      /* the following 2 values can be tweaked if necessary */
 +      /* .bMaxBurst =         0, */
 +      /* .bmAttributes =      0, */
 +};
 +
 +static struct usb_descriptor_header *ncm_ss_function[] = {
 +      (struct usb_descriptor_header *) &ncm_iad_desc,
 +      /* CDC NCM control descriptors */
 +      (struct usb_descriptor_header *) &ncm_control_intf,
 +      (struct usb_descriptor_header *) &ncm_header_desc,
 +      (struct usb_descriptor_header *) &ncm_union_desc,
 +      (struct usb_descriptor_header *) &ecm_desc,
 +      (struct usb_descriptor_header *) &ncm_desc,
 +      (struct usb_descriptor_header *) &ncm_ss_notify_desc,
 +      (struct usb_descriptor_header *) &ncm_ss_notify_comp_desc,
 +      /* data interface, altsettings 0 and 1 */
 +      (struct usb_descriptor_header *) &ncm_data_nop_intf,
 +      (struct usb_descriptor_header *) &ncm_data_intf,
 +      (struct usb_descriptor_header *) &ncm_ss_in_desc,
 +      (struct usb_descriptor_header *) &ncm_ss_in_comp_desc,
 +      (struct usb_descriptor_header *) &ncm_ss_out_desc,
 +      (struct usb_descriptor_header *) &ncm_ss_out_comp_desc,
 +      NULL,
 +};
 +
  /* string descriptors: */
  
  #define STRING_CTRL_IDX       0
@@@ -551,7 -481,7 +552,7 @@@ static void ncm_do_notify(struct f_ncm 
        int                             status;
  
        /* notification already in flight? */
-       if (!req)
+       if (atomic_read(&ncm->notify_count))
                return;
  
        event = req->buf;
        event->bmRequestType = 0xA1;
        event->wIndex = cpu_to_le16(ncm->ctrl_id);
  
-       ncm->notify_req = NULL;
+       atomic_inc(&ncm->notify_count);
        /*
         * In double buffering if there is a space in FIFO,
         * completion callback can be called right after the call,
        status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
        spin_lock(&ncm->lock);
        if (status < 0) {
-               ncm->notify_req = req;
+               atomic_dec(&ncm->notify_count);
                DBG(cdev, "notify --> %d\n", status);
        }
  }
@@@ -636,17 -567,19 +638,19 @@@ static void ncm_notify_complete(struct 
        case 0:
                VDBG(cdev, "Notification %02x sent\n",
                     event->bNotificationType);
+               atomic_dec(&ncm->notify_count);
                break;
        case -ECONNRESET:
        case -ESHUTDOWN:
+               atomic_set(&ncm->notify_count, 0);
                ncm->notify_state = NCM_NOTIFY_NONE;
                break;
        default:
                DBG(cdev, "event %02x --> %d\n",
                        event->bNotificationType, req->status);
+               atomic_dec(&ncm->notify_count);
                break;
        }
-       ncm->notify_req = req;
        ncm_do_notify(ncm);
        spin_unlock(&ncm->lock);
  }
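
Distilled from the changes above, the submit-side gating that replaces the old "notify_req == NULL means in flight" convention looks as follows; the helper name is illustrative and the control flow is condensed.

	static void ncm_try_queue_notify(struct f_ncm *ncm,
					 struct usb_request *req)
	{
		if (atomic_read(&ncm->notify_count))	/* already in flight */
			return;

		atomic_inc(&ncm->notify_count);		/* claim before queueing */
		if (usb_ep_queue(ncm->notify, req, GFP_ATOMIC) < 0)
			atomic_dec(&ncm->notify_count);	/* queueing failed */
	}

The completion handler then decrements the count (or zeroes it on -ECONNRESET/-ESHUTDOWN), and unbind dequeues any request still counted as in flight before freeing it.
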
@@@ -1426,39 -1359,17 +1430,39 @@@ static int ncm_bind(struct usb_configur
         */
        if (!ncm_opts->bound) {
                mutex_lock(&ncm_opts->lock);
 +              ncm_opts->net = gether_setup_default();
 +              if (IS_ERR(ncm_opts->net)) {
 +                      status = PTR_ERR(ncm_opts->net);
 +                      mutex_unlock(&ncm_opts->lock);
 +                      goto error;
 +              }
                gether_set_gadget(ncm_opts->net, cdev->gadget);
                status = gether_register_netdev(ncm_opts->net);
                mutex_unlock(&ncm_opts->lock);
 -              if (status)
 -                      return status;
 +              if (status) {
 +                      free_netdev(ncm_opts->net);
 +                      goto error;
 +              }
                ncm_opts->bound = true;
        }
 +
 +      /* export host's Ethernet address in CDC format */
 +      status = gether_get_host_addr_cdc(ncm_opts->net, ncm->ethaddr,
 +                                    sizeof(ncm->ethaddr));
 +      if (status < 12) { /* strlen("01234567890a") */
 +              ERROR(cdev, "%s: failed to get host eth addr, err %d\n",
 +                      __func__, status);
 +              status = -EINVAL;
 +              goto netdev_cleanup;
 +      }
 +      ncm->port.ioport = netdev_priv(ncm_opts->net);
 +
        us = usb_gstrings_attach(cdev, ncm_strings,
                                 ARRAY_SIZE(ncm_string_defs));
 -      if (IS_ERR(us))
 -              return PTR_ERR(us);
 +      if (IS_ERR(us)) {
 +              status = PTR_ERR(us);
 +              goto netdev_cleanup;
 +      }
        ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
        ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
        ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
        hs_ncm_notify_desc.bEndpointAddress =
                fs_ncm_notify_desc.bEndpointAddress;
  
 +      if (gadget_is_superspeed(c->cdev->gadget)) {
 +              ncm_ss_in_desc.bEndpointAddress =
 +                                      fs_ncm_in_desc.bEndpointAddress;
 +              ncm_ss_out_desc.bEndpointAddress =
 +                                      fs_ncm_out_desc.bEndpointAddress;
 +              ncm_ss_notify_desc.bEndpointAddress =
 +                                      fs_ncm_notify_desc.bEndpointAddress;
 +      }
 +
        status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
 -                      NULL);
 +                      ncm_ss_function);
        if (status)
                goto fail;
  
@@@ -1562,10 -1464,7 +1566,10 @@@ fail
                kfree(ncm->notify_req->buf);
                usb_ep_free_request(ncm->notify, ncm->notify_req);
        }
 +netdev_cleanup:
 +      gether_cleanup(netdev_priv(ncm_opts->net));
  
 +error:
        ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
  
        return status;
@@@ -1613,6 -1512,8 +1617,6 @@@ static void ncm_free_inst(struct usb_fu
        opts = container_of(f, struct f_ncm_opts, func_inst);
        if (opts->bound)
                gether_cleanup(netdev_priv(opts->net));
 -      else
 -              free_netdev(opts->net);
        kfree(opts);
  }
  
@@@ -1625,6 -1526,12 +1629,6 @@@ static struct usb_function_instance *nc
                return ERR_PTR(-ENOMEM);
        mutex_init(&opts->lock);
        opts->func_inst.free_func_inst = ncm_free_inst;
 -      opts->net = gether_setup_default();
 -      if (IS_ERR(opts->net)) {
 -              struct net_device *net = opts->net;
 -              kfree(opts);
 -              return ERR_CAST(net);
 -      }
  
        config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
  
@@@ -1647,29 -1554,29 +1651,34 @@@ static void ncm_free(struct usb_functio
  static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
  {
        struct f_ncm *ncm = func_to_ncm(f);
 +      struct f_ncm_opts *opts = container_of(f->fi, struct f_ncm_opts,
 +                                      func_inst);
  
        DBG(c->cdev, "ncm unbind\n");
  
 +      opts->bound = false;
 +
        hrtimer_cancel(&ncm->task_timer);
        tasklet_kill(&ncm->tx_tasklet);
  
        ncm_string_defs[0].id = 0;
        usb_free_all_descriptors(f);
  
+       if (atomic_read(&ncm->notify_count)) {
+               usb_ep_dequeue(ncm->notify, ncm->notify_req);
+               atomic_set(&ncm->notify_count, 0);
+       }
        kfree(ncm->notify_req->buf);
        usb_ep_free_request(ncm->notify, ncm->notify_req);
 +
 +      gether_cleanup(netdev_priv(opts->net));
  }
  
  static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
  {
        struct f_ncm            *ncm;
        struct f_ncm_opts       *opts;
 -      int status;
  
        /* allocate and initialize one new instance */
        ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
        opts = container_of(fi, struct f_ncm_opts, func_inst);
        mutex_lock(&opts->lock);
        opts->refcnt++;
 -
 -      /* export host's Ethernet address in CDC format */
 -      status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
 -                                    sizeof(ncm->ethaddr));
 -      if (status < 12) { /* strlen("01234567890a") */
 -              kfree(ncm);
 -              mutex_unlock(&opts->lock);
 -              return ERR_PTR(-EINVAL);
 -      }
        ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
 -
        spin_lock_init(&ncm->lock);
        ncm_reset_values(ncm);
 -      ncm->port.ioport = netdev_priv(opts->net);
        mutex_unlock(&opts->lock);
        ncm->port.is_fixed = true;
        ncm->port.supports_multi_frame = true;
diff --combined fs/btrfs/extent_io.c
@@@ -3951,8 -3951,8 +3951,8 @@@ retry
        if (wbc->sync_mode == WB_SYNC_ALL)
                tag_pages_for_writeback(mapping, index, end);
        while (!done && !nr_to_write_done && (index <= end) &&
 -             (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
 -                      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
 +             (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
 +                      tag))) {
                unsigned i;
  
                scanned = 1;
                        if (!PagePrivate(page))
                                continue;
  
 -                      if (!wbc->range_cyclic && page->index > end) {
 -                              done = 1;
 -                              break;
 -                      }
 -
                        spin_lock(&mapping->private_lock);
                        if (!PagePrivate(page)) {
                                spin_unlock(&mapping->private_lock);
@@@ -4090,8 -4095,8 +4090,8 @@@ retry
        if (wbc->sync_mode == WB_SYNC_ALL)
                tag_pages_for_writeback(mapping, index, end);
        while (!done && !nr_to_write_done && (index <= end) &&
 -             (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
 -                      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
 +                      (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
 +                                              &index, end, tag))) {
                unsigned i;
  
                scanned = 1;
                                continue;
                        }
  
 -                      if (!wbc->range_cyclic && page->index > end) {
 -                              done = 1;
 -                              unlock_page(page);
 -                              continue;
 -                      }
 -
                        if (wbc->sync_mode != WB_SYNC_NONE) {
                                if (PageWriteback(page))
                                        flush_fn(data);
                 */
                scanned = 1;
                index = 0;
+               /*
+                * If we're looping we could run into a page that is locked by a
+                * writer and that writer could be waiting on writeback for a
+                * page in our current bio, and thus deadlock, so flush the
+                * write bio here.
+                */
+               flush_write_bio(data);
                goto retry;
        }
        btrfs_add_delayed_iput(inode);
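
The switch to pagevec_lookup_range_tag() above folds the end-of-range check into the lookup itself, which is why the explicit "page->index > end" breaks can be dropped. A minimal sketch of the resulting iteration pattern (assuming the 4.4-era two-argument pagevec_init()):

	static void walk_tagged_pages_sketch(struct address_space *mapping,
					     pgoff_t index, pgoff_t end, int tag)
	{
		struct pagevec pvec;
		unsigned nr_pages, i;

		pagevec_init(&pvec, 0);
		while (index <= end &&
		       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
							     &index, end, tag))) {
			for (i = 0; i < nr_pages; i++) {
				struct page *page = pvec.pages[i];
				/* page->index <= end is guaranteed here */
			}
			pagevec_release(&pvec);
			cond_resched();
		}
	}
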
diff --combined fs/ext2/super.c
@@@ -131,10 -131,7 +131,10 @@@ static void ext2_put_super (struct supe
  
        dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
  
 -      ext2_xattr_put_super(sb);
 +      if (sbi->s_mb_cache) {
 +              ext2_xattr_destroy_cache(sbi->s_mb_cache);
 +              sbi->s_mb_cache = NULL;
 +      }
        if (!(sb->s_flags & MS_RDONLY)) {
                struct ext2_super_block *es = sbi->s_es;
  
@@@ -1054,9 -1051,9 +1054,9 @@@ static int ext2_fill_super(struct super
  
        if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
                goto cantfind_ext2;
-       sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
-                               le32_to_cpu(es->s_first_data_block) - 1)
-                                       / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
+       sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
+                               le32_to_cpu(es->s_first_data_block) - 1)
+                                       / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
        db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
                   EXT2_DESC_PER_BLOCK(sb);
        sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
                ext2_msg(sb, KERN_ERR, "error: insufficient memory");
                goto failed_mount3;
        }
 +
 +#ifdef CONFIG_EXT2_FS_XATTR
 +      sbi->s_mb_cache = ext2_xattr_create_cache();
 +      if (!sbi->s_mb_cache) {
 +              ext2_msg(sb, KERN_ERR, "Failed to create an mb_cache");
 +              goto failed_mount3;
 +      }
 +#endif
        /*
         * set up enough so that it can read an inode
         */
@@@ -1171,8 -1160,6 +1171,8 @@@ cantfind_ext2
                        sb->s_id);
        goto failed_mount;
  failed_mount3:
 +      if (sbi->s_mb_cache)
 +              ext2_xattr_destroy_cache(sbi->s_mb_cache);
        percpu_counter_destroy(&sbi->s_freeblocks_counter);
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
@@@ -1579,17 -1566,20 +1579,17 @@@ MODULE_ALIAS_FS("ext2")
  
  static int __init init_ext2_fs(void)
  {
 -      int err = init_ext2_xattr();
 -      if (err)
 -              return err;
 +      int err;
 +
        err = init_inodecache();
        if (err)
 -              goto out1;
 +              return err;
          err = register_filesystem(&ext2_fs_type);
        if (err)
                goto out;
        return 0;
  out:
        destroy_inodecache();
 -out1:
 -      exit_ext2_xattr();
        return err;
  }
  
@@@ -1597,6 -1587,7 +1597,6 @@@ static void __exit exit_ext2_fs(void
  {
        unregister_filesystem(&ext2_fs_type);
        destroy_inodecache();
 -      exit_ext2_xattr();
  }
  
  MODULE_AUTHOR("Remy Card and others");
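
The ext2 hunks above replace the old global xattr cache with a per-superblock mb_cache: it is created in ext2_fill_super(), torn down on the failed_mount3 error path, and destroyed again (with the pointer cleared) in ext2_put_super(). A compact, hedged sketch of that create/teardown pairing, using made-up names (struct fs_info, cache_create, cache_destroy) rather than the real ext2 types, is:

#include <stdlib.h>

struct cache { int dummy; };
struct fs_info { struct cache *xattr_cache; };

static struct cache *cache_create(void) { return calloc(1, sizeof(struct cache)); }
static void cache_destroy(struct cache *c) { free(c); }

/* Mount: create the per-instance cache; on any later failure, release it
 * exactly once on the shared error path. */
static int fill_super(struct fs_info *fi)
{
	fi->xattr_cache = cache_create();
	if (!fi->xattr_cache)
		goto failed;
	/* ... set up the rest of the superblock; on error: goto failed ... */
	return 0;

failed:
	if (fi->xattr_cache)
		cache_destroy(fi->xattr_cache);
	return -1;
}

/* Unmount: the same object is destroyed and the pointer cleared, mirroring
 * the ext2_put_super() change above. */
static void put_super(struct fs_info *fi)
{
	if (fi->xattr_cache) {
		cache_destroy(fi->xattr_cache);
		fi->xattr_cache = NULL;
	}
}
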
diff --combined fs/namei.c
@@@ -40,9 -40,6 +40,9 @@@
  #include "internal.h"
  #include "mount.h"
  
 +#define CREATE_TRACE_POINTS
 +#include <trace/events/namei.h>
 +
  /* [Feb-1997 T. Schoebel-Theuer]
   * Fundamental changes in the pathname lookup mechanisms (namei)
   * were necessary because of omirr.  The reason is that omirr needs
@@@ -377,11 -374,9 +377,11 @@@ EXPORT_SYMBOL(generic_permission)
   * flag in inode->i_opflags, that says "this has no special
   * permission function, use the fast case".
   */
 -static inline int do_inode_permission(struct inode *inode, int mask)
 +static inline int do_inode_permission(struct vfsmount *mnt, struct inode *inode, int mask)
  {
        if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
 +              if (likely(mnt && inode->i_op->permission2))
 +                      return inode->i_op->permission2(mnt, inode, mask);
                if (likely(inode->i_op->permission))
                        return inode->i_op->permission(inode, mask);
  
   * This does not check for a read-only file system.  You probably want
   * inode_permission().
   */
 -int __inode_permission(struct inode *inode, int mask)
 +int __inode_permission2(struct vfsmount *mnt, struct inode *inode, int mask)
  {
        int retval;
  
                        return -EACCES;
        }
  
 -      retval = do_inode_permission(inode, mask);
 +      retval = do_inode_permission(mnt, inode, mask);
        if (retval)
                return retval;
  
        if (retval)
                return retval;
  
 -      return security_inode_permission(inode, mask);
 +      retval = security_inode_permission(inode, mask);
 +      return retval;
 +}
 +EXPORT_SYMBOL(__inode_permission2);
 +
 +int __inode_permission(struct inode *inode, int mask)
 +{
 +      return __inode_permission2(NULL, inode, mask);
  }
  EXPORT_SYMBOL(__inode_permission);
  
@@@ -468,20 -456,14 +468,20 @@@ static int sb_permission(struct super_b
   *
   * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
   */
 -int inode_permission(struct inode *inode, int mask)
 +int inode_permission2(struct vfsmount *mnt, struct inode *inode, int mask)
  {
        int retval;
  
        retval = sb_permission(inode->i_sb, inode, mask);
        if (retval)
                return retval;
 -      return __inode_permission(inode, mask);
 +      return __inode_permission2(mnt, inode, mask);
 +}
 +EXPORT_SYMBOL(inode_permission2);
 +
 +int inode_permission(struct inode *inode, int mask)
 +{
 +      return inode_permission2(NULL, inode, mask);
  }
  EXPORT_SYMBOL(inode_permission);
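
These namei.c hunks all follow one repeated pattern: each VFS entry point gains a *2 variant that takes a struct vfsmount, and the original function becomes a thin wrapper that passes NULL, so existing callers keep working while mount-aware filesystems (via the permission2 hook) get the extra context. A generic, hedged sketch of that dispatch shape, with made-up names (struct mount, struct object, op_check/op_check2, handler/handler2), is:

#include <stddef.h>

struct mount;				/* opaque, like struct vfsmount */
struct object {
	int (*handler)(struct object *obj, int mask);
	int (*handler2)(struct mount *mnt, struct object *obj, int mask);
};

/* Mount-aware variant: prefer the richer hook when both a mount and a
 * handler2 are available, otherwise fall back to the legacy hook. */
static int op_check2(struct mount *mnt, struct object *obj, int mask)
{
	if (mnt && obj->handler2)
		return obj->handler2(mnt, obj, mask);
	if (obj->handler)
		return obj->handler(obj, mask);
	return 0;			/* no hook installed in this sketch */
}

/* Legacy entry point: unchanged signature, delegates with a NULL mount. */
static int op_check(struct object *obj, int mask)
{
	return op_check2(NULL, obj, mask);
}

Callers that hold a struct path can pass path->mnt, as the later hunks do for may_lookup(), may_create() and friends; callers that only have an inode keep using the NULL-mount wrapper.
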
  
@@@ -787,81 -769,6 +787,81 @@@ static inline int d_revalidate(struct d
        return dentry->d_op->d_revalidate(dentry, flags);
  }
  
 +#define INIT_PATH_SIZE 64
 +
 +static void success_walk_trace(struct nameidata *nd)
 +{
 +      struct path *pt = &nd->path;
 +      struct inode *i = nd->inode;
 +      char buf[INIT_PATH_SIZE], *try_buf;
 +      int cur_path_size;
 +      char *p;
 +
 +      /* When the eBPF/tracepoint hook is disabled, keep overhead low. */
 +      if (!trace_inodepath_enabled())
 +              return;
 +
 +      /* First try stack allocated buffer. */
 +      try_buf = buf;
 +      cur_path_size = INIT_PATH_SIZE;
 +
 +      while (cur_path_size <= PATH_MAX) {
 +              /* Free previous heap allocation if we are now trying
 +               * a second or later heap allocation.
 +               */
 +              if (try_buf != buf)
 +                      kfree(try_buf);
 +
 +              /* All but the first alloc are on the heap. */
 +              if (cur_path_size != INIT_PATH_SIZE) {
 +                      try_buf = kmalloc(cur_path_size, GFP_KERNEL);
 +                      if (!try_buf) {
 +                              try_buf = buf;
 +                              sprintf(try_buf, "error:buf_alloc_failed");
 +                              break;
 +                      }
 +              }
 +
 +              p = d_path(pt, try_buf, cur_path_size);
 +
 +              if (!IS_ERR(p)) {
 +                      char *end = mangle_path(try_buf, p, "\n");
 +
 +                      if (end) {
 +                              try_buf[end - try_buf] = 0;
 +                              break;
 +                      } else {
 +                              /* On mangle errors, double path size
 +                               * till PATH_MAX.
 +                               */
 +                              cur_path_size = cur_path_size << 1;
 +                              continue;
 +                      }
 +              }
 +
 +              if (PTR_ERR(p) == -ENAMETOOLONG) {
 +                      /* If d_path complains that name is too long,
 +                       * then double path size till PATH_MAX.
 +                       */
 +                      cur_path_size = cur_path_size << 1;
 +                      continue;
 +              }
 +
 +              sprintf(try_buf, "error:d_path_failed_%lu",
 +                      -1 * PTR_ERR(p));
 +              break;
 +      }
 +
 +      if (cur_path_size > PATH_MAX)
 +              sprintf(try_buf, "error:d_path_name_too_long");
 +
 +      trace_inodepath(i, try_buf);
 +
 +      if (try_buf != buf)
 +              kfree(try_buf);
 +      return;
 +}
 +
  /**
   * complete_walk - successful completion of path walk
   * @nd:  pointer to nameidata
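
The success_walk_trace() helper added above resolves the path with a small on-stack buffer first and only falls back to kmalloc(), doubling the size up to PATH_MAX when d_path() reports -ENAMETOOLONG or mangling fails. The same grow-and-retry idiom in plain userspace C is shown below; snprintf stands in for d_path(), and INIT_SIZE, PATH_LIMIT and the "/traced" prefix are illustrative placeholders.

#include <stdio.h>
#include <stdlib.h>

#define INIT_SIZE  64
#define PATH_LIMIT 4096

/* Format a name into the smallest buffer that fits: start on the stack,
 * then double a heap buffer until PATH_LIMIT is reached. */
static void trace_path(const char *name)
{
	char stackbuf[INIT_SIZE];
	char *buf = stackbuf;
	size_t size = sizeof(stackbuf);

	for (;;) {
		int n = snprintf(buf, size, "/traced%s", name);

		if (n >= 0 && (size_t)n < size)
			break;			/* it fit */
		if (buf != stackbuf)
			free(buf);
		size <<= 1;			/* double, like cur_path_size <<= 1 */
		if (size > PATH_LIMIT) {
			puts("error:name_too_long");
			return;
		}
		buf = malloc(size);
		if (!buf) {
			puts("error:buf_alloc_failed");
			return;
		}
	}
	puts(buf);				/* stands in for trace_inodepath() */
	if (buf != stackbuf)
		free(buf);
}
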
@@@ -884,21 -791,15 +884,21 @@@ static int complete_walk(struct nameida
                        return -ECHILD;
        }
  
 -      if (likely(!(nd->flags & LOOKUP_JUMPED)))
 +      if (likely(!(nd->flags & LOOKUP_JUMPED))) {
 +              success_walk_trace(nd);
                return 0;
 +      }
  
 -      if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
 +      if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) {
 +              success_walk_trace(nd);
                return 0;
 +      }
  
        status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
 -      if (status > 0)
 +      if (status > 0) {
 +              success_walk_trace(nd);
                return 0;
 +      }
  
        if (!status)
                status = -ESTALE;
@@@ -1788,13 -1689,13 +1788,13 @@@ static int lookup_slow(struct nameidat
  static inline int may_lookup(struct nameidata *nd)
  {
        if (nd->flags & LOOKUP_RCU) {
 -              int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
 +              int err = inode_permission2(nd->path.mnt, nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
                if (err != -ECHILD)
                        return err;
                if (unlazy_walk(nd, NULL, 0))
                        return -ECHILD;
        }
 -      return inode_permission(nd->inode, MAY_EXEC);
 +      return inode_permission2(nd->path.mnt, nd->inode, MAY_EXEC);
  }
  
  static inline int handle_dots(struct nameidata *nd, int type)
@@@ -2151,12 -2052,11 +2151,12 @@@ static const char *path_init(struct nam
        nd->depth = 0;
        if (flags & LOOKUP_ROOT) {
                struct dentry *root = nd->root.dentry;
 +              struct vfsmount *mnt = nd->root.mnt;
                struct inode *inode = root->d_inode;
                if (*s) {
                        if (!d_can_lookup(root))
                                return ERR_PTR(-ENOTDIR);
 -                      retval = inode_permission(inode, MAY_EXEC);
 +                      retval = inode_permission2(mnt, inode, MAY_EXEC);
                        if (retval)
                                return ERR_PTR(retval);
                }
@@@ -2290,7 -2190,6 +2290,7 @@@ static int path_lookupat(struct nameida
        if (!err && nd->flags & LOOKUP_DIRECTORY)
                if (!d_can_lookup(nd->path.dentry))
                        err = -ENOTDIR;
 +
        if (!err) {
                *path = nd->path;
                nd->path.mnt = NULL;
@@@ -2428,14 -2327,13 +2428,14 @@@ EXPORT_SYMBOL(vfs_path_lookup)
  /**
   * lookup_one_len - filesystem helper to lookup single pathname component
   * @name:     pathname component to lookup
 + * @mnt:      mount we are looking up on
   * @base:     base directory to lookup from
   * @len:      maximum length to which @name should be interpreted
   *
   * Note that this routine is purely a helper for filesystem usage and should
   * not be called by generic code.
   */
 -struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
 +struct dentry *lookup_one_len2(const char *name, struct vfsmount *mnt, struct dentry *base, int len)
  {
        struct qstr this;
        unsigned int c;
                        return ERR_PTR(err);
        }
  
 -      err = inode_permission(base->d_inode, MAY_EXEC);
 +      err = inode_permission2(mnt, base->d_inode, MAY_EXEC);
        if (err)
                return ERR_PTR(err);
  
        return __lookup_hash(&this, base, 0);
  }
 +EXPORT_SYMBOL(lookup_one_len2);
 +
 +struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
 +{
 +      return lookup_one_len2(name, NULL, base, len);
 +}
  EXPORT_SYMBOL(lookup_one_len);
  
  int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
@@@ -2707,7 -2599,7 +2707,7 @@@ EXPORT_SYMBOL(__check_sticky)
   * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
   *     nfs_async_unlink().
   */
 -static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
 +static int may_delete(struct vfsmount *mnt, struct inode *dir, struct dentry *victim, bool isdir)
  {
        struct inode *inode = d_backing_inode(victim);
        int error;
        BUG_ON(victim->d_parent->d_inode != dir);
        audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
  
 -      error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
 +      error = inode_permission2(mnt, dir, MAY_WRITE | MAY_EXEC);
        if (error)
                return error;
        if (IS_APPEND(dir))
   *  3. We should have write and exec permissions on dir
   *  4. We can't do it if dir is immutable (done in permission())
   */
 -static inline int may_create(struct inode *dir, struct dentry *child)
 +static inline int may_create(struct vfsmount *mnt, struct inode *dir, struct dentry *child)
  {
        audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
        if (child->d_inode)
                return -EEXIST;
        if (IS_DEADDIR(dir))
                return -ENOENT;
 -      return inode_permission(dir, MAY_WRITE | MAY_EXEC);
 +      return inode_permission2(mnt, dir, MAY_WRITE | MAY_EXEC);
  }
  
  /*
@@@ -2804,10 -2696,10 +2804,10 @@@ void unlock_rename(struct dentry *p1, s
  }
  EXPORT_SYMBOL(unlock_rename);
  
 -int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 -              bool want_excl)
 +int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry,
 +              umode_t mode, bool want_excl)
  {
 -      int error = may_create(dir, dentry);
 +      int error = may_create(mnt, dir, dentry);
        if (error)
                return error;
  
        if (error)
                return error;
        error = dir->i_op->create(dir, dentry, mode, want_excl);
 +      if (error)
 +              return error;
 +      error = security_inode_post_create(dir, dentry, mode);
 +      if (error)
 +              return error;
        if (!error)
                fsnotify_create(dir, dentry);
 +
        return error;
  }
 +EXPORT_SYMBOL(vfs_create2);
 +
 +int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 +              bool want_excl)
 +{
 +      return vfs_create2(NULL, dir, dentry, mode, want_excl);
 +}
  EXPORT_SYMBOL(vfs_create);
  
  static int may_open(struct path *path, int acc_mode, int flag)
  {
        struct dentry *dentry = path->dentry;
 +      struct vfsmount *mnt = path->mnt;
        struct inode *inode = dentry->d_inode;
        int error;
  
                break;
        }
  
 -      error = inode_permission(inode, acc_mode);
 +      error = inode_permission2(mnt, inode, acc_mode);
        if (error)
                return error;
  
@@@ -2905,7 -2783,7 +2905,7 @@@ static int handle_truncate(struct file 
        if (!error)
                error = security_path_truncate(path);
        if (!error) {
 -              error = do_truncate(path->dentry, 0,
 +              error = do_truncate2(path->mnt, path->dentry, 0,
                                    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
                                    filp);
        }
@@@ -2926,7 -2804,7 +2926,7 @@@ static int may_o_create(struct path *di
        if (error)
                return error;
  
 -      error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
 +      error = inode_permission2(dir->mnt, dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
        if (error)
                return error;
  
@@@ -3112,7 -2990,6 +3112,7 @@@ static int lookup_open(struct nameidat
                        bool got_write, int *opened)
  {
        struct dentry *dir = nd->path.dentry;
 +      struct vfsmount *mnt = nd->path.mnt;
        struct inode *dir_inode = dir->d_inode;
        struct dentry *dentry;
        int error;
                error = security_path_mknod(&nd->path, dentry, mode, 0);
                if (error)
                        goto out_dput;
 -              error = vfs_create(dir->d_inode, dentry, mode,
 +              error = vfs_create2(mnt, dir->d_inode, dentry, mode,
                                   nd->flags & LOOKUP_EXCL);
                if (error)
                        goto out_dput;
@@@ -3183,8 -3060,8 +3183,8 @@@ static int do_last(struct nameidata *nd
                   int *opened)
  {
        struct dentry *dir = nd->path.dentry;
-       kuid_t dir_uid = dir->d_inode->i_uid;
-       umode_t dir_mode = dir->d_inode->i_mode;
+       kuid_t dir_uid = nd->inode->i_uid;
+       umode_t dir_mode = nd->inode->i_mode;
        int open_flag = op->open_flag;
        bool will_truncate = (open_flag & O_TRUNC) != 0;
        bool got_write = false;
@@@ -3430,7 -3307,7 +3430,7 @@@ static int do_tmpfile(struct nameidata 
                goto out;
        dir = path.dentry->d_inode;
        /* we want directory to be writable */
 -      error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
 +      error = inode_permission2(path.mnt, dir, MAY_WRITE | MAY_EXEC);
        if (error)
                goto out2;
        if (!dir->i_op->tmpfile) {
@@@ -3519,8 -3396,6 +3519,8 @@@ out2
                                error = -ESTALE;
                }
                file = ERR_PTR(error);
 +      } else {
 +              global_filetable_add(file);
        }
        return file;
  }
@@@ -3666,9 -3541,9 +3666,9 @@@ inline struct dentry *user_path_create(
  }
  EXPORT_SYMBOL(user_path_create);
  
 -int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 +int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
  {
 -      int error = may_create(dir, dentry);
 +      int error = may_create(mnt, dir, dentry);
  
        if (error)
                return error;
                return error;
  
        error = dir->i_op->mknod(dir, dentry, mode, dev);
 +      if (error)
 +              return error;
 +
 +      error = security_inode_post_create(dir, dentry, mode);
 +      if (error)
 +              return error;
 +
        if (!error)
                fsnotify_create(dir, dentry);
 +
        return error;
  }
 +EXPORT_SYMBOL(vfs_mknod2);
 +
 +int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 +{
 +      return vfs_mknod2(NULL, dir, dentry, mode, dev);
 +}
  EXPORT_SYMBOL(vfs_mknod);
  
  static int may_mknod(umode_t mode)
@@@ -3748,10 -3609,10 +3748,10 @@@ retry
                goto out;
        switch (mode & S_IFMT) {
                case 0: case S_IFREG:
 -                      error = vfs_create(path.dentry->d_inode,dentry,mode,true);
 +                      error = vfs_create2(path.mnt, path.dentry->d_inode,dentry,mode,true);
                        break;
                case S_IFCHR: case S_IFBLK:
 -                      error = vfs_mknod(path.dentry->d_inode,dentry,mode,
 +                      error = vfs_mknod2(path.mnt, path.dentry->d_inode,dentry,mode,
                                        new_decode_dev(dev));
                        break;
                case S_IFIFO: case S_IFSOCK:
@@@ -3772,9 -3633,9 +3772,9 @@@ SYSCALL_DEFINE3(mknod, const char __use
        return sys_mknodat(AT_FDCWD, filename, mode, dev);
  }
  
 -int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 +int vfs_mkdir2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, umode_t mode)
  {
 -      int error = may_create(dir, dentry);
 +      int error = may_create(mnt, dir, dentry);
        unsigned max_links = dir->i_sb->s_max_links;
  
        if (error)
                fsnotify_mkdir(dir, dentry);
        return error;
  }
 +EXPORT_SYMBOL(vfs_mkdir2);
 +
 +int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 +{
 +      return vfs_mkdir2(NULL, dir, dentry, mode);
 +}
  EXPORT_SYMBOL(vfs_mkdir);
  
  SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
@@@ -3820,7 -3675,7 +3820,7 @@@ retry
                mode &= ~current_umask();
        error = security_path_mkdir(&path, dentry, mode);
        if (!error)
 -              error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
 +              error = vfs_mkdir2(path.mnt, path.dentry->d_inode, dentry, mode);
        done_path_create(&path, dentry);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
@@@ -3859,9 -3714,9 +3859,9 @@@ void dentry_unhash(struct dentry *dentr
  }
  EXPORT_SYMBOL(dentry_unhash);
  
 -int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 +int vfs_rmdir2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry)
  {
 -      int error = may_delete(dir, dentry, 1);
 +      int error = may_delete(mnt, dir, dentry, 1);
  
        if (error)
                return error;
                d_delete(dentry);
        return error;
  }
 +EXPORT_SYMBOL(vfs_rmdir2);
 +
 +int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 +{
 +      return vfs_rmdir2(NULL, dir, dentry);
 +}
  EXPORT_SYMBOL(vfs_rmdir);
  
  static long do_rmdir(int dfd, const char __user *pathname)
@@@ -3947,7 -3796,7 +3947,7 @@@ retry
        error = security_path_rmdir(&path, dentry);
        if (error)
                goto exit3;
 -      error = vfs_rmdir(path.dentry->d_inode, dentry);
 +      error = vfs_rmdir2(path.mnt, path.dentry->d_inode, dentry);
  exit3:
        dput(dentry);
  exit2:
@@@ -3986,10 -3835,10 +3986,10 @@@ SYSCALL_DEFINE1(rmdir, const char __use
   * be appropriate for callers that expect the underlying filesystem not
   * to be NFS exported.
   */
 -int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
 +int vfs_unlink2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
  {
        struct inode *target = dentry->d_inode;
 -      int error = may_delete(dir, dentry, 0);
 +      int error = may_delete(mnt, dir, dentry, 0);
  
        if (error)
                return error;
  
        return error;
  }
 +EXPORT_SYMBOL(vfs_unlink2);
 +
 +int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
 +{
 +      return vfs_unlink2(NULL, dir, dentry, delegated_inode);
 +}
  EXPORT_SYMBOL(vfs_unlink);
  
  /*
@@@ -4077,7 -3920,7 +4077,7 @@@ retry_deleg
                error = security_path_unlink(&path, dentry);
                if (error)
                        goto exit2;
 -              error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode);
 +              error = vfs_unlink2(path.mnt, path.dentry->d_inode, dentry, &delegated_inode);
  exit2:
                dput(dentry);
        }
@@@ -4127,9 -3970,9 +4127,9 @@@ SYSCALL_DEFINE1(unlink, const char __us
        return do_unlinkat(AT_FDCWD, pathname);
  }
  
 -int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
 +int vfs_symlink2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, const char *oldname)
  {
 -      int error = may_create(dir, dentry);
 +      int error = may_create(mnt, dir, dentry);
  
        if (error)
                return error;
                fsnotify_create(dir, dentry);
        return error;
  }
 +EXPORT_SYMBOL(vfs_symlink2);
 +
 +int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
 +{
 +      return vfs_symlink2(NULL, dir, dentry, oldname);
 +}
  EXPORT_SYMBOL(vfs_symlink);
  
  SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
@@@ -4174,7 -4011,7 +4174,7 @@@ retry
  
        error = security_path_symlink(&path, dentry, from->name);
        if (!error)
 -              error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
 +              error = vfs_symlink2(path.mnt, path.dentry->d_inode, dentry, from->name);
        done_path_create(&path, dentry);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
@@@ -4209,7 -4046,7 +4209,7 @@@ SYSCALL_DEFINE2(symlink, const char __u
   * be appropriate for callers that expect the underlying filesystem not
   * to be NFS exported.
   */
 -int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
 +int vfs_link2(struct vfsmount *mnt, struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
  {
        struct inode *inode = old_dentry->d_inode;
        unsigned max_links = dir->i_sb->s_max_links;
        if (!inode)
                return -ENOENT;
  
 -      error = may_create(dir, new_dentry);
 +      error = may_create(mnt, dir, new_dentry);
        if (error)
                return error;
  
                fsnotify_link(dir, inode, new_dentry);
        return error;
  }
 +EXPORT_SYMBOL(vfs_link2);
 +
 +int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
 +{
 +      return vfs_link2(NULL, old_dentry, dir, new_dentry, delegated_inode);
 +}
  EXPORT_SYMBOL(vfs_link);
  
  /*
@@@ -4322,7 -4153,7 +4322,7 @@@ retry
        error = security_path_link(old_path.dentry, &new_path, new_dentry);
        if (error)
                goto out_dput;
 -      error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
 +      error = vfs_link2(old_path.mnt, old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
  out_dput:
        done_path_create(&new_path, new_dentry);
        if (delegated_inode) {
@@@ -4397,8 -4228,7 +4397,8 @@@ SYSCALL_DEFINE2(link, const char __use
   *       ->i_mutex on parents, which works but leads to some truly excessive
   *       locking].
   */
 -int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 +int vfs_rename2(struct vfsmount *mnt,
 +             struct inode *old_dir, struct dentry *old_dentry,
               struct inode *new_dir, struct dentry *new_dentry,
               struct inode **delegated_inode, unsigned int flags)
  {
        if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
                return 0;
  
 -      error = may_delete(old_dir, old_dentry, is_dir);
 +      error = may_delete(mnt, old_dir, old_dentry, is_dir);
        if (error)
                return error;
  
        if (!target) {
 -              error = may_create(new_dir, new_dentry);
 +              error = may_create(mnt, new_dir, new_dentry);
        } else {
                new_is_dir = d_is_dir(new_dentry);
  
                if (!(flags & RENAME_EXCHANGE))
 -                      error = may_delete(new_dir, new_dentry, is_dir);
 +                      error = may_delete(mnt, new_dir, new_dentry, is_dir);
                else
 -                      error = may_delete(new_dir, new_dentry, new_is_dir);
 +                      error = may_delete(mnt, new_dir, new_dentry, new_is_dir);
        }
        if (error)
                return error;
         */
        if (new_dir != old_dir) {
                if (is_dir) {
 -                      error = inode_permission(source, MAY_WRITE);
 +                      error = inode_permission2(mnt, source, MAY_WRITE);
                        if (error)
                                return error;
                }
                if ((flags & RENAME_EXCHANGE) && new_is_dir) {
 -                      error = inode_permission(target, MAY_WRITE);
 +                      error = inode_permission2(mnt, target, MAY_WRITE);
                        if (error)
                                return error;
                }
  
        return error;
  }
 +EXPORT_SYMBOL(vfs_rename2);
 +
 +int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 +             struct inode *new_dir, struct dentry *new_dentry,
 +             struct inode **delegated_inode, unsigned int flags)
 +{
 +      return vfs_rename2(NULL, old_dir, old_dentry, new_dir, new_dentry, delegated_inode, flags);
 +}
  EXPORT_SYMBOL(vfs_rename);
  
  SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
@@@ -4655,7 -4477,7 +4655,7 @@@ retry_deleg
                                     &new_path, new_dentry, flags);
        if (error)
                goto exit5;
 -      error = vfs_rename(old_path.dentry->d_inode, old_dentry,
 +      error = vfs_rename2(old_path.mnt, old_path.dentry->d_inode, old_dentry,
                           new_path.dentry->d_inode, new_dentry,
                           &delegated_inode, flags);
  exit5:
@@@ -4700,7 -4522,7 +4700,7 @@@ SYSCALL_DEFINE2(rename, const char __us
  
  int vfs_whiteout(struct inode *dir, struct dentry *dentry)
  {
 -      int error = may_create(dir, dentry);
 +      int error = may_create(NULL, dir, dentry);
        if (error)
                return error;
  
diff --combined kernel/events/core.c
@@@ -158,7 -158,6 +158,7 @@@ enum event_type_t 
  struct static_key_deferred perf_sched_events __read_mostly;
  static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
  static DEFINE_PER_CPU(int, perf_sched_cb_usages);
 +static DEFINE_PER_CPU(bool, is_idle);
  
  static atomic_t nr_mmap_events __read_mostly;
  static atomic_t nr_comm_events __read_mostly;
@@@ -176,15 -175,8 +176,15 @@@ static struct srcu_struct pmus_srcu
   *   0 - disallow raw tracepoint access for unpriv
   *   1 - disallow cpu events for unpriv
   *   2 - disallow kernel profiling for unpriv
 + *   3 - disallow all unpriv perf event use
   */
 +#ifdef CONFIG_PERF_EVENTS_USERMODE
 +int sysctl_perf_event_paranoid __read_mostly = -1;
 +#elif defined CONFIG_SECURITY_PERF_EVENTS_RESTRICT
 +int sysctl_perf_event_paranoid __read_mostly = 3;
 +#else
  int sysctl_perf_event_paranoid __read_mostly = 1;
 +#endif
  
  /* Minimum for 512 kiB + 1 user control page */
  int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
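
The paranoia levels documented above are consumed by small inline helpers that simply compare the sysctl against a threshold; level 3 ("disallow all unpriv perf event use") is the vendor addition checked through perf_paranoid_any() later in this file. A hedged sketch of how those thresholds map to the checks is below; the _sketch suffix is deliberate, since the real definitions live in include/linux/perf_event.h and may differ in detail.

/* Sketch only: threshold helpers over the sysctl shown above. */
extern int sysctl_perf_event_paranoid;

static inline int perf_paranoid_tracepoint_raw_sketch(void) { return sysctl_perf_event_paranoid > -1; }
static inline int perf_paranoid_cpu_sketch(void)            { return sysctl_perf_event_paranoid > 0;  }
static inline int perf_paranoid_kernel_sketch(void)         { return sysctl_perf_event_paranoid > 1;  }
static inline int perf_paranoid_any_sketch(void)            { return sysctl_perf_event_paranoid > 2;  } /* level 3: block all unpriv use */
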
@@@ -1500,17 -1492,10 +1500,17 @@@ static void perf_group_detach(struct pe
         * If this was a group event with sibling events then
         * upgrade the siblings to singleton events by adding them
         * to whatever list we are on.
 +       * If this isn't on a list, make sure we still remove the sibling's
 +       * group_entry from this sibling_list; otherwise, when that sibling
 +       * is later deallocated, it will try to remove itself from this
 +       * sibling_list, which may well have been deallocated already,
 +       * resulting in a use-after-free.
         */
        list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
                if (list)
                        list_move_tail(&sibling->group_entry, list);
 +              else
 +                      list_del_init(&sibling->group_entry);
                sibling->group_leader = sibling;
  
                /* Inherit group flags from the previous leader */
@@@ -1705,32 -1690,7 +1705,32 @@@ static int __perf_remove_from_context(v
  }
  
  
 -/*
 +#ifdef CONFIG_SMP
 +static void perf_retry_remove(struct perf_event *event,
 +                            struct remove_event *rep)
 +{
 +      int up_ret;
 +      /*
 +       * CPU was offline. Bring it online so we can
 +       * gracefully exit a perf context.
 +       */
 +      up_ret = cpu_up(event->cpu);
 +      if (!up_ret)
 +              /* Try the remove call once again. */
 +              cpu_function_call(event->cpu, __perf_remove_from_context,
 +                                rep);
 +      else
 +              pr_err("Failed to bring up CPU: %d, ret: %d\n",
 +                     event->cpu, up_ret);
 +}
 +#else
 +static void perf_retry_remove(struct perf_event *event,
 +                            struct remove_event *rep)
 +{
 +}
 +#endif
 +
 + /*
   * Remove the event from a task's (or a CPU's) list of events.
   *
   * CPU events are removed with a smp call. For task events we only
   * When called from perf_event_exit_task, it's OK because the
   * context has been detached from its task.
   */
 -static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 +static void __ref perf_remove_from_context(struct perf_event *event,
 +                                         bool detach_group)
  {
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;
                .event = event,
                .detach_group = detach_group,
        };
 +      int ret;
  
        lockdep_assert_held(&ctx->mutex);
  
                 * already called __perf_remove_from_context from
                 * perf_event_exit_cpu.
                 */
 -              cpu_function_call(event->cpu, __perf_remove_from_context, &re);
 +              ret = cpu_function_call(event->cpu, __perf_remove_from_context,
 +                                      &re);
 +              if (ret == -ENXIO)
 +                      perf_retry_remove(event, &re);
 +
                return;
        }
  
@@@ -1963,13 -1917,8 +1963,13 @@@ event_sched_in(struct perf_event *event
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
  
 -      event->state = PERF_EVENT_STATE_ACTIVE;
 -      event->oncpu = smp_processor_id();
 +      WRITE_ONCE(event->oncpu, smp_processor_id());
 +      /*
 +       * Order event::oncpu write to happen before the ACTIVE state
 +       * is visible.
 +       */
 +      smp_wmb();
 +      WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
  
        /*
         * Unthrottle events, since we scheduled we might have missed several
@@@ -2450,29 -2399,6 +2450,29 @@@ void perf_event_enable(struct perf_even
  }
  EXPORT_SYMBOL_GPL(perf_event_enable);
  
 +static int __perf_event_stop(void *info)
 +{
 +      struct perf_event *event = info;
 +
 +      /* for AUX events, our job is done if the event is already inactive */
 +      if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
 +              return 0;
 +
 +      /* matches smp_wmb() in event_sched_in() */
 +      smp_rmb();
 +
 +      /*
 +       * There is a window with interrupts enabled before we get here,
 +       * so we need to check again lest we try to stop another CPU's event.
 +       */
 +      if (READ_ONCE(event->oncpu) != smp_processor_id())
 +              return -EAGAIN;
 +
 +      event->pmu->stop(event, PERF_EF_UPDATE);
 +
 +      return 0;
 +}
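
The pairing between the two hunks matters here: event_sched_in() publishes event->oncpu with WRITE_ONCE() and an smp_wmb() before marking the event ACTIVE, and __perf_event_stop() reads the state first, issues the matching smp_rmb(), and only then trusts ->oncpu. A userspace C11 model of that publish/observe ordering is sketched below; it is illustrative only (atomic fences standing in for the kernel barriers), not kernel code.

#include <stdatomic.h>

static _Atomic int event_oncpu = -1;
static _Atomic int event_state;		/* 0 = inactive, 1 = active */

static void sched_in(int cpu)
{
	atomic_store_explicit(&event_oncpu, cpu, memory_order_relaxed);
	/* models smp_wmb(): order the oncpu store before the state store */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&event_state, 1, memory_order_relaxed);
}

static int stop_if_local(int this_cpu)
{
	if (atomic_load_explicit(&event_state, memory_order_relaxed) != 1)
		return 0;		/* nothing to stop */
	/* models smp_rmb(): order the state load before the oncpu load */
	atomic_thread_fence(memory_order_acquire);
	if (atomic_load_explicit(&event_oncpu, memory_order_relaxed) != this_cpu)
		return -1;		/* would be -EAGAIN in the kernel */
	/* safe to stop the event here: we know it was published for this CPU */
	return 0;
}
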
 +
  static int _perf_event_refresh(struct perf_event *event, int refresh)
  {
        /*
@@@ -3448,30 -3374,21 +3448,30 @@@ u64 perf_event_read_local(struct perf_e
  
  static int perf_event_read(struct perf_event *event, bool group)
  {
 -      int ret = 0;
 +      int event_cpu, ret = 0;
  
        /*
         * If event is enabled and currently active on a CPU, update the
         * value in the event structure:
         */
 -      if (event->state == PERF_EVENT_STATE_ACTIVE) {
 +      event_cpu = READ_ONCE(event->oncpu);
 +
 +      if (event->state == PERF_EVENT_STATE_ACTIVE &&
 +                                              !cpu_isolated(event_cpu)) {
                struct perf_read_data data = {
                        .event = event,
                        .group = group,
                        .ret = 0,
                };
 -              smp_call_function_single(event->oncpu,
 -                                       __perf_event_read, &data, 1);
 -              ret = data.ret;
 +
 +              if ((unsigned int)event_cpu >= nr_cpu_ids)
 +                      return 0;
 +              if (!event->attr.exclude_idle ||
 +                                      !per_cpu(is_idle, event_cpu)) {
 +                      smp_call_function_single(event_cpu,
 +                              __perf_event_read, &data, 1);
 +                      ret = data.ret;
 +              }
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
                struct perf_event_context *ctx = event->ctx;
                unsigned long flags;
@@@ -3566,8 -3483,7 +3566,8 @@@ find_get_context(struct pmu *pmu, struc
  
        if (!task) {
                /* Must be root to operate on a CPU event: */
 -              if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 +              if (event->owner != EVENT_OWNER_KERNEL && perf_paranoid_cpu() &&
 +                      !capable(CAP_SYS_ADMIN))
                        return ERR_PTR(-EACCES);
  
                /*
@@@ -3808,9 -3724,6 +3808,9 @@@ static void __free_event(struct perf_ev
        if (event->destroy)
                event->destroy(event);
  
 +      if (event->pmu->free_drv_configs)
 +              event->pmu->free_drv_configs(event);
 +
        if (event->ctx)
                put_ctx(event->ctx);
  
@@@ -3954,15 -3867,6 +3954,15 @@@ EXPORT_SYMBOL_GPL(perf_event_release_ke
   */
  static int perf_release(struct inode *inode, struct file *file)
  {
 +      struct perf_event *event = file->private_data;
 +
 +      /*
 +       * Event can be in state OFF because of a constraint check.
 +       * Change to ACTIVE so that it gets cleaned up correctly.
 +       */
 +      if ((event->state == PERF_EVENT_STATE_OFF) &&
 +          event->attr.constraint_duplicate)
 +              event->state = PERF_EVENT_STATE_ACTIVE;
        put_event(file->private_data);
        return 0;
  }
@@@ -4372,8 -4276,6 +4372,8 @@@ static int perf_event_set_output(struc
                                 struct perf_event *output_event);
  static int perf_event_set_filter(struct perf_event *event, void __user *arg);
  static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
 +static int perf_event_drv_configs(struct perf_event *event,
 +                                void __user *arg);
  
  static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
  {
        case PERF_EVENT_IOC_SET_BPF:
                return perf_event_set_bpf_prog(event, arg);
  
 +      case PERF_EVENT_IOC_SET_DRV_CONFIGS:
 +              return perf_event_drv_configs(event, (void __user *)arg);
 +
        default:
                return -ENOTTY;
        }
@@@ -4465,7 -4364,6 +4465,7 @@@ static long perf_compat_ioctl(struct fi
        switch (_IOC_NR(cmd)) {
        case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
        case _IOC_NR(PERF_EVENT_IOC_ID):
 +      case _IOC_NR(PERF_EVENT_IOC_SET_DRV_CONFIGS):
                /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
                if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
                        cmd &= ~IOCSIZE_MASK;
@@@ -4750,8 -4648,6 +4750,8 @@@ static void perf_mmap_open(struct vm_ar
                event->pmu->event_mapped(event);
  }
  
 +static void perf_pmu_output_stop(struct perf_event *event);
 +
  /*
   * A buffer can be mmap()ed multiple times; either directly through the same
   * event, or through other events by use of perf_event_set_output().
@@@ -4779,22 -4675,10 +4779,22 @@@ static void perf_mmap_close(struct vm_a
         */
        if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
            atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
 +              /*
 +               * Stop all AUX events that are writing to this buffer,
 +               * so that we can free its AUX pages and corresponding PMU
 +               * data. Note that after rb::aux_mmap_count dropped to zero,
 +               * they won't start any more (see perf_aux_output_begin()).
 +               */
 +              perf_pmu_output_stop(event);
 +
 +              /* now it's safe to free the pages */
                atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
                vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
  
 +              /* this has to be the last one */
                rb_free_aux(rb);
 +              WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
 +
                mutex_unlock(&event->mmap_mutex);
        }
  
@@@ -5003,7 -4887,15 +5003,15 @@@ accounting
         */
        user_lock_limit *= num_online_cpus();
  
-       user_locked = atomic_long_read(&user->locked_vm) + user_extra;
+       user_locked = atomic_long_read(&user->locked_vm);
+       /*
+        * sysctl_perf_event_mlock may have changed, so that
+        *     user->locked_vm > user_lock_limit
+        */
+       if (user_locked > user_lock_limit)
+               user_locked = user_lock_limit;
+       user_locked += user_extra;
  
        if (user_locked > user_lock_limit)
                extra = user_locked - user_lock_limit;
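
The accounting fix above handles an administrator lowering sysctl_perf_event_mlock after some buffers have already been charged: the stale per-user total is clamped to the new limit before the new request is added, so only the genuinely new pages spill over into the locked_vm ulimit check. A self-contained sketch of that arithmetic (names are illustrative, not the kernel's):

/* Clamp an already-charged total to a possibly-lowered limit before
 * charging a new request; return only the overflow ("extra"). */
static unsigned long charge_extra(unsigned long locked, unsigned long limit,
				  unsigned long request)
{
	unsigned long extra = 0;

	if (locked > limit)		/* sysctl shrank since earlier mmaps */
		locked = limit;
	locked += request;
	if (locked > limit)
		extra = locked - limit;
	return extra;
}

For example, with limit = 512, an already-charged total of 900 and a new request of 16, the clamp yields extra = 16; without it the caller would have been charged 404 pages for a 16-page request.
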
@@@ -5870,80 -5762,6 +5878,80 @@@ next
        rcu_read_unlock();
  }
  
 +struct remote_output {
 +      struct ring_buffer      *rb;
 +      int                     err;
 +};
 +
 +static void __perf_event_output_stop(struct perf_event *event, void *data)
 +{
 +      struct perf_event *parent = event->parent;
 +      struct remote_output *ro = data;
 +      struct ring_buffer *rb = ro->rb;
 +
 +      if (!has_aux(event))
 +              return;
 +
 +      if (!parent)
 +              parent = event;
 +
 +      /*
 +       * In case of inheritance, it will be the parent that links to the
 +       * ring-buffer, but it will be the child that's actually using it:
 +       */
 +      if (rcu_dereference(parent->rb) == rb)
 +              ro->err = __perf_event_stop(event);
 +}
 +
 +static int __perf_pmu_output_stop(void *info)
 +{
 +      struct perf_event *event = info;
 +      struct pmu *pmu = event->pmu;
 +      struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
 +      struct remote_output ro = {
 +              .rb     = event->rb,
 +      };
 +
 +      rcu_read_lock();
 +      perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro);
 +      if (cpuctx->task_ctx)
 +              perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop,
 +                                 &ro);
 +      rcu_read_unlock();
 +
 +      return ro.err;
 +}
 +
 +static void perf_pmu_output_stop(struct perf_event *event)
 +{
 +      struct perf_event *iter;
 +      int err, cpu;
 +
 +restart:
 +      rcu_read_lock();
 +      list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
 +              /*
 +               * For per-CPU events, we need to make sure that neither they
 +               * nor their children are running; for cpu==-1 events it's
 +               * sufficient to stop the event itself if it's active, since
 +               * it can't have children.
 +               */
 +              cpu = iter->cpu;
 +              if (cpu == -1)
 +                      cpu = READ_ONCE(iter->oncpu);
 +
 +              if (cpu == -1)
 +                      continue;
 +
 +              err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
 +              if (err == -EAGAIN) {
 +                      rcu_read_unlock();
 +                      goto restart;
 +              }
 +      }
 +      rcu_read_unlock();
 +}
 +
  /*
   * task tracking -- fork/exit
   *
@@@ -7140,8 -6958,6 +7148,8 @@@ static struct pmu perf_swevent = 
        .start          = perf_swevent_start,
        .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
 +
 +      .events_across_hotplug = 1,
  };
  
  #ifdef CONFIG_EVENT_TRACING
@@@ -7265,8 -7081,6 +7273,8 @@@ static struct pmu perf_tracepoint = 
        .start          = perf_swevent_start,
        .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
 +
 +      .events_across_hotplug = 1,
  };
  
  static inline void perf_tp_register(void)
@@@ -7379,15 -7193,6 +7387,15 @@@ void perf_bp_event(struct perf_event *b
  }
  #endif
  
 +static int perf_event_drv_configs(struct perf_event *event,
 +                                void __user *arg)
 +{
 +      if (!event->pmu->get_drv_configs)
 +              return -EINVAL;
 +
 +      return event->pmu->get_drv_configs(event, arg);
 +}
 +
  /*
   * hrtimer based swevent callback
   */
@@@ -7555,8 -7360,6 +7563,8 @@@ static struct pmu perf_cpu_clock = 
        .start          = cpu_clock_event_start,
        .stop           = cpu_clock_event_stop,
        .read           = cpu_clock_event_read,
 +
 +      .events_across_hotplug = 1,
  };
  
  /*
@@@ -7638,8 -7441,6 +7646,8 @@@ static struct pmu perf_task_clock = 
        .start          = task_clock_event_start,
        .stop           = task_clock_event_stop,
        .read           = task_clock_event_read,
 +
 +      .events_across_hotplug = 1,
  };
  
  static void perf_pmu_nop_void(struct pmu *pmu)
@@@ -8120,7 -7921,6 +8128,7 @@@ perf_event_alloc(struct perf_event_att
        if (!group_leader)
                group_leader = event;
  
 +      mutex_init(&event->group_leader_mutex);
        mutex_init(&event->child_mutex);
        INIT_LIST_HEAD(&event->child_list);
  
        INIT_LIST_HEAD(&event->sibling_list);
        INIT_LIST_HEAD(&event->rb_entry);
        INIT_LIST_HEAD(&event->active_entry);
 +      INIT_LIST_HEAD(&event->drv_configs);
        INIT_HLIST_NODE(&event->hlist_entry);
  
  
@@@ -8542,16 -8341,10 +8550,16 @@@ SYSCALL_DEFINE5(perf_event_open
        if (flags & ~PERF_FLAG_ALL)
                return -EINVAL;
  
 +      if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
 +              return -EACCES;
 +
        err = perf_copy_attr(attr_uptr, &attr);
        if (err)
                return err;
  
 +      if (attr.constraint_duplicate || attr.__reserved_1)
 +              return -EINVAL;
 +
        if (!attr.exclude_kernel) {
                if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
                        return -EACCES;
                        group_leader = NULL;
        }
  
 +      /*
 +       * Take the group_leader's group_leader_mutex before observing
 +       * anything in the group leader that leads to changes in ctx,
 +       * many of which may be changing on another thread.
 +       * In particular, we want to take this lock before deciding
 +       * whether we need to move_group.
 +       */
 +      if (group_leader)
 +              mutex_lock(&group_leader->group_leader_mutex);
 +
        if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
                task = find_lively_task_by_vpid(pid);
                if (IS_ERR(task)) {
        if (move_group)
                perf_event_ctx_unlock(group_leader, gctx);
        mutex_unlock(&ctx->mutex);
 +      if (group_leader)
 +              mutex_unlock(&group_leader->group_leader_mutex);
  
        if (task) {
                mutex_unlock(&task->signal->cred_guard_mutex);
@@@ -8931,8 -8712,6 +8939,8 @@@ err_task
        if (task)
                put_task_struct(task);
  err_group_fd:
 +      if (group_leader)
 +              mutex_unlock(&group_leader->group_leader_mutex);
        fdput(group);
  err_fd:
        put_unused_fd(event_fd);
@@@ -9644,90 -9423,29 +9652,90 @@@ static void __perf_event_exit_context(v
        rcu_read_unlock();
  }
  
 +static void __perf_event_stop_swclock(void *__info)
 +{
 +      struct perf_event_context *ctx = __info;
 +      struct perf_event *event, *tmp;
 +
 +      list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
 +              if (event->attr.config == PERF_COUNT_SW_CPU_CLOCK &&
 +                  event->attr.type == PERF_TYPE_SOFTWARE)
 +                      cpu_clock_event_stop(event, 0);
 +      }
 +}
 +
  static void perf_event_exit_cpu_context(int cpu)
  {
 +      struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
 +      unsigned long flags;
        struct pmu *pmu;
        int idx;
  
        idx = srcu_read_lock(&pmus_srcu);
        list_for_each_entry_rcu(pmu, &pmus, entry) {
 -              ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
 +              cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 +              ctx = &cpuctx->ctx;
 +
 +              /* Cancel the mux hrtimer to avoid CPU migration */
 +              if (pmu->task_ctx_nr != perf_sw_context) {
 +                      raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
 +                      hrtimer_cancel(&cpuctx->hrtimer);
 +                      cpuctx->hrtimer_active = 0;
 +                      raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock,
 +                                                      flags);
 +              }
  
                mutex_lock(&ctx->mutex);
 -              smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
 +              /*
 +               * If keeping events across hotplugging is supported, do not
 +               * remove the event list, but keep it alive across CPU hotplug.
 +               * The context is exited via an fd close path when userspace
 +               * is done and the target CPU is online. If software clock
 +               * event is active, then stop hrtimer associated with it.
 +               * Start the timer when the CPU comes back online.
 +               */
 +              if (!pmu->events_across_hotplug)
 +                      smp_call_function_single(cpu, __perf_event_exit_context,
 +                                               ctx, 1);
 +              else
 +                      smp_call_function_single(cpu, __perf_event_stop_swclock,
 +                                               ctx, 1);
                mutex_unlock(&ctx->mutex);
        }
        srcu_read_unlock(&pmus_srcu, idx);
  }
  
 +static void perf_event_start_swclock(int cpu)
 +{
 +      struct perf_event_context *ctx;
 +      struct pmu *pmu;
 +      int idx;
 +      struct perf_event *event, *tmp;
 +
 +      idx = srcu_read_lock(&pmus_srcu);
 +      list_for_each_entry_rcu(pmu, &pmus, entry) {
 +              if (pmu->events_across_hotplug) {
 +                      ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
 +                      list_for_each_entry_safe(event, tmp, &ctx->event_list,
 +                                               event_entry) {
 +                              if (event->attr.config ==
 +                                  PERF_COUNT_SW_CPU_CLOCK &&
 +                                  event->attr.type == PERF_TYPE_SOFTWARE)
 +                                      cpu_clock_event_start(event, 0);
 +                      }
 +              }
 +      }
 +      srcu_read_unlock(&pmus_srcu, idx);
 +}
 +
  static void perf_event_exit_cpu(int cpu)
  {
        perf_event_exit_cpu_context(cpu);
  }
  #else
  static inline void perf_event_exit_cpu(int cpu) { }
 +static inline void perf_event_start_swclock(int cpu) { }
  #endif
  
  static int
@@@ -9766,11 -9484,6 +9774,11 @@@ perf_cpu_notify(struct notifier_block *
        case CPU_DOWN_PREPARE:
                perf_event_exit_cpu(cpu);
                break;
 +
 +      case CPU_STARTING:
 +              perf_event_start_swclock(cpu);
 +              break;
 +
        default:
                break;
        }
        return NOTIFY_OK;
  }
  
 +static int event_idle_notif(struct notifier_block *nb, unsigned long action,
 +                                                      void *data)
 +{
 +      switch (action) {
 +      case IDLE_START:
 +              __this_cpu_write(is_idle, true);
 +              break;
 +      case IDLE_END:
 +              __this_cpu_write(is_idle, false);
 +              break;
 +      }
 +
 +      return NOTIFY_OK;
 +}
 +
 +static struct notifier_block perf_event_idle_nb = {
 +      .notifier_call = event_idle_notif,
 +};
 +
  void __init perf_event_init(void)
  {
        int ret;
        perf_pmu_register(&perf_task_clock, NULL, -1);
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
 +      idle_notifier_register(&perf_event_idle_nb);
        register_reboot_notifier(&perf_reboot_notifier);
  
        ret = init_hw_breakpoint();
diff --combined kernel/time/clocksource.c
@@@ -108,7 -108,7 +108,7 @@@ static int finished_booting
  
  #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
  static void clocksource_watchdog_work(struct work_struct *work);
 -static void clocksource_select(void);
 +static void clocksource_select(bool force);
  
  static LIST_HEAD(watchdog_list);
  static struct clocksource *watchdog;
@@@ -272,8 -272,15 +272,15 @@@ static void clocksource_watchdog(unsign
        next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
        if (next_cpu >= nr_cpu_ids)
                next_cpu = cpumask_first(cpu_online_mask);
-       watchdog_timer.expires += WATCHDOG_INTERVAL;
-       add_timer_on(&watchdog_timer, next_cpu);
+       /*
+        * Arm timer if not already pending: could race with concurrent
+        * pair clocksource_stop_watchdog() clocksource_start_watchdog().
+        */
+       if (!timer_pending(&watchdog_timer)) {
+               watchdog_timer.expires += WATCHDOG_INTERVAL;
+               add_timer_on(&watchdog_timer, next_cpu);
+       }
  out:
        spin_unlock(&watchdog_lock);
  }
@@@ -415,7 -422,7 +422,7 @@@ static int clocksource_watchdog_kthread
  {
        mutex_lock(&clocksource_mutex);
        if (__clocksource_watchdog_kthread())
 -              clocksource_select();
 +              clocksource_select(false);
        mutex_unlock(&clocksource_mutex);
        return 0;
  }
@@@ -555,12 -562,11 +562,12 @@@ static inline void clocksource_update_m
  
  #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
  
 -static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
 +static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur,
 +                                              bool force)
  {
        struct clocksource *cs;
  
 -      if (!finished_booting || list_empty(&clocksource_list))
 +      if ((!finished_booting && !force) || list_empty(&clocksource_list))
                return NULL;
  
        /*
        return NULL;
  }
  
 -static void __clocksource_select(bool skipcur)
 +static void __clocksource_select(bool skipcur, bool force)
  {
        bool oneshot = tick_oneshot_mode_active();
        struct clocksource *best, *cs;
  
        /* Find the best suitable clocksource */
 -      best = clocksource_find_best(oneshot, skipcur);
 +      best = clocksource_find_best(oneshot, skipcur, force);
        if (!best)
                return;
  
   * Select the clocksource with the best rating, or the clocksource,
   * which is selected by userspace override.
   */
 -static void clocksource_select(void)
 +static void clocksource_select(bool force)
  {
 -      __clocksource_select(false);
 +      return __clocksource_select(false, force);
  }
  
  static void clocksource_select_fallback(void)
  {
 -      __clocksource_select(true);
 +      __clocksource_select(true, false);
  }
  
  #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
 -static inline void clocksource_select(void) { }
 +
 +static inline void clocksource_select(bool force) { }
  static inline void clocksource_select_fallback(void) { }
  
  #endif
  
 +/**
 + * clocksource_select_force - Force re-selection of the best clocksource
 + *                            among registered clocksources
 + *
 + * clocksource_select() cannot pick the best clocksource before
 + * clocksource_done_booting() has run, and it must be called with
 + * clocksource_mutex held, so provide a new API that other files can
 + * call to select the best clocksource irrespective of the
 + * finished_booting flag.
 + */
 +void clocksource_select_force(void)
 +{
 +      mutex_lock(&clocksource_mutex);
 +      clocksource_select(true);
 +      mutex_unlock(&clocksource_mutex);
 +}
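
A hedged usage sketch (not part of the patch): a platform timer driver that registers its clocksource early could call the new hook to make it current without waiting for late boot. read_hw_counter(), the 19.2 MHz rate and the assumption that clocksource_select_force() is declared in linux/clocksource.h by this series are all placeholders/assumptions.

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>

extern u64 read_hw_counter(void);		/* hypothetical MMIO counter read */
extern void clocksource_select_force(void);	/* assumed to be declared in a header by this series */

static cycle_t example_read(struct clocksource *cs)
{
	return (cycle_t)read_hw_counter();
}

static struct clocksource example_cs = {
	.name	= "example-timer",
	.rating	= 400,
	.read	= example_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_timer_init(void)
{
	int ret = clocksource_register_hz(&example_cs, 19200000);

	if (ret)
		return ret;
	clocksource_select_force();	/* adopt it now instead of waiting for late boot */
	return 0;
}
device_initcall(example_timer_init);
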
 +
  /*
   * clocksource_done_booting - Called near the end of core bootup
   *
@@@ -674,7 -662,7 +681,7 @@@ static int __init clocksource_done_boot
         * Run the watchdog first to eliminate unstable clock sources
         */
        __clocksource_watchdog_kthread();
 -      clocksource_select();
 +      clocksource_select(false);
        mutex_unlock(&clocksource_mutex);
        return 0;
  }
@@@ -763,7 -751,6 +770,7 @@@ void __clocksource_update_freq_scale(st
  }
  EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
  
 +
  /**
   * __clocksource_register_scale - Used to install new clocksources
   * @cs:               clocksource to be registered
@@@ -785,7 -772,7 +792,7 @@@ int __clocksource_register_scale(struc
        mutex_lock(&clocksource_mutex);
        clocksource_enqueue(cs);
        clocksource_enqueue_watchdog(cs);
 -      clocksource_select();
 +      clocksource_select(false);
        clocksource_select_watchdog(false);
        mutex_unlock(&clocksource_mutex);
        return 0;
@@@ -808,7 -795,7 +815,7 @@@ void clocksource_change_rating(struct c
  {
        mutex_lock(&clocksource_mutex);
        __clocksource_change_rating(cs, rating);
 -      clocksource_select();
 +      clocksource_select(false);
        clocksource_select_watchdog(false);
        mutex_unlock(&clocksource_mutex);
  }
@@@ -912,7 -899,7 +919,7 @@@ static ssize_t sysfs_override_clocksour
  
        ret = sysfs_get_uname(buf, override_name, count);
        if (ret >= 0)
 -              clocksource_select();
 +              clocksource_select(false);
  
        mutex_unlock(&clocksource_mutex);
  
diff --combined lib/test_kasan.c
  #define pr_fmt(fmt) "kasan test: %s " fmt, __func__
  
  #include <linux/kernel.h>
 +#include <linux/mman.h>
 +#include <linux/mm.h>
  #include <linux/printk.h>
  #include <linux/slab.h>
  #include <linux/string.h>
 +#include <linux/uaccess.h>
  #include <linux/module.h>
 +#include <linux/kasan.h>
 +
 +/*
 + * Note: test functions are marked noinline so that their names appear in
 + * reports.
 + */
  
  static noinline void __init kmalloc_oob_right(void)
  {
@@@ -74,34 -65,11 +74,34 @@@ static noinline void __init kmalloc_nod
        kfree(ptr);
  }
  
 -static noinline void __init kmalloc_large_oob_right(void)
 +#ifdef CONFIG_SLUB
 +static noinline void __init kmalloc_pagealloc_oob_right(void)
  {
        char *ptr;
        size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
  
 +      /* Allocate a chunk that does not fit into a SLUB cache to trigger
 +       * the page allocator fallback.
 +       */
 +      pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n");
 +      ptr = kmalloc(size, GFP_KERNEL);
 +      if (!ptr) {
 +              pr_err("Allocation failed\n");
 +              return;
 +      }
 +
 +      ptr[size] = 0;
 +      kfree(ptr);
 +}
 +#endif
 +
 +static noinline void __init kmalloc_large_oob_right(void)
 +{
 +      char *ptr;
 +      size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
 +      /* Allocate a chunk that is large enough, but still fits into a slab
 +       * and does not trigger the page allocator fallback in SLUB.
 +       */
        pr_info("kmalloc large allocation: out-of-bounds to right\n");
        ptr = kmalloc(size, GFP_KERNEL);
        if (!ptr) {
@@@ -125,6 -93,7 +125,7 @@@ static noinline void __init kmalloc_oob
        if (!ptr1 || !ptr2) {
                pr_err("Allocation failed\n");
                kfree(ptr1);
+               kfree(ptr2);
                return;
        }
  
@@@ -303,8 -272,6 +304,8 @@@ static noinline void __init kmalloc_uaf
        }
  
        ptr1[40] = 'x';
 +      if (ptr1 == ptr2)
 +              pr_err("Could not detect use-after-free: ptr1 == ptr2\n");
        kfree(ptr2);
  }
  
@@@ -353,107 -320,11 +354,107 @@@ static noinline void __init kasan_stack
        *(volatile char *)p;
  }
  
 +static noinline void __init ksize_unpoisons_memory(void)
 +{
 +      char *ptr;
 +      size_t size = 123, real_size = size;
 +
 +      pr_info("ksize() unpoisons the whole allocated chunk\n");
 +      ptr = kmalloc(size, GFP_KERNEL);
 +      if (!ptr) {
 +              pr_err("Allocation failed\n");
 +              return;
 +      }
 +      real_size = ksize(ptr);
 +      /* This access doesn't trigger an error. */
 +      ptr[size] = 'x';
 +      /* This one does. */
 +      ptr[real_size] = 'y';
 +      kfree(ptr);
 +}
 +
 +static noinline void __init copy_user_test(void)
 +{
 +      char *kmem;
 +      char __user *usermem;
 +      size_t size = 10;
 +      int unused;
 +
 +      kmem = kmalloc(size, GFP_KERNEL);
 +      if (!kmem)
 +              return;
 +
 +      usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
 +                          PROT_READ | PROT_WRITE | PROT_EXEC,
 +                          MAP_ANONYMOUS | MAP_PRIVATE, 0);
 +      if (IS_ERR(usermem)) {
 +              pr_err("Failed to allocate user memory\n");
 +              kfree(kmem);
 +              return;
 +      }
 +
 +      pr_info("out-of-bounds in copy_from_user()\n");
 +      unused = copy_from_user(kmem, usermem, size + 1);
 +
 +      pr_info("out-of-bounds in copy_to_user()\n");
 +      unused = copy_to_user(usermem, kmem, size + 1);
 +
 +      pr_info("out-of-bounds in __copy_from_user()\n");
 +      unused = __copy_from_user(kmem, usermem, size + 1);
 +
 +      pr_info("out-of-bounds in __copy_to_user()\n");
 +      unused = __copy_to_user(usermem, kmem, size + 1);
 +
 +      pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
 +      unused = __copy_from_user_inatomic(kmem, usermem, size + 1);
 +
 +      pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
 +      unused = __copy_to_user_inatomic(usermem, kmem, size + 1);
 +
 +      pr_info("out-of-bounds in strncpy_from_user()\n");
 +      unused = strncpy_from_user(kmem, usermem, size + 1);
 +
 +      vm_munmap((unsigned long)usermem, PAGE_SIZE);
 +      kfree(kmem);
 +}
 +
 +static noinline void __init use_after_scope_test(void)
 +{
 +      volatile char *volatile p;
 +
 +      pr_info("use-after-scope on int\n");
 +      {
 +              int local = 0;
 +
 +              p = (char *)&local;
 +      }
 +      p[0] = 1;
 +      p[3] = 1;
 +
 +      pr_info("use-after-scope on array\n");
 +      {
 +              char local[1024] = {0};
 +
 +              p = local;
 +      }
 +      p[0] = 1;
 +      p[1023] = 1;
 +}
 +
  static int __init kmalloc_tests_init(void)
  {
 +      /*
 +       * Temporarily enable multi-shot mode. Otherwise, we'd only get a
 +       * report for the first case.
 +       */
 +      bool multishot = kasan_save_enable_multi_shot();
 +
        kmalloc_oob_right();
        kmalloc_oob_left();
        kmalloc_node_oob_right();
 +#ifdef CONFIG_SLUB
 +      kmalloc_pagealloc_oob_right();
 +#endif
        kmalloc_large_oob_right();
        kmalloc_oob_krealloc_more();
        kmalloc_oob_krealloc_less();
        kmem_cache_oob();
        kasan_stack_oob();
        kasan_global_oob();
 +      ksize_unpoisons_memory();
 +      copy_user_test();
 +      use_after_scope_test();
 +
 +      kasan_restore_multi_shot(multishot);
 +
        return -EAGAIN;
  }
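
The kfree(ptr2) added to the allocation-failure path in the kmalloc krealloc hunk above relies on kfree(NULL) being a no-op, so both pointers can be freed unconditionally when either allocation fails. A minimal sketch of that idiom (the function name is illustrative, not from the patch):

  #include <linux/slab.h>

  /* Sketch only: kfree(NULL) is a no-op, so both pointers may be freed
   * unconditionally when either allocation failed. */
  static void alloc_pair_sketch(void)
  {
          char *a = kmalloc(16, GFP_KERNEL);
          char *b = kmalloc(16, GFP_KERNEL);

          if (!a || !b) {
                  kfree(a);
                  kfree(b);
                  return;
          }

          /* ... use a and b ... */
          kfree(a);
          kfree(b);
  }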
  
diff --combined mm/mempolicy.c
@@@ -724,8 -724,7 +724,8 @@@ static int mbind_range(struct mm_struc
                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                 vma->anon_vma, vma->vm_file, pgoff,
 -                               new_pol, vma->vm_userfaultfd_ctx);
 +                               new_pol, vma->vm_userfaultfd_ctx,
 +                               vma_get_anon_name(vma));
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
@@@ -2570,7 -2569,9 +2570,7 @@@ static void __init check_numabalancing_
                set_numabalancing_state(numabalancing_override == 1);
  
        if (num_online_nodes() > 1 && !numabalancing_override) {
 -              pr_info("%s automatic NUMA balancing. "
 -                      "Configure with numa_balancing= or the "
 -                      "kernel.numa_balancing sysctl",
 +              pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
                        numabalancing_default ? "Enabling" : "Disabling");
                set_numabalancing_state(numabalancing_default);
        }
@@@ -2700,6 -2701,9 +2700,9 @@@ int mpol_parse_str(char *str, struct me
        char *flags = strchr(str, '=');
        int err = 1;
  
+       if (flags)
+               *flags++ = '\0';        /* terminate mode string */
        if (nodelist) {
                /* NUL-terminate mode or flags string */
                *nodelist++ = '\0';
        } else
                nodes_clear(nodes);
  
-       if (flags)
-               *flags++ = '\0';        /* terminate mode string */
        for (mode = 0; mode < MPOL_MAX; mode++) {
                if (!strcmp(str, policy_modes[mode])) {
                        break;
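
The mpol_parse_str() hunk above moves the NUL-termination at the '=' separator ahead of the nodelist handling, so the mode token is already well-formed before it is compared against policy_modes[]. A small userspace sketch of the same tokenization order, assuming the mode[=flags][:nodelist] form used by mempolicy strings (the helper name is hypothetical):

  #include <stdio.h>
  #include <string.h>

  /* Hypothetical helper mirroring the new ordering: terminate the mode
   * string at '=' first, then split off the nodelist at ':'. */
  static void split_mpol(char *str)
  {
          char *nodelist = strchr(str, ':');
          char *flags = strchr(str, '=');

          if (flags)
                  *flags++ = '\0';        /* terminate mode string */
          if (nodelist)
                  *nodelist++ = '\0';     /* terminate mode or flags string */

          printf("mode=%s flags=%s nodelist=%s\n",
                 str, flags ? flags : "-", nodelist ? nodelist : "-");
  }

  int main(void)
  {
          char buf[] = "bind=static:0-3";

          split_mpol(buf);        /* prints: mode=bind flags=static nodelist=0-3 */
          return 0;
  }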
diff --combined net/ipv4/tcp.c
@@@ -302,12 -302,6 +302,12 @@@ EXPORT_SYMBOL(sysctl_tcp_wmem)
  atomic_long_t tcp_memory_allocated;   /* Current allocated memory. */
  EXPORT_SYMBOL(tcp_memory_allocated);
  
 +int sysctl_tcp_delack_seg __read_mostly = TCP_DELACK_SEG;
 +EXPORT_SYMBOL(sysctl_tcp_delack_seg);
 +
 +int sysctl_tcp_use_userconfig __read_mostly;
 +EXPORT_SYMBOL(sysctl_tcp_use_userconfig);
 +
  /*
   * Current number of TCP sockets.
   */
@@@ -1413,11 -1407,8 +1413,11 @@@ static void tcp_cleanup_rbuf(struct soc
                   /* Delayed ACKs frequently hit locked sockets during bulk
                    * receive. */
                if (icsk->icsk_ack.blocked ||
 -                  /* Once-per-two-segments ACK was not sent by tcp_input.c */
 -                  tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
 +                  /* Once-per-sysctl_tcp_delack_seg segments
 +                   * ACK was not sent by tcp_input.c
 +                   */
 +                  tp->rcv_nxt - tp->rcv_wup > (icsk->icsk_ack.rcv_mss) *
 +                                              sysctl_tcp_delack_seg ||
                    /*
                     * If this read emptied read buffer, we send ACK, if
                     * connection is not bidirectional, user drained
@@@ -2270,6 -2261,7 +2270,7 @@@ int tcp_disconnect(struct sock *sk, in
        tp->window_clamp = 0;
        tcp_set_ca_state(sk, TCP_CA_Open);
        tcp_clear_retrans(tp);
+       tp->total_retrans = 0;
        inet_csk_delack_init(sk);
        /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
         * issue in __tcp_select_window()
        dst_release(sk->sk_rx_dst);
        sk->sk_rx_dst = NULL;
        tcp_saved_syn_free(tp);
+       tp->segs_in = 0;
+       tp->segs_out = 0;
        tp->bytes_acked = 0;
        tp->bytes_received = 0;
  
@@@ -2748,14 -2742,6 +2751,14 @@@ void tcp_get_info(struct sock *sk, stru
        rate64 = rate != ~0U ? rate : ~0ULL;
        put_unaligned(rate64, &info->tcpi_max_pacing_rate);
  
 +      /* Expose reference count for socket */
 +      if (sk->sk_socket) {
 +              struct file *filep = sk->sk_socket->file;
 +
 +              if (filep)
 +                      info->tcpi_count = file_count(filep);
 +      }
 +
        do {
                start = u64_stats_fetch_begin_irq(&tp->syncp);
                put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
@@@ -3129,52 -3115,6 +3132,52 @@@ void tcp_done(struct sock *sk
  }
  EXPORT_SYMBOL_GPL(tcp_done);
  
 +int tcp_abort(struct sock *sk, int err)
 +{
 +      if (!sk_fullsock(sk)) {
 +              if (sk->sk_state == TCP_NEW_SYN_RECV) {
 +                      struct request_sock *req = inet_reqsk(sk);
 +
 +                      local_bh_disable();
 +                      inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
 +                                                        req);
 +                      local_bh_enable();
 +                      return 0;
 +              }
 +              sock_gen_put(sk);
 +              return -EOPNOTSUPP;
 +      }
 +
 +      /* Don't race with userspace socket closes such as tcp_close. */
 +      lock_sock(sk);
 +
 +      if (sk->sk_state == TCP_LISTEN) {
 +              tcp_set_state(sk, TCP_CLOSE);
 +              inet_csk_listen_stop(sk);
 +      }
 +
 +      /* Don't race with BH socket closes such as inet_csk_listen_stop. */
 +      local_bh_disable();
 +      bh_lock_sock(sk);
 +
 +      if (!sock_flag(sk, SOCK_DEAD)) {
 +              sk->sk_err = err;
 +              /* This barrier is coupled with smp_rmb() in tcp_poll() */
 +              smp_wmb();
 +              sk->sk_error_report(sk);
 +              if (tcp_need_reset(sk->sk_state))
 +                      tcp_send_active_reset(sk, GFP_ATOMIC);
 +              tcp_done(sk);
 +      }
 +
 +      bh_unlock_sock(sk);
 +      local_bh_enable();
 +      release_sock(sk);
 +      sock_put(sk);
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(tcp_abort);
 +
  extern struct tcp_congestion_ops tcp_reno;
  
  static __initdata unsigned long thash_entries;
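
The sysctl_tcp_delack_seg addition above raises the delayed-ACK trigger in tcp_cleanup_rbuf() from one rcv_mss of unacknowledged data to rcv_mss * sysctl_tcp_delack_seg. A standalone sketch of that comparison only (names are illustrative; sequence arithmetic wraps modulo 2^32, as in the kernel):

  #include <stdbool.h>
  #include <stdint.h>

  /* Sketch of the threshold test: true once the receiver holds more than
   * delack_seg * rcv_mss bytes it has not yet acknowledged. */
  static bool delack_threshold_crossed(uint32_t rcv_nxt, uint32_t rcv_wup,
                                       uint32_t rcv_mss, uint32_t delack_seg)
  {
          return rcv_nxt - rcv_wup > rcv_mss * delack_seg;
  }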
diff --combined net/ipv6/ip6_vti.c
@@@ -441,8 -441,17 +441,17 @@@ vti6_xmit(struct sk_buff *skb, struct n
        int err = -1;
        int mtu;
  
-       if (!dst)
-               goto tx_err_link_failure;
+       if (!dst) {
+               fl->u.ip6.flowi6_oif = dev->ifindex;
+               fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+               dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+               if (dst->error) {
+                       dst_release(dst);
+                       dst = NULL;
+                       goto tx_err_link_failure;
+               }
+               skb_dst_set(skb, dst);
+       }
  
        dst_hold(dst);
        dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
@@@ -606,10 -615,9 +615,10 @@@ static int vti6_err(struct sk_buff *skb
                return 0;
  
        if (type == NDISC_REDIRECT)
 -              ip6_redirect(skb, net, skb->dev->ifindex, 0);
 +              ip6_redirect(skb, net, skb->dev->ifindex, 0,
 +                           sock_net_uid(net, NULL));
        else
 -              ip6_update_pmtu(skb, net, info, 0, 0);
 +              ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
        xfrm_state_put(x);
  
        return 0;
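
On the vti6_xmit() fallback lookup above: ip6_route_output() does not return NULL on failure, it hands back a route whose dst->error is set, so the caller must check the error and drop the reference, exactly as the new code does. A hedged sketch of that idiom in isolation (kernel context assumed; the wrapper name is hypothetical):

  #include <net/dst.h>
  #include <net/ip6_route.h>

  /* Hypothetical wrapper: return a usable dst or NULL, releasing the
   * error route that ip6_route_output() returns instead of NULL. */
  static struct dst_entry *route_or_null(struct net *net, struct flowi6 *fl6)
  {
          struct dst_entry *dst = ip6_route_output(net, NULL, fl6);

          if (dst->error) {
                  dst_release(dst);
                  return NULL;
          }
          return dst;
  }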
diff --combined sound/core/pcm_native.c
@@@ -29,7 -29,6 +29,7 @@@
  #include <linux/dma-mapping.h>
  #include <sound/core.h>
  #include <sound/control.h>
 +#include <sound/compress_offload.h>
  #include <sound/info.h>
  #include <sound/pcm.h>
  #include <sound/pcm_params.h>
@@@ -198,6 -197,7 +198,6 @@@ static inline void snd_leave_user(mm_se
  
  int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
  {
 -      struct snd_pcm_runtime *runtime;
        struct snd_pcm *pcm = substream->pcm;
        struct snd_pcm_str *pstr = substream->pstr;
  
        info->subdevices_count = pstr->substream_count;
        info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
        strlcpy(info->subname, substream->name, sizeof(info->subname));
 -      runtime = substream->runtime;
  
        return 0;
  }
@@@ -583,12 -584,11 +583,12 @@@ static int snd_pcm_hw_params(struct snd
        runtime->silence_threshold = 0;
        runtime->silence_size = 0;
        runtime->boundary = runtime->buffer_size;
 -      while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
 +      while (runtime->boundary * 2 * runtime->channels <=
 +                                      LONG_MAX - runtime->buffer_size)
                runtime->boundary *= 2;
  
        /* clear the buffer for avoiding possible kernel info leaks */
-       if (runtime->dma_area)
+       if (runtime->dma_area && !substream->ops->copy)
                memset(runtime->dma_area, 0, runtime->dma_bytes);
  
        snd_pcm_timer_resolution_change(substream);
@@@ -653,9 -653,7 +653,9 @@@ static int snd_pcm_hw_free(struct snd_p
        if (substream->ops->hw_free)
                result = substream->ops->hw_free(substream);
        snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
 -      pm_qos_remove_request(&substream->latency_pm_qos_req);
 +      if (pm_qos_request_active(&substream->latency_pm_qos_req))
 +              pm_qos_remove_request(&substream->latency_pm_qos_req);
 +
        return result;
  }
  
@@@ -1037,7 -1035,6 +1037,7 @@@ static int snd_pcm_pre_start(struct snd
        if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
                return -EBADFD;
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
 +          !substream->hw_no_buffer &&
            !snd_pcm_playback_data(substream))
                return -EPIPE;
        runtime->trigger_tstamp_latched = false;
@@@ -1091,33 -1088,6 +1091,33 @@@ int snd_pcm_start(struct snd_pcm_substr
                              SNDRV_PCM_STATE_RUNNING);
  }
  
 +static int snd_compressed_ioctl(struct snd_pcm_substream *substream,
 +                               unsigned int cmd, void __user *arg)
 +{
 +      struct snd_pcm_runtime *runtime;
 +      int err = 0;
 +
 +      if (PCM_RUNTIME_CHECK(substream))
 +              return -ENXIO;
 +      runtime = substream->runtime;
 +      pr_debug("%s called with cmd = %d\n", __func__, cmd);
 +      err = substream->ops->ioctl(substream, cmd, arg);
 +      return err;
 +}
 +
 +static int snd_user_ioctl(struct snd_pcm_substream *substream,
 +                        unsigned int cmd, void __user *arg)
 +{
 +      struct snd_pcm_runtime *runtime;
 +      int err = 0;
 +
 +      if (PCM_RUNTIME_CHECK(substream))
 +              return -ENXIO;
 +      runtime = substream->runtime;
 +      err = substream->ops->ioctl(substream, cmd, arg);
 +      return err;
 +}
 +
  /*
   * stop callbacks
   */
@@@ -2032,8 -2002,7 +2032,8 @@@ static int snd_pcm_hw_rule_sample_bits(
  #endif
  
  static unsigned int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100,
 -                                 48000, 64000, 88200, 96000, 176400, 192000 };
 +                              48000, 64000, 88200, 96000, 176400, 192000,
 +                              352800, 384000 };
  
  const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
        .count = ARRAY_SIZE(rates),
@@@ -2749,7 -2718,6 +2749,7 @@@ static int snd_pcm_sync_ptr(struct snd_
        volatile struct snd_pcm_mmap_status *status;
        volatile struct snd_pcm_mmap_control *control;
        int err;
 +      snd_pcm_uframes_t hw_avail;
  
        memset(&sync_ptr, 0, sizeof(sync_ptr));
        if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
                control->avail_min = sync_ptr.c.control.avail_min;
        else
                sync_ptr.c.control.avail_min = control->avail_min;
 +
 +      if (runtime->render_flag & SNDRV_NON_DMA_MODE) {
 +              hw_avail = snd_pcm_playback_hw_avail(runtime);
 +              if ((hw_avail >= runtime->start_threshold)
 +                      && (runtime->render_flag &
 +                              SNDRV_RENDER_STOPPED)) {
 +                      if (substream->ops->restart)
 +                              substream->ops->restart(substream);
 +              }
 +      }
        sync_ptr.s.status.state = status->state;
        sync_ptr.s.status.hw_ptr = status->hw_ptr;
        sync_ptr.s.status.tstamp = status->tstamp;
@@@ -2871,16 -2829,6 +2871,16 @@@ static int snd_pcm_common_ioctl1(struc
                snd_pcm_stream_unlock_irq(substream);
                return res;
        }
 +      case SNDRV_COMPRESS_GET_CAPS:
 +      case SNDRV_COMPRESS_GET_CODEC_CAPS:
 +      case SNDRV_COMPRESS_SET_PARAMS:
 +      case SNDRV_COMPRESS_GET_PARAMS:
 +      case SNDRV_COMPRESS_TSTAMP:
 +      case SNDRV_COMPRESS_DRAIN:
 +              return snd_compressed_ioctl(substream, cmd, arg);
 +      default:
 +              if (((cmd >> 8) & 0xff) == 'U')
 +                      return snd_user_ioctl(substream, cmd, arg);
        }
        pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
        return -ENOTTY;
@@@ -3050,12 -2998,10 +3050,12 @@@ static long snd_pcm_playback_ioctl(stru
                                   unsigned long arg)
  {
        struct snd_pcm_file *pcm_file;
 +      unsigned char ioctl_magic;
  
        pcm_file = file->private_data;
 +      ioctl_magic = ((cmd >> 8) & 0xff);
  
 -      if (((cmd >> 8) & 0xff) != 'A')
 +      if (ioctl_magic != 'A' && ioctl_magic != 'C' && ioctl_magic != 'U')
                return -ENOTTY;
  
        return snd_pcm_playback_ioctl1(file, pcm_file->substream, cmd,
@@@ -3066,12 -3012,10 +3066,12 @@@ static long snd_pcm_capture_ioctl(struc
                                  unsigned long arg)
  {
        struct snd_pcm_file *pcm_file;
 +      unsigned char ioctl_magic;
  
        pcm_file = file->private_data;
 +      ioctl_magic = ((cmd >> 8) & 0xff);
  
 -      if (((cmd >> 8) & 0xff) != 'A')
 +      if (ioctl_magic != 'A' && ioctl_magic != 'U')
                return -ENOTTY;
  
        return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd,
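
The ioctl-dispatch hunks above filter on the ioctl "magic" byte, which is what _IOC_TYPE(cmd) extracts: 'A' is the standard ALSA PCM magic, while this tree additionally lets 'C' (the compress-offload requests routed to snd_compressed_ioctl) and 'U' (treated here as a user/vendor range) through on the paths shown. A minimal sketch of the extraction (illustrative only):

  #include <stdio.h>

  /* The magic byte occupies bits 8..15 of the ioctl number. */
  static unsigned char ioctl_magic(unsigned int cmd)
  {
          return (cmd >> 8) & 0xff;
  }

  int main(void)
  {
          printf("%c\n", ioctl_magic(0x4142));    /* 0x41 in bits 8..15 -> 'A' */
          return 0;
  }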
diff --combined sound/soc/soc-pcm.c
@@@ -25,7 -25,6 +25,7 @@@
  #include <linux/workqueue.h>
  #include <linux/export.h>
  #include <linux/debugfs.h>
 +#include <linux/dma-mapping.h>
  #include <sound/core.h>
  #include <sound/pcm.h>
  #include <sound/pcm_params.h>
@@@ -53,26 -52,6 +53,26 @@@ static bool snd_soc_dai_stream_valid(st
        return codec_stream->channels_min;
  }
  
 +static const struct snd_pcm_hardware no_host_hardware = {
 +      .info                   = SNDRV_PCM_INFO_MMAP |
 +                                      SNDRV_PCM_INFO_MMAP_VALID |
 +                                      SNDRV_PCM_INFO_INTERLEAVED |
 +                                      SNDRV_PCM_INFO_PAUSE |
 +                                      SNDRV_PCM_INFO_RESUME,
 +      .formats                = SNDRV_PCM_FMTBIT_S16_LE |
 +                                      SNDRV_PCM_FMTBIT_S32_LE,
 +      .period_bytes_min       = PAGE_SIZE >> 2,
 +      .period_bytes_max       = PAGE_SIZE >> 1,
 +      .periods_min            = 2,
 +      .periods_max            = 4,
 +      /*
 +       * Increase the max buffer bytes as PAGE_SIZE bytes is
 +       * not enough to encompass all the scenarios sent by
 +       * userspace.
 +       */
 +      .buffer_bytes_max       = PAGE_SIZE * 4,
 +};
 +
  /**
   * snd_soc_runtime_activate() - Increment active count for PCM runtime components
   * @rtd: ASoC PCM runtime that is activated
@@@ -177,8 -156,6 +177,8 @@@ int snd_soc_set_runtime_hwparams(struc
        const struct snd_pcm_hardware *hw)
  {
        struct snd_pcm_runtime *runtime = substream->runtime;
 +      if (!runtime)
 +              return 0;
        runtime->hw.info = hw->info;
        runtime->hw.formats = hw->formats;
        runtime->hw.period_bytes_min = hw->period_bytes_min;
@@@ -205,10 -182,8 +205,10 @@@ int dpcm_dapm_stream_event(struct snd_s
                                be->dai_link->name, event, dir);
  
                if ((event == SND_SOC_DAPM_STREAM_STOP) &&
 -                  (be->dpcm[dir].users >= 1))
 +                  (be->dpcm[dir].users >= 1)) {
 +                      pr_debug("%s Don't close BE \n", __func__);
                        continue;
 +              }
  
                snd_soc_dapm_stream_event(be, dir, event);
        }
@@@ -493,8 -468,6 +493,8 @@@ static int soc_pcm_open(struct snd_pcm_
        pm_runtime_get_sync(platform->dev);
  
        mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 +      if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST)
 +              snd_soc_set_runtime_hwparams(substream, &no_host_hardware);
  
        /* startup the audio subsystem */
        if (cpu_dai->driver->ops && cpu_dai->driver->ops->startup) {
@@@ -625,7 -598,7 +625,7 @@@ codec_dai_err
                platform->driver->ops->close(substream);
  
  platform_err:
 -      if (cpu_dai->driver->ops->shutdown)
 +      if (cpu_dai->driver->ops && cpu_dai->driver->ops->shutdown)
                cpu_dai->driver->ops->shutdown(substream, cpu_dai);
  out:
        mutex_unlock(&rtd->pcm_mutex);
@@@ -701,20 -674,6 +701,20 @@@ static int soc_pcm_close(struct snd_pcm
  
        snd_soc_dai_digital_mute(cpu_dai, 1, substream->stream);
  
 +      if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 +              if (snd_soc_runtime_ignore_pmdown_time(rtd)) {
 +                      /* powered down playback stream now */
 +                      snd_soc_dapm_stream_event(rtd,
 +                                                SNDRV_PCM_STREAM_PLAYBACK,
 +                                                SND_SOC_DAPM_STREAM_STOP);
 +              } else {
 +                      /* start delayed pop wq here for playback streams */
 +                      rtd->pop_wait = 1;
 +                      queue_delayed_work(system_power_efficient_wq,
 +                                         &rtd->delayed_work,
 +                                         msecs_to_jiffies(rtd->pmdown_time));
 +              }
 +      }
        if (cpu_dai->driver->ops->shutdown)
                cpu_dai->driver->ops->shutdown(substream, cpu_dai);
  
        if (platform->driver->ops && platform->driver->ops->close)
                platform->driver->ops->close(substream);
  
 -      if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 -              if (snd_soc_runtime_ignore_pmdown_time(rtd)) {
 -                      /* powered down playback stream now */
 -                      snd_soc_dapm_stream_event(rtd,
 -                                                SNDRV_PCM_STREAM_PLAYBACK,
 -                                                SND_SOC_DAPM_STREAM_STOP);
 -              } else {
 -                      /* start delayed pop wq here for playback streams */
 -                      rtd->pop_wait = 1;
 -                      queue_delayed_work(system_power_efficient_wq,
 -                                         &rtd->delayed_work,
 -                                         msecs_to_jiffies(rtd->pmdown_time));
 -              }
 -      } else {
 +      if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) {
                /* capture streams can be powered down now */
                snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
                                          SND_SOC_DAPM_STREAM_STOP);
@@@ -767,11 -739,6 +767,11 @@@ static int soc_pcm_prepare(struct snd_p
  
        mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
  
 +      if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 +              snd_soc_dapm_stream_event(rtd,
 +              SNDRV_PCM_STREAM_PLAYBACK,
 +              SND_SOC_DAPM_STREAM_START);
 +
        if (rtd->dai_link->ops && rtd->dai_link->ops->prepare) {
                ret = rtd->dai_link->ops->prepare(substream);
                if (ret < 0) {
                cancel_delayed_work(&rtd->delayed_work);
        }
  
 -      snd_soc_dapm_stream_event(rtd, substream->stream,
 -                      SND_SOC_DAPM_STREAM_START);
 +      if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
 +              for (i = 0; i < rtd->num_codecs; i++) {
 +                      codec_dai = rtd->codec_dais[i];
 +                      if (codec_dai->capture_active == 1)
 +                              snd_soc_dapm_stream_event(rtd,
 +                              SNDRV_PCM_STREAM_CAPTURE,
 +                              SND_SOC_DAPM_STREAM_START);
 +              }
 +      }
  
        for (i = 0; i < rtd->num_codecs; i++)
                snd_soc_dai_digital_mute(rtd->codec_dais[i], 0,
        snd_soc_dai_digital_mute(cpu_dai, 0, substream->stream);
  
  out:
 +      if (ret < 0 && substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 +              pr_err("%s: Issue stop stream for codec_dai due to op failure %d = ret\n",
 +              __func__, ret);
 +              snd_soc_dapm_stream_event(rtd,
 +              SNDRV_PCM_STREAM_PLAYBACK,
 +              SND_SOC_DAPM_STREAM_STOP);
 +      }
        mutex_unlock(&rtd->pcm_mutex);
        return ret;
  }
@@@ -891,31 -844,10 +891,31 @@@ static int soc_pcm_hw_params(struct snd
  
        mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
  
 +      /* perform any hw_params fixups */
 +      if ((rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST) &&
 +                              rtd->dai_link->be_hw_params_fixup) {
 +              ret = rtd->dai_link->be_hw_params_fixup(rtd,
 +                              params);
 +              if (ret < 0)
 +                      dev_err(rtd->card->dev, "ASoC: fixup failed for %s\n",
 +                      rtd->dai_link->name);
 +      }
 +
        ret = soc_pcm_params_symmetry(substream, params);
        if (ret)
                goto out;
  
 +      /* perform any hw_params fixups */
 +      if ((rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST) &&
 +                              rtd->dai_link->be_hw_params_fixup) {
 +              ret = rtd->dai_link->be_hw_params_fixup(rtd,
 +                              params);
 +              if (ret < 0) {
 +                      dev_err(rtd->card->dev, "ASoC: fixup failed for %s\n",
 +                      rtd->dai_link->name);
 +              }
 +      }
 +
        if (rtd->dai_link->ops && rtd->dai_link->ops->hw_params) {
                ret = rtd->dai_link->ops->hw_params(substream, params);
                if (ret < 0) {
        cpu_dai->channels = params_channels(params);
        cpu_dai->sample_bits =
                snd_pcm_format_physical_width(params_format(params));
 +      /* malloc a page for hostless IO.
 +       * FIXME: rework with alsa-lib changes so that this malloc is not required.
 +       */
 +      if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST) {
 +              substream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV;
 +              substream->dma_buffer.dev.dev = rtd->dev;
 +              substream->dma_buffer.dev.dev->coherent_dma_mask =
 +                                      DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
 +              substream->dma_buffer.private_data = NULL;
 +
 +              arch_setup_dma_ops(substream->dma_buffer.dev.dev,
 +                                 0, 0, NULL, 0);
 +              ret = snd_pcm_lib_malloc_pages(substream, PAGE_SIZE);
 +              if (ret < 0)
 +                      goto platform_err;
 +      }
  
  out:
        mutex_unlock(&rtd->pcm_mutex);
@@@ -1087,9 -1003,6 +1087,9 @@@ static int soc_pcm_hw_free(struct snd_p
        if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_free)
                cpu_dai->driver->ops->hw_free(substream, cpu_dai);
  
 +      if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST)
 +              snd_pcm_lib_free_pages(substream);
 +
        mutex_unlock(&rtd->pcm_mutex);
        return 0;
  }
@@@ -1186,9 -1099,6 +1186,9 @@@ static snd_pcm_uframes_t soc_pcm_pointe
        if (platform->driver->ops && platform->driver->ops->pointer)
                offset = platform->driver->ops->pointer(substream);
  
 +      if (platform->driver->delay_blk)
 +              return offset;
 +
        if (cpu_dai->driver->ops && cpu_dai->driver->ops->delay)
                delay += cpu_dai->driver->ops->delay(substream, cpu_dai);
  
        return offset;
  }
  
 +static int soc_pcm_delay_blk(struct snd_pcm_substream *substream)
 +{
 +      struct snd_soc_pcm_runtime *rtd = substream->private_data;
 +      struct snd_soc_platform *platform = rtd->platform;
 +      struct snd_pcm_runtime *runtime = substream->runtime;
 +      snd_pcm_sframes_t delay = 0;
 +
 +      if (platform->driver->delay_blk)
 +              delay = platform->driver->delay_blk(substream,
 +                              rtd->codec_dais[0]);
 +
 +      runtime->delay = delay;
 +
 +      return 0;
 +}
 +
  /* connect a FE and BE */
  static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
                struct snd_soc_pcm_runtime *be, int stream)
@@@ -1335,11 -1229,7 +1335,11 @@@ static struct snd_soc_pcm_runtime *dpcm
                        if (!be->dai_link->no_pcm)
                                continue;
  
 -                      if (be->cpu_dai->playback_widget == widget)
 +                      if ((be->cpu_dai->playback_widget == widget &&
 +                              (be->dai_link->stream_name &&
 +                              !strcmp(be->dai_link->stream_name,
 +                                  be->cpu_dai->playback_widget->sname))) ||
 +                              be->codec_dai->playback_widget == widget)
                                return be;
  
                        for (j = 0; j < be->num_codecs; j++) {
                        if (!be->dai_link->no_pcm)
                                continue;
  
 -                      if (be->cpu_dai->capture_widget == widget)
 +                      if ((be->cpu_dai->capture_widget == widget &&
 +                              (be->dai_link->stream_name &&
 +                              !strcmp(be->dai_link->stream_name,
 +                                  be->cpu_dai->capture_widget->sname))) ||
 +                              be->codec_dai->capture_widget == widget)
                                return be;
  
                        for (j = 0; j < be->num_codecs; j++) {
@@@ -1830,14 -1716,14 +1830,14 @@@ static int dpcm_fe_dai_shutdown(struct 
  
        dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
  
 -      /* shutdown the BEs */
 -      dpcm_be_dai_shutdown(fe, substream->stream);
 -
        dev_dbg(fe->dev, "ASoC: close FE %s\n", fe->dai_link->name);
  
        /* now shutdown the frontend */
        soc_pcm_close(substream);
  
 +      /* shutdown the BEs */
 +      dpcm_be_dai_shutdown(fe, substream->stream);
 +
        /* run the stream event for each BE */
        dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
  
@@@ -1916,81 -1802,6 +1916,81 @@@ static int dpcm_fe_dai_hw_free(struct s
        return 0;
  }
  
 +int dpcm_fe_dai_hw_params_be(struct snd_soc_pcm_runtime *fe,
 +      struct snd_soc_pcm_runtime *be,
 +      struct snd_pcm_hw_params *params, int stream)
 +{
 +      int ret;
 +      struct snd_soc_dpcm *dpcm;
 +      struct snd_pcm_substream *be_substream =
 +              snd_soc_dpcm_get_substream(be, stream);
 +
 +      /* is this op for this BE ? */
 +      if (!snd_soc_dpcm_be_can_update(fe, be, stream))
 +              return 0;
 +
 +      /* only allow hw_params() if no connected FEs are running */
 +      if (!snd_soc_dpcm_can_be_params(fe, be, stream))
 +              return 0;
 +
 +      if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
 +                      (be->dpcm[stream].state !=
 +                              SND_SOC_DPCM_STATE_HW_PARAMS) &&
 +                      (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE))
 +              return 0;
 +
 +      dev_dbg(be->dev, "ASoC: hw_params BE %s\n",
 +                      fe->dai_link->name);
 +
 +      /* perform any hw_params fixups */
 +      if (be->dai_link->be_hw_params_fixup) {
 +              ret = be->dai_link->be_hw_params_fixup(be,
 +                              params);
 +              if (ret < 0) {
 +                      dev_err(be->dev,
 +                                      "ASoC: hw_params BE fixup failed %d\n",
 +                                      ret);
 +                      goto unwind;
 +              }
 +      }
 +
 +      ret = soc_pcm_hw_params(be_substream, params);
 +      if (ret < 0) {
 +              dev_err(be->dev, "ASoC: hw_params BE failed %d\n", ret);
 +              goto unwind;
 +      }
 +
 +      be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
 +      return 0;
 +
 +unwind:
 +      /* disable any enabled and non active backends */
 +      list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
 +              struct snd_soc_pcm_runtime *be = dpcm->be;
 +              struct snd_pcm_substream *be_substream =
 +                      snd_soc_dpcm_get_substream(be, stream);
 +
 +              if (!snd_soc_dpcm_be_can_update(fe, be, stream))
 +                      continue;
 +
 +              /* only allow hw_free() if no connected FEs are running */
 +              if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
 +                      continue;
 +
 +              if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
 +                      (be->dpcm[stream].state
 +                              != SND_SOC_DPCM_STATE_HW_PARAMS) &&
 +                      (be->dpcm[stream].state
 +                              != SND_SOC_DPCM_STATE_HW_FREE) &&
 +                      (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
 +                      continue;
 +
 +              soc_pcm_hw_free(be_substream);
 +      }
 +
 +      return ret;
 +}
 +
  int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
  {
        struct snd_soc_dpcm *dpcm;
@@@ -2215,42 -2026,81 +2215,81 @@@ int dpcm_be_dai_trigger(struct snd_soc_
  }
  EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
  
+ static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream,
+                                 int cmd, bool fe_first)
+ {
+       struct snd_soc_pcm_runtime *fe = substream->private_data;
+       int ret;
+       /* call trigger on the frontend before the backend. */
+       if (fe_first) {
+               dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
+                       fe->dai_link->name, cmd);
+               ret = soc_pcm_trigger(substream, cmd);
+               if (ret < 0)
+                       return ret;
+               ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
+               return ret;
+       }
+       /* call trigger on the frontend after the backend. */
+       ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
+       if (ret < 0)
+               return ret;
+       dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
+               fe->dai_link->name, cmd);
+       ret = soc_pcm_trigger(substream, cmd);
+       return ret;
+ }
  static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
  {
        struct snd_soc_pcm_runtime *fe = substream->private_data;
-       int stream = substream->stream, ret;
+       int stream = substream->stream;
+       int ret = 0;
        enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
  
        fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
  
        switch (trigger) {
        case SND_SOC_DPCM_TRIGGER_PRE:
-               /* call trigger on the frontend before the backend. */
-               dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
-                               fe->dai_link->name, cmd);
-               ret = soc_pcm_trigger(substream, cmd);
-               if (ret < 0) {
-                       dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
-                       goto out;
+               switch (cmd) {
+               case SNDRV_PCM_TRIGGER_START:
+               case SNDRV_PCM_TRIGGER_RESUME:
+               case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+                       ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
+                       break;
+               case SNDRV_PCM_TRIGGER_STOP:
+               case SNDRV_PCM_TRIGGER_SUSPEND:
+               case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+                       ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
+                       break;
+               default:
+                       ret = -EINVAL;
+                       break;
                }
-               ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
                break;
        case SND_SOC_DPCM_TRIGGER_POST:
-               /* call trigger on the frontend after the backend. */
-               ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
-               if (ret < 0) {
-                       dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
-                       goto out;
+               switch (cmd) {
+               case SNDRV_PCM_TRIGGER_START:
+               case SNDRV_PCM_TRIGGER_RESUME:
+               case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+                       ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
+                       break;
+               case SNDRV_PCM_TRIGGER_STOP:
+               case SNDRV_PCM_TRIGGER_SUSPEND:
+               case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+                       ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
+                       break;
+               default:
+                       ret = -EINVAL;
+                       break;
                }
-               dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
-                               fe->dai_link->name, cmd);
-               ret = soc_pcm_trigger(substream, cmd);
                break;
        case SND_SOC_DPCM_TRIGGER_BESPOKE:
                /* bespoke trigger() - handles both FE and BEs */
                                fe->dai_link->name, cmd);
  
                ret = soc_pcm_bespoke_trigger(substream, cmd);
-               if (ret < 0) {
-                       dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
-                       goto out;
-               }
                break;
        default:
                dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
                goto out;
        }
  
+       if (ret < 0) {
+               dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n",
+                       cmd, ret);
+               goto out;
+       }
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        return ret;
  }
  
 +int dpcm_fe_dai_prepare_be(struct snd_soc_pcm_runtime *fe,
 +              struct snd_soc_pcm_runtime *be, int stream)
 +{
 +      struct snd_pcm_substream *be_substream =
 +              snd_soc_dpcm_get_substream(be, stream);
 +      int ret = 0;
 +
 +      /* is this op for this BE ? */
 +      if (!snd_soc_dpcm_be_can_update(fe, be, stream))
 +              return 0;
 +
 +      if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
 +                      (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
 +              return 0;
 +
 +      dev_dbg(be->dev, "ASoC: prepare BE %s\n",
 +                      fe->dai_link->name);
 +
 +      ret = soc_pcm_prepare(be_substream);
 +      if (ret < 0) {
 +              dev_err(be->dev, "ASoC: backend prepare failed %d\n",
 +                              ret);
 +              return ret;
 +      }
 +
 +      be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
 +      return ret;
 +}
 +
  static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
  {
        struct snd_soc_pcm_runtime *fe = substream->private_data;
@@@ -2371,94 -2194,13 +2412,94 @@@ int dpcm_be_dai_prepare(struct snd_soc_
        return ret;
  }
  
 +static void dpcm_be_async_prepare(void *data, async_cookie_t cookie)
 +{
 +      struct snd_soc_dpcm *dpcm = data;
 +      struct snd_soc_pcm_runtime *be = dpcm->be;
 +      int stream = dpcm->stream;
 +      struct snd_pcm_substream *be_substream =
 +              snd_soc_dpcm_get_substream(be, stream);
 +      int ret;
 +
 +      dev_dbg(be->dev, "%s ASoC: prepare BE %s\n", __func__,
 +                                      dpcm->fe->dai_link->name);
 +      ret = soc_pcm_prepare(be_substream);
 +      if (ret < 0) {
 +              be->err_ops = ret;
 +              dev_err(be->dev, "ASoC: backend prepare failed %d\n",
 +                              ret);
 +              return;
 +      }
 +      be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
 +}
 +
 +void dpcm_be_dai_prepare_async(struct snd_soc_pcm_runtime *fe, int stream,
 +                                          struct async_domain *domain)
 +{
 +      struct snd_soc_dpcm *dpcm;
 +      struct snd_soc_dpcm *dpcm_async[DPCM_MAX_BE_USERS];
 +      int i = 0, j;
 +
 +      list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
 +              struct snd_soc_pcm_runtime *be = dpcm->be;
 +
 +              be->err_ops = 0;
 +              /* is this op for this BE ? */
 +              if (!snd_soc_dpcm_be_can_update(fe, be, stream))
 +                      continue;
 +
 +              if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
 +                      (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
 +                      continue;
 +
 +              /* does this BE support async op ?*/
 +              if ((fe->dai_link->async_ops & ASYNC_DPCM_SND_SOC_PREPARE) &&
 +                  (be->dai_link->async_ops & ASYNC_DPCM_SND_SOC_PREPARE)) {
 +                      dpcm->stream = stream;
 +                      async_schedule_domain(dpcm_be_async_prepare,
 +                                                          dpcm, domain);
 +              } else {
 +                      dpcm_async[i++] = dpcm;
 +                      if (i == DPCM_MAX_BE_USERS) {
 +                              dev_dbg(fe->dev, "ASoC: MAX backend users!\n");
 +                              break;
 +                      }
 +              }
 +      }
 +
 +      for (j = 0; j < i; j++) {
 +              struct snd_soc_dpcm *dpcm = dpcm_async[j];
 +              struct snd_soc_pcm_runtime *be = dpcm->be;
 +              struct snd_pcm_substream *be_substream =
 +                      snd_soc_dpcm_get_substream(be, stream);
 +              int ret;
 +
 +              dev_dbg(be->dev, "ASoC: prepare BE %s\n",
 +                              dpcm->fe->dai_link->name);
 +
 +              ret = soc_pcm_prepare(be_substream);
 +              if (ret < 0) {
 +                      dev_err(be->dev, "ASoC: backend prepare failed %d\n",
 +                                      ret);
 +                      be->err_ops = ret;
 +                      return;
 +              }
 +
 +              be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
 +      }
 +}
 +
  static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
  {
        struct snd_soc_pcm_runtime *fe = substream->private_data;
 +      struct snd_soc_dpcm *dpcm;
        int stream = substream->stream, ret = 0;
 +      ASYNC_DOMAIN_EXCLUSIVE(async_domain);
  
        mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
  
 +      fe->err_ops = 0;
 +
        dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
  
        dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
                goto out;
        }
  
 -      ret = dpcm_be_dai_prepare(fe, substream->stream);
 -      if (ret < 0)
 -              goto out;
 +      if (!(fe->dai_link->async_ops & ASYNC_DPCM_SND_SOC_PREPARE)) {
 +              ret = dpcm_be_dai_prepare(fe, substream->stream);
 +              if (ret < 0)
 +                      goto out;
 +              /* call prepare on the frontend */
 +              ret = soc_pcm_prepare(substream);
 +              if (ret < 0) {
 +                      dev_err(fe->dev, "ASoC: prepare FE %s failed\n",
 +                                      fe->dai_link->name);
 +                      goto out;
 +              }
 +      } else {
 +              dpcm_be_dai_prepare_async(fe, substream->stream,
 +                                                      &async_domain);
  
 -      /* call prepare on the frontend */
 -      ret = soc_pcm_prepare(substream);
 -      if (ret < 0) {
 -              dev_err(fe->dev,"ASoC: prepare FE %s failed\n",
 -                      fe->dai_link->name);
 -              goto out;
 +              /* call prepare on the frontend */
 +              ret = soc_pcm_prepare(substream);
 +              if (ret < 0) {
 +                      fe->err_ops = ret;
 +                      dev_err(fe->dev, "ASoC: prepare FE %s failed\n",
 +                                      fe->dai_link->name);
 +              }
 +
 +              async_synchronize_full_domain(&async_domain);
 +
 +              /* check if any BE failed */
 +              list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients,
 +                                                          list_be) {
 +                      struct snd_soc_pcm_runtime *be = dpcm->be;
 +
 +                      if (be->err_ops < 0) {
 +                              ret = be->err_ops;
 +                              goto out;
 +                      }
 +              }
 +
 +              /* check if FE failed */
 +              if (fe->err_ops < 0) {
 +                      ret = fe->err_ops;
 +                      goto out;
 +              }
        }
  
        /* run the stream event for each BE */
        return ret;
  }
  
 +static int soc_pcm_compat_ioctl(struct snd_pcm_substream *substream,
 +                   unsigned int cmd, void *arg)
 +{
 +      struct snd_soc_pcm_runtime *rtd = substream->private_data;
 +      struct snd_soc_platform *platform = rtd->platform;
 +
 +      if (platform->driver->ops->compat_ioctl)
 +              return platform->driver->ops->compat_ioctl(substream,
 +                      cmd, arg);
 +      return snd_pcm_lib_ioctl(substream, cmd, arg);
 +}
 +
  static int soc_pcm_ioctl(struct snd_pcm_substream *substream,
                     unsigned int cmd, void *arg)
  {
@@@ -2965,27 -2664,9 +3006,27 @@@ int soc_new_pcm(struct snd_soc_pcm_runt
                        pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
                if (capture)
                        pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
 +              if (platform->driver->pcm_new)
 +                      rtd->platform->driver->pcm_new(rtd);
                goto out;
        }
  
 +      /* setup any hostless PCMs - i.e. no host IO is performed */
 +      if (rtd->dai_link->no_host_mode) {
 +              if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 +                      pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->hw_no_buffer = 1;
 +                      snd_soc_set_runtime_hwparams(
 +                              pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
 +                              &no_host_hardware);
 +              }
 +              if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
 +                      pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->hw_no_buffer = 1;
 +                      snd_soc_set_runtime_hwparams(
 +                              pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
 +                              &no_host_hardware);
 +              }
 +      }
 +
        /* ASoC PCM operations */
        if (rtd->dai_link->dynamic) {
                rtd->ops.open           = dpcm_fe_dai_open;
                rtd->ops.hw_free        = dpcm_fe_dai_hw_free;
                rtd->ops.close          = dpcm_fe_dai_close;
                rtd->ops.pointer        = soc_pcm_pointer;
 +              rtd->ops.delay_blk      = soc_pcm_delay_blk;
                rtd->ops.ioctl          = soc_pcm_ioctl;
 +              rtd->ops.compat_ioctl   = soc_pcm_compat_ioctl;
        } else {
                rtd->ops.open           = soc_pcm_open;
                rtd->ops.hw_params      = soc_pcm_hw_params;
                rtd->ops.hw_free        = soc_pcm_hw_free;
                rtd->ops.close          = soc_pcm_close;
                rtd->ops.pointer        = soc_pcm_pointer;
 +              rtd->ops.delay_blk      = soc_pcm_delay_blk;
                rtd->ops.ioctl          = soc_pcm_ioctl;
 +              rtd->ops.compat_ioctl   = soc_pcm_compat_ioctl;
        }
  
        if (platform->driver->ops) {
  
        pcm->private_free = platform->driver->pcm_free;
  out:
 -      dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
 +      dev_dbg(rtd->card->dev, "%s <-> %s mapping ok\n",
                 (rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name,
                 cpu_dai->name);
        return ret;
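
Looking back at the dpcm_dai_trigger_fe_be() refactor earlier in this file: for a SND_SOC_DPCM_TRIGGER_PRE link, start-class commands trigger the frontend before the backends and stop-class commands do the reverse, while SND_SOC_DPCM_TRIGGER_POST inverts both. A small sketch of that decision, assuming only the command groupings visible in the hunk:

  #include <stdbool.h>

  /* Illustrative only: mirrors the switch statements in
   * dpcm_fe_dai_do_trigger(), deciding whether the FE is triggered first.
   * start_class_cmd covers START/RESUME/PAUSE_RELEASE; the stop class
   * covers STOP/SUSPEND/PAUSE_PUSH. */
  static bool trigger_fe_first(bool trigger_pre, bool start_class_cmd)
  {
          /* _PRE: FE first on start, BE first on stop; _POST: the opposite. */
          return trigger_pre ? start_class_cmd : !start_class_cmd;
  }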