OSDN Git Service

Merge "msm: mdss: Fix potential null pointer dereference"
authorLinux Build Service Account <lnxbuild@localhost>
Sat, 27 Oct 2018 19:48:38 +0000 (12:48 -0700)
committerGerrit - the friendly Code Review server <code-review@localhost>
Sat, 27 Oct 2018 19:48:38 +0000 (12:48 -0700)
363 files changed:
Documentation/ABI/testing/sysfs-fs-f2fs
Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
Documentation/devicetree/bindings/net/macb.txt
Documentation/filesystems/f2fs.txt
Documentation/hwmon/ina2xx
Documentation/kernel-parameters.txt
Makefile
arch/arc/Makefile
arch/arc/kernel/process.c
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/qcom/apq8096-v3-auto-adp.dts
arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts
arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp.dts
arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi
arch/arm/boot/dts/qcom/msm8996-v3-auto-adp.dts
arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts
arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts
arch/arm/boot/dts/qcom/msm8998.dtsi
arch/arm/boot/dts/qcom/sdm630-mdss-pll.dtsi
arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
arch/arm/boot/dts/qcom/sdm630.dtsi
arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
arch/arm/boot/dts/qcom/sdm660-mdss-pll.dtsi
arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
arch/arm/boot/dts/qcom/sdm660.dtsi
arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts
arch/arm/boot/dts/sama5d3_emac.dtsi
arch/arm/mach-mvebu/pmsu.c
arch/arm64/configs/msm-auto-gvm-perf_defconfig
arch/arm64/configs/msm-auto-gvm_defconfig
arch/arm64/configs/msm-auto-perf_defconfig
arch/arm64/configs/msm-auto_defconfig
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/jump_label.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kvm/guest.c
arch/hexagon/include/asm/bitops.h
arch/hexagon/kernel/dma.c
arch/powerpc/kernel/fadump.c
arch/powerpc/kernel/machine_kexec.c
arch/powerpc/kernel/tm.S
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/s390/mm/extmem.c
arch/x86/configs/x86_64_cuttlefish_defconfig
arch/x86/crypto/crc32c-intel_glue.c
arch/x86/entry/entry_64.S
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/fpu/types.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/tsc_msr.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/x86.c
arch/x86/mm/numa_emulation.c
arch/x86/xen/pmu.c
crypto/ablkcipher.c
crypto/blkcipher.c
drivers/base/power/main.c
drivers/block/floppy.c
drivers/bluetooth/btusb.c
drivers/clk/msm/mdss/mdss-hdmi-pll-8998.c
drivers/clocksource/timer-ti-32k.c
drivers/crypto/mxs-dcp.c
drivers/edac/i7core_edac.c
drivers/gpio/Makefile
drivers/gpio/gpio-adp5588.c
drivers/gpio/gpio-msm-smp2p-test.c [deleted file]
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c
drivers/gpu/drm/msm/sde/sde_plane.c
drivers/gpu/drm/msm/sde_hdcp.h
drivers/gpu/drm/msm/sde_hdcp_1x.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
drivers/gpu/msm/kgsl.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-ntrig.c
drivers/hid/hid-sony.c
drivers/hv/hv_fcopy.c
drivers/hv/hv_kvp.c
drivers/hv/hv_snapshot.c
drivers/hv/hv_util.c
drivers/hv/hyperv_vmbus.h
drivers/hwmon/adt7475.c
drivers/hwmon/ina2xx.c
drivers/hwtracing/coresight/coresight-etm4x.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-msm-v2.c
drivers/i2c/busses/i2c-scmi.c
drivers/i2c/busses/i2c-uniphier-f.c
drivers/i2c/busses/i2c-uniphier.c
drivers/infiniband/core/ucma.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/keyboard/atakbd.c
drivers/input/mouse/elantech.c
drivers/md/dm-cache-target.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/md-cluster.c
drivers/md/raid10.c
drivers/media/i2c/soc_camera/ov772x.c
drivers/media/platform/exynos4-is/fimc-isp-video.c
drivers/media/platform/fsl-viu.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/s3c-camif/camif-capture.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/tm6000/tm6000-dvb.c
drivers/media/usb/uvc/uvc_video.c
drivers/media/v4l2-core/v4l2-event.c
drivers/media/v4l2-core/v4l2-fh.c
drivers/mfd/omap-usb-host.c
drivers/misc/hdcp.c
drivers/misc/tsl2550.c
drivers/misc/vmw_vmci/vmci_queue_pair.c
drivers/net/appletalk/ipddp.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hp/hp100.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/team/team.c
drivers/net/usb/smsc75xx.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/trace.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/cnss2/Makefile
drivers/net/wireless/cnss2/bus.c
drivers/net/wireless/cnss2/bus.h
drivers/net/wireless/cnss2/main.c
drivers/net/wireless/cnss2/main.h
drivers/net/wireless/cnss2/pci.c
drivers/net/wireless/cnss2/qmi.c
drivers/net/wireless/cnss2/qmi.h
drivers/net/wireless/cnss2/usb.c [new file with mode: 0644]
drivers/net/wireless/cnss2/usb.h [new file with mode: 0644]
drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/wcnss/wcnss_wlan.c
drivers/net/xen-netfront.c
drivers/of/unittest.c
drivers/pci/pci.c
drivers/platform/msm/ipa/ipa_v2/ipa.c
drivers/platform/msm/ipa/ipa_v2/ipa_i.h
drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
drivers/platform/msm/ipa/ipa_v2/ipa_reg.h
drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
drivers/platform/x86/alienware-wmi.c
drivers/power/reset/vexpress-poweroff.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/soc/qcom/Makefile
drivers/soc/qcom/hab/hab.c
drivers/soc/qcom/hab/hab.h
drivers/soc/qcom/hab/hab_ghs.c
drivers/soc/qcom/hab/hab_mem_linux.c
drivers/soc/qcom/hab/hab_mimex.c
drivers/soc/qcom/hab/hab_msg.c
drivers/soc/qcom/hab/hab_open.c
drivers/soc/qcom/hab/hab_vchan.c
drivers/soc/qcom/qdsp6v2/apr_vm.c
drivers/soc/qcom/qdsp6v2/audio-anc-dev-mgr.c
drivers/soc/qcom/qdsp6v2/audio_anc.c
drivers/soc/qcom/qdsp6v2/sdsp-anc.c
drivers/soc/qcom/smp2p_spinlock_test.c [deleted file]
drivers/soc/qcom/smp2p_test.c [deleted file]
drivers/soc/qcom/smp2p_test_common.h [deleted file]
drivers/soc/qcom/subsystem_notif_virt.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-tegra20-slink.c
drivers/staging/android/ashmem.c
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_priv.h
drivers/staging/android/ion/ion_system_heap.c
drivers/staging/android/ion/msm/msm_ion.c
drivers/staging/rts5208/sd.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/thermal/of-thermal.c
drivers/tty/serial/8250/serial_cs.c
drivers/tty/serial/cpm_uart/cpm_uart_core.c
drivers/tty/serial/imx.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/devio.c
drivers/usb/core/driver.c
drivers/usb/core/usb.c
drivers/usb/dwc3/dwc3-msm.c
drivers/usb/gadget/function/f_mtp.c
drivers/usb/gadget/function/u_serial.c
drivers/usb/gadget/udc/fotg210-udc.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/misc/Kconfig
drivers/usb/misc/yurex.c
drivers/usb/serial/kobil_sct.c
drivers/usb/serial/usb-serial-simple.c
drivers/usb/wusbcore/security.c
drivers/uwb/hwa-rc.c
drivers/video/fbdev/aty/atyfb.h
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/aty/mach64_ct.c
drivers/video/fbdev/msm/mdss_dsi.c
drivers/video/fbdev/msm/mdss_dsi.h
drivers/video/fbdev/msm/mdss_dsi_panel.c
drivers/video/fbdev/msm/mdss_dsi_phy.c
drivers/video/fbdev/msm/mdss_dsi_phy.h
drivers/video/fbdev/msm/mdss_fb.c
drivers/video/fbdev/msm/mdss_panel.h
drivers/video/fbdev/msm/msm_mdss_io_8974.c
drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
drivers/xen/cpu_hotplug.c
drivers/xen/events/events_base.c
drivers/xen/manage.c
fs/cifs/cifs_unicode.c
fs/cifs/cifssmb.c
fs/cifs/misc.c
fs/cifs/smb2ops.c
fs/ext4/dir.c
fs/ext4/inline.c
fs/ext4/mmp.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/node.h
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/sysfs.c
fs/f2fs/xattr.c
fs/jffs2/xattr.c
fs/nfsd/nfs4proc.c
fs/ocfs2/buffer_head_io.c
fs/ocfs2/dlm/dlmmaster.c
fs/proc/base.c
fs/sdcardfs/file.c
fs/sdcardfs/inode.c
fs/sdcardfs/lookup.c
fs/sdcardfs/main.c
fs/sdcardfs/sdcardfs.h
fs/sdcardfs/super.c
fs/ubifs/super.c
include/linux/f2fs_fs.h
include/linux/hdcp_qseecom.h
include/linux/hyperv.h
include/linux/ipa.h
include/linux/netdevice.h
include/linux/netfilter_bridge/ebtables.h
include/linux/platform_data/ina2xx.h
include/linux/qdsp6v2/audio-anc-dev-mgr.h
include/linux/qdsp6v2/sdsp_anc.h
include/linux/skbuff.h
include/linux/slub_def.h
include/linux/tcp.h
include/media/v4l2-fh.h
include/net/bonding.h
include/net/cnss2.h
include/net/ip_fib.h
include/net/nfc/hci.h
include/net/sock.h
include/net/tcp.h
include/uapi/linux/habmmid.h
include/uapi/linux/msm_audio_anc.h
include/uapi/linux/msm_kgsl.h
kernel/module.c
kernel/sched/tune.c
kernel/time/alarmtimer.c
kernel/trace/ring_buffer.c
lib/klist.c
mm/madvise.c
mm/shmem.c
mm/slub.c
mm/vmstat.c
net/6lowpan/iphc.c
net/bridge/netfilter/ebt_arpreply.c
net/core/dev.c
net/core/neighbour.c
net/core/rtnetlink.c
net/core/skbuff.c
net/ipv4/af_inet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv6/addrconf.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/tcp_ipv6.c
net/mac80211/cfg.c
net/mac80211/ibss.c
net/mac80211/main.c
net/mac80211/mesh_hwmp.c
net/mac80211/mlme.c
net/netlabel/netlabel_unlabeled.c
net/wireless/nl80211.c
net/wireless/sme.c
net/wireless/util.c
net/xfrm/xfrm_user.c
sound/aoa/core/gpio-feature.c
sound/firewire/bebob/bebob_maudio.c
sound/hda/hdac_controller.c
sound/pci/emu10k1/emufx.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/cs4265.c
sound/soc/codecs/sigmadsp.c
sound/soc/codecs/wm8804-i2c.c
sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
sound/soc/msm/qdsp6v2/q6asm.c
sound/soc/soc-dapm.c
tools/perf/arch/powerpc/util/sym-handling.c
tools/perf/scripts/python/export-to-postgresql.py
tools/testing/selftests/efivarfs/config [new file with mode: 0644]
tools/vm/page-types.c
tools/vm/slabinfo.c

index f82da9b..3bbb9fe 100644 (file)
@@ -51,6 +51,14 @@ Description:
                 Controls the dirty page count condition for the in-place-update
                 policies.
 
+What:          /sys/fs/f2fs/<disk>/min_seq_blocks
+Date:          August 2018
+Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+                Controls the dirty page count condition for batched sequential
+                writes in ->writepages.
+
+
 What:          /sys/fs/f2fs/<disk>/min_hot_blocks
 Date:          March 2017
 Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
index cc55f6e..4d0e1d5 100644 (file)
@@ -165,6 +165,9 @@ Optional properties:
 - qcom,mdss-dsi-border-color:          Defines the border color value if border is present.
                                        0 = default value.
 - qcom,mdss-dsi-pan-enable-dynamic-fps:        Boolean used to enable change in frame rate dynamically.
+- qcom,mdss-dsi-pan-enable-dynamic-bitclk: Boolean used to enable change in DSI clock dynamically.
+- qcom,mdss-dsi-dynamic-bitclk_freq:   An array of integers that specifies the DSI bit clock
+                                       frequencies supported as part of dynamic bit clock feature.
 - qcom,mdss-dsi-pan-fps-update:                A string that specifies when to change the frame rate.
                                        "dfps_suspend_resume_mode"= FPS change request is
                                        implemented during suspend/resume.
@@ -696,6 +699,9 @@ Example:
                qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
                qcom,mdss-dsi-pan-enable-dynamic-fps;
                qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode";
+               qcom,mdss-dsi-pan-enable-dynamic-bitclk;
+               qcom,mdss-dsi-dynamic-bitclk_freq = <711037824 724453632 737869440
+                       751285248 764701056 778116864 791532672 804948480>;
                qcom,min-refresh-rate = <30>;
                qcom,max-refresh-rate = <60>;
                qcom,mdss-dsi-bl-pmic-bank-select = <0>;
index b5d7976..410c044 100644 (file)
@@ -8,6 +8,7 @@ Required properties:
   Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
   Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+  Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
   Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
index ecccb51..0c8bdd3 100644 (file)
@@ -155,6 +155,26 @@ noinline_data          Disable the inline data feature, inline data feature is
                        enabled by default.
 data_flush             Enable data flushing before checkpoint in order to
                        persist data of regular and symlink.
+fault_injection=%d     Enable fault injection in all supported types with
+                       specified injection rate.
+fault_type=%d          Support configuring fault injection type, should be
+                       enabled with fault_injection option, fault type value
+                       is shown below, it supports single or combined type.
+                       Type_Name               Type_Value
+                       FAULT_KMALLOC           0x000000001
+                       FAULT_KVMALLOC          0x000000002
+                       FAULT_PAGE_ALLOC                0x000000004
+                       FAULT_PAGE_GET          0x000000008
+                       FAULT_ALLOC_BIO         0x000000010
+                       FAULT_ALLOC_NID         0x000000020
+                       FAULT_ORPHAN            0x000000040
+                       FAULT_BLOCK             0x000000080
+                       FAULT_DIR_DEPTH         0x000000100
+                       FAULT_EVICT_INODE       0x000000200
+                       FAULT_TRUNCATE          0x000000400
+                       FAULT_IO                        0x000000800
+                       FAULT_CHECKPOINT                0x000001000
+                       FAULT_DISCARD           0x000002000
 mode=%s                Control block allocation mode which supports "adaptive"
                        and "lfs". In "lfs" mode, there should be no random
                        writes towards main area.
index cfd31d9..f8bf140 100644 (file)
@@ -32,7 +32,7 @@ Supported chips:
     Datasheet: Publicly available at the Texas Instruments website
                http://www.ti.com/
 
-Author: Lothar Felten <l-felten@ti.com>
+Author: Lothar Felten <lothar.felten@gmail.com>
 
 Description
 -----------
index 40686dc..a0179a6 100644 (file)
@@ -977,11 +977,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        See Documentation/x86/intel_mpx.txt for more
                        information about the feature.
 
-       eagerfpu=       [X86]
-                       on      enable eager fpu restore
-                       off     disable eager fpu restore
-                       auto    selects the default scheme, which automatically
-                               enables eagerfpu restore for xsaveopt.
 
        module.async_probe [KNL]
                        Enable asynchronous probe on this module.
index 50f15f7..41e00b8 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 158
+SUBLEVEL = 162
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
index b9f7306..9d64eac 100644 (file)
@@ -18,20 +18,6 @@ cflags-y     += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
 cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=archs
 
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
-    $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
-    $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
-
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
 # We had a customer reported bug where some code built in kernel was NOT using
index b5db9e7..79109de 100644 (file)
@@ -153,6 +153,26 @@ int copy_thread(unsigned long clone_flags,
                task_thread_info(current)->thr_ptr;
        }
 
+
+       /*
+        * setup usermode thread pointer #1:
+        * when child is picked by scheduler, __switch_to() uses @c_callee to
+        * populate usermode callee regs: this works (despite being in a kernel
+        * function) since special return path for child @ret_from_fork()
+        * ensures those regs are not clobbered all the way to RTIE to usermode
+        */
+       c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       /*
+        * setup usermode thread pointer #2:
+        * however for this special use of r25 in kernel, __switch_to() sets
+        * r25 for kernel needs and only in the final return path is usermode
+        * r25 setup, from pt_regs->user_r25. So set that up as well
+        */
+       c_regs->user_r25 = c_callee->r25;
+#endif
+
        return 0;
 }
 
index 02bd631..e6a3a94 100644 (file)
                        };
                };
 
-               dcan1: can@481cc000 {
+               dcan1: can@4ae3c000 {
                        compatible = "ti,dra7-d_can";
                        ti,hwmods = "dcan1";
                        reg = <0x4ae3c000 0x2000>;
                        status = "disabled";
                };
 
-               dcan2: can@481d0000 {
+               dcan2: can@48480000 {
                        compatible = "ti,dra7-d_can";
                        ti,hwmods = "dcan2";
                        reg = <0x48480000 0x2000>;
index 46894ea..1a870b4 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -36,6 +36,9 @@
 };
 
 &soc {
+       ssc_sensors: qcom,msm-ssc-sensors {
+               status = "disabled";
+       };
        qcom,msm-thermal {
                qcom,hotplug-temp = <115>;
                qcom,hotplug-temp-hysteresis = <25>;
index 4648d20..0bb0436 100644 (file)
@@ -30,6 +30,9 @@
 };
 
 &soc {
+       ssc_sensors: qcom,msm-ssc-sensors {
+               status = "disabled";
+       };
        qcom,msm-thermal {
                qcom,hotplug-temp = <115>;
                qcom,hotplug-temp-hysteresis = <25>;
index d0758ae..908dc4a 100644 (file)
@@ -37,6 +37,9 @@
 };
 
 &soc {
+       ssc_sensors: qcom,msm-ssc-sensors {
+               status = "disabled";
+       };
        qcom,msm-thermal {
                qcom,hotplug-temp = <115>;
                qcom,hotplug-temp-hysteresis = <25>;
index 12af84e..e6598fa 100644 (file)
                status = "disabled";
        };
 
-       qcom,msm-ssc-sensors {
-               compatible = "qcom,msm-ssc-sensors";
-               qcom,firmware-name = "slpi";
-               status = "ok";
+       ssc_sensors: qcom,msm-ssc-sensors {
+               status = "disabled";
        };
 
        sound-adp-agave {
                qcom,refs-tdm-rx = <&dai_tert_tdm_rx_5>;
                qcom,spkr-tdm-rx = <&dai_quat_tdm_rx_0>;
                qcom,mic-tdm-tx = <&dai_quat_tdm_tx_0>;
+               status = "disabled";
        };
 
        usb_detect: usb_detect {
index 5a266cd..34083ec 100644 (file)
                qcom,thermal-node;
        };
 };
+
+&soc {
+       qcom,msm-thermal {
+               qcom,hotplug-temp = <115>;
+               qcom,hotplug-temp-hysteresis = <25>;
+               qcom,therm-reset-temp = <119>;
+       };
+};
index 89a585b..c3abc6b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -37,6 +37,9 @@
 };
 
 &soc {
+       ssc_sensors: qcom,msm-ssc-sensors {
+               status = "disabled";
+       };
        qcom,msm-thermal {
                qcom,hotplug-temp = <115>;
                qcom,hotplug-temp-hysteresis = <25>;
index 02f5dbc..46140ab 100644 (file)
@@ -30,6 +30,9 @@
 };
 
 &soc {
+       ssc_sensors: qcom,msm-ssc-sensors {
+               status = "disabled";
+       };
        qcom,msm-thermal {
                qcom,hotplug-temp = <115>;
                qcom,hotplug-temp-hysteresis = <25>;
index 05a3144..75631bb 100644 (file)
@@ -37,6 +37,9 @@
 };
 
 &soc {
+       ssc_sensors: qcom,msm-ssc-sensors {
+               status = "disabled";
+       };
        qcom,msm-thermal {
                qcom,hotplug-temp = <115>;
                qcom,hotplug-temp-hysteresis = <25>;
index 6990099..7f87a45 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
                        reg = <0x10 8>;
                };
 
-               dload_type@18 {
+               dload_type@1c {
                        compatible = "qcom,msm-imem-dload-type";
-                       reg = <0x18 4>;
+                       reg = <0x1c 4>;
                };
 
                restart_reason@65c {
index 42eac0a..3adf92a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,8 +19,9 @@
                #clock-cells = <1>;
 
                reg = <0xc994400 0x588>,
-                     <0xc8c2300 0x8>;
-               reg-names = "pll_base", "gdsc_base";
+                     <0xc8c2300 0x8>,
+                     <0xc994200 0x98>;
+               reg-names = "pll_base", "gdsc_base", "dynamic_pll_base";
 
                gdsc-supply = <&gdsc_mdss>;
 
index d7fef42..de06a67 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
                                 <&clock_mmss MMSS_MDSS_ESC0_CLK>,
                                 <&clock_mmss BYTE0_CLK_SRC>,
                                 <&clock_mmss PCLK0_CLK_SRC>,
-                                <&clock_mmss MMSS_MDSS_BYTE0_INTF_CLK>;
+                                <&clock_mmss MMSS_MDSS_BYTE0_INTF_CLK>,
+                                <&mdss_dsi0_pll BYTE0_MUX_CLK>,
+                                <&mdss_dsi0_pll PIX0_MUX_CLK>,
+                                <&mdss_dsi0_pll BYTE0_SRC_CLK>,
+                                <&mdss_dsi0_pll PIX0_SRC_CLK>,
+                                <&mdss_dsi0_pll SHADOW_BYTE0_SRC_CLK>,
+                                <&mdss_dsi0_pll SHADOW_PIX0_SRC_CLK>;
                        clock-names = "byte_clk", "pixel_clk", "core_clk",
                                "byte_clk_rcg", "pixel_clk_rcg",
-                               "byte_intf_clk";
+                               "byte_intf_clk", "pll_byte_clk_mux",
+                               "pll_pixel_clk_mux", "pll_byte_clk_src",
+                               "pll_pixel_clk_src", "pll_shadow_byte_clk_src",
+                               "pll_shadow_pixel_clk_src";
 
                        qcom,platform-strength-ctrl = [ff 06
                                                        ff 06
index 384e24d..82e8089 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
                        gpio-key,wakeup;
                        debounce-interval = <15>;
                };
-
-               home {
-                       label = "home";
-                       gpios = <&pm660_gpios 11 0x1>;
-                       linux,input-type = <1>;
-                       linux,code = <102>;
-                       gpio-key,wakeup;
-                       debounce-interval = <15>;
-               };
-
        };
 
        hbtp {
index 1e767b4..808421c 100644 (file)
                };
 
                cont_splash_mem: splash_region@9d400000 {
-                       reg = <0x0 0x9d400000 0x0 0x02400000>;
+                       reg = <0x0 0x9d400000 0x0 0x23ff000>;
                        label = "cont_splash_mem";
                };
+
+               dfps_data_mem: dfps_data_mem@0x9f7ff000 {
+                      reg = <0 0x9f7ff000 0 0x00001000>;
+                      label = "dfps_data_mem";
+               };
        };
 
        bluetooth: bt_wcn3990 {
index b2f4a8c..6abd62c 100644 (file)
        qcom,mdss-dsi-panel-max-error-count = <3>;
        qcom,mdss-dsi-min-refresh-rate = <53>;
        qcom,mdss-dsi-max-refresh-rate = <60>;
+       qcom,mdss-dsi-pan-enable-dynamic-bitclk;
+       qcom,mdss-dsi-dynamic-bitclk_freq = <798240576 801594528 804948480
+               808302432 811656384>;
        qcom,mdss-dsi-pan-enable-dynamic-fps;
        qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
 };
index 69d3736..a3b6aad 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,8 +19,9 @@
                #clock-cells = <1>;
 
                reg = <0xc994400 0x588>,
-                     <0xc8c2300 0x8>;
-               reg-names = "pll_base", "gdsc_base";
+                     <0xc8c2300 0x8>,
+                     <0xc994200 0x98>;
+               reg-names = "pll_base", "gdsc_base", "dynamic_pll_base";
 
                gdsc-supply = <&gdsc_mdss>;
 
@@ -29,6 +30,7 @@
                clock-rate = <0>;
                qcom,dsi-pll-ssc-en;
                qcom,dsi-pll-ssc-mode = "down-spread";
+               memory-region = <&dfps_data_mem>;
 
                qcom,platform-supply-entries {
                        #address-cells = <1>;
@@ -54,8 +56,9 @@
                #clock-cells = <1>;
 
                reg = <0xc996400 0x588>,
-                     <0xc8c2300 0x8>;
-               reg-names = "pll_base", "gdsc_base";
+                     <0xc8c2300 0x8>,
+                     <0xc996200 0x98>;
+               reg-names = "pll_base", "gdsc_base", "dynamic_pll_base";
 
                gdsc-supply = <&gdsc_mdss>;
 
index ab4e71e..b4fbb23 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
                                 <&clock_mmss MMSS_MDSS_ESC0_CLK>,
                                 <&clock_mmss BYTE0_CLK_SRC>,
                                 <&clock_mmss PCLK0_CLK_SRC>,
-                                <&clock_mmss MMSS_MDSS_BYTE0_INTF_CLK>;
+                                <&clock_mmss MMSS_MDSS_BYTE0_INTF_CLK>,
+                                <&mdss_dsi0_pll BYTE0_MUX_CLK>,
+                                <&mdss_dsi0_pll PIX0_MUX_CLK>,
+                                <&mdss_dsi0_pll BYTE0_SRC_CLK>,
+                                <&mdss_dsi0_pll PIX0_SRC_CLK>,
+                                <&mdss_dsi0_pll SHADOW_BYTE0_SRC_CLK>,
+                                <&mdss_dsi0_pll SHADOW_PIX0_SRC_CLK>;
                        clock-names = "byte_clk", "pixel_clk", "core_clk",
                                "byte_clk_rcg", "pixel_clk_rcg",
-                               "byte_intf_clk";
+                               "byte_intf_clk", "pll_byte_clk_mux",
+                               "pll_pixel_clk_mux", "pll_byte_clk_src",
+                               "pll_pixel_clk_src", "pll_shadow_byte_clk_src",
+                               "pll_shadow_pixel_clk_src";
 
                        qcom,null-insertion-enabled;
                        qcom,platform-strength-ctrl = [ff 06
                                 <&clock_mmss MMSS_MDSS_ESC1_CLK>,
                                 <&clock_mmss BYTE1_CLK_SRC>,
                                 <&clock_mmss PCLK1_CLK_SRC>,
-                                <&clock_mmss MMSS_MDSS_BYTE1_INTF_CLK>;
+                                <&clock_mmss MMSS_MDSS_BYTE1_INTF_CLK>,
+                                <&mdss_dsi1_pll BYTE1_MUX_CLK>,
+                                <&mdss_dsi1_pll PIX1_MUX_CLK>,
+                                <&mdss_dsi1_pll BYTE1_SRC_CLK>,
+                                <&mdss_dsi1_pll PIX1_SRC_CLK>,
+                                <&mdss_dsi1_pll SHADOW_BYTE1_SRC_CLK>,
+                                <&mdss_dsi1_pll SHADOW_PIX1_SRC_CLK>;
                        clock-names = "byte_clk", "pixel_clk", "core_clk",
                                "byte_clk_rcg", "pixel_clk_rcg",
-                               "byte_intf_clk";
+                               "byte_intf_clk", "pll_byte_clk_mux",
+                               "pll_pixel_clk_mux", "pll_byte_clk_src",
+                               "pll_pixel_clk_src", "pll_shadow_byte_clk_src",
+                               "pll_shadow_pixel_clk_src";
 
                        qcom,null-insertion-enabled;
                        qcom,platform-strength-ctrl = [ff 06
index ef72d6b..27bc0ce 100644 (file)
                };
 
                cont_splash_mem: splash_region@9d400000 {
-                       reg = <0x0 0x9d400000 0x0 0x02400000>;
+                       reg = <0x0 0x9d400000 0x0 0x23ff000>;
                        label = "cont_splash_mem";
                };
+
+               dfps_data_mem: dfps_data_mem@0x9f7ff000 {
+                      reg = <0 0x9f7ff000 0 0x00001000>;
+                      label = "dfps_data_mem";
+               };
        };
 
        bluetooth: bt_wcn3990 {
index 3434514..4d44a20 100644 (file)
        model = "Qualcomm Technologies, Inc. MSM 8996";
        compatible = "qcom,msm8996";
        qcom,msm-id = <246 0x0>;
+       cpus {
+               #address-cells = <2>;
+               #size-cells = <0>;
+       CPU0: cpu@0 {
+               device_type = "cpu";
+               compatible = "qcom,kryo";
+               reg = <0x0 0x0>;
+               efficiency = <1024>;
+       };
+       CPU1: cpu@1 {
+               device_type = "cpu";
+               compatible = "qcom,kryo";
+               reg = <0x0 0x1>;
+               efficiency = <1024>;
+       };
+       CPU2: cpu@2 {
+               device_type = "cpu";
+               compatible = "qcom,kryo";
+               reg = <0x0 0x2>;
+               efficiency = <1536>;
+       };
+       CPU3: cpu@3 {
+               device_type = "cpu";
+               compatible = "qcom,kryo";
+               reg = <0x0 0x3>;
+               efficiency = <1536>;
+       };
+       cpu-map {
+               cluster0 {
+                       core0 {
+                               cpu = <&CPU0>;
+                       };
+                       core1 {
+                               cpu = <&CPU1>;
+                       };
+               };
+
+               cluster1 {
+                       core0 {
+                               cpu = <&CPU2>;
+                       };
+
+                       core1 {
+                               cpu = <&CPU3>;
+                       };
+               };
+       };
+};
 
        firmware: firmware {
                android {
                pinctrl-names = "bootstrap_active", "bootstrap_sleep";
                pinctrl-0 = <&cnss_bootstrap_active>;
                pinctrl-1 = <&cnss_bootstrap_sleep>;
+               qcom,wlan-ramdump-dynamic = <0x200000>;
 
                qcom,msm-bus,name = "msm-cnss";
                qcom,msm-bus,num-cases = <4>;
index 7cb235e..6e9e1c2 100644 (file)
@@ -41,7 +41,7 @@
                        };
 
                        macb1: ethernet@f802c000 {
-                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
+                               compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xf802c000 0x100>;
                                interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index ed8fda4..45fd4b1 100644 (file)
@@ -117,8 +117,8 @@ void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
                PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
 }
 
-extern unsigned char mvebu_boot_wa_start;
-extern unsigned char mvebu_boot_wa_end;
+extern unsigned char mvebu_boot_wa_start[];
+extern unsigned char mvebu_boot_wa_end[];
 
 /*
  * This function sets up the boot address workaround needed for SMP
@@ -131,7 +131,7 @@ int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
                             phys_addr_t resume_addr_reg)
 {
        void __iomem *sram_virt_base;
-       u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
+       u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;
 
        mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
        mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
index 764b2de..2cd037f 100644 (file)
@@ -53,6 +53,8 @@ CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
 CONFIG_SECCOMP=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
 # CONFIG_EFI is not set
 CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
 CONFIG_COMPAT=y
@@ -72,6 +74,7 @@ CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -82,6 +85,7 @@ CONFIG_INET6_AH=y
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -306,7 +310,9 @@ CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_F_MTP=y
 CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
@@ -382,8 +388,11 @@ CONFIG_SCHEDSTATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_IPC_LOGGING=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_ARC4=y
index 84cc2ab..506a50d 100644 (file)
@@ -42,6 +42,8 @@ CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
 CONFIG_SECCOMP=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
 CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
 CONFIG_COMPAT=y
 CONFIG_PM_AUTOSLEEP=y
@@ -60,6 +62,7 @@ CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -70,6 +73,7 @@ CONFIG_INET6_AH=y
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -298,7 +302,9 @@ CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_F_MTP=y
 CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
@@ -399,8 +405,11 @@ CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_PANIC_ON_DATA_CORRUPTION=y
 CONFIG_ARM64_PTDUMP=y
 CONFIG_FREE_PAGES_RDONLY=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_ARC4=y
index 0f624b6..6f63128 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_HOTPLUG_SIZE_BITS=28
 CONFIG_LOCALVERSION="-perf"
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 # CONFIG_AUDITSYSCALL is not set
 CONFIG_NO_HZ=y
@@ -67,6 +66,8 @@ CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
 CONFIG_SECCOMP=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
 # CONFIG_EFI is not set
 CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -96,6 +97,7 @@ CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -108,6 +110,7 @@ CONFIG_INET6_AH=y
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -429,6 +432,7 @@ CONFIG_USB_DWC3=y
 CONFIG_USB_ISP1760=y
 CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_USB_QTI_KS_BRIDGE=y
+CONFIG_USB_QCOM_IPC_BRIDGE=y
 CONFIG_USB_QCOM_DIAG_BRIDGE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_MSM_SSPHY_QMP=y
@@ -447,7 +451,9 @@ CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_F_MTP=y
 CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
@@ -621,9 +627,12 @@ CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_QPDI=y
 CONFIG_CORESIGHT_SOURCE_DUMMY=y
 CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
index 39a9b67..a45fc1c 100644 (file)
@@ -1,6 +1,5 @@
 CONFIG_HOTPLUG_SIZE_BITS=28
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 # CONFIG_AUDITSYSCALL is not set
 CONFIG_NO_HZ=y
@@ -65,6 +64,8 @@ CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
 CONFIG_SECCOMP=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_EFI is not set
 CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
@@ -96,6 +97,7 @@ CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -108,6 +110,7 @@ CONFIG_INET6_AH=y
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -433,6 +436,7 @@ CONFIG_USB_DWC3=y
 CONFIG_USB_ISP1760=y
 CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_USB_QTI_KS_BRIDGE=y
+CONFIG_USB_QCOM_IPC_BRIDGE=y
 CONFIG_USB_QCOM_DIAG_BRIDGE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_MSM_SSPHY_QMP=y
@@ -451,7 +455,9 @@ CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_F_MTP=y
 CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
@@ -665,9 +671,12 @@ CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_QPDI=y
 CONFIG_CORESIGHT_SOURCE_DUMMY=y
 CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
index 6e99309..d00e27e 100644 (file)
@@ -38,7 +38,8 @@
 #define ARM64_HAS_VIRT_HOST_EXTN               12
 #define ARM64_HARDEN_BRANCH_PREDICTOR          13
 #define ARM64_UNMAP_KERNEL_AT_EL0              14
-#define ARM64_NCAPS                            15
+#define ARM64_HAS_32BIT_EL0                    15
+#define ARM64_NCAPS                            16
 
 #ifndef __ASSEMBLY__
 
@@ -190,6 +191,11 @@ static inline bool cpu_supports_mixed_endian_el0(void)
        return id_aa64mmfr0_mixed_endian_el0(read_cpuid(SYS_ID_AA64MMFR0_EL1));
 }
 
+static inline bool system_supports_32bit_el0(void)
+{
+       return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+}
+
 static inline bool system_supports_mixed_endian_el0(void)
 {
        return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
index 1b5e0e8..7e2b3e3 100644 (file)
@@ -28,7 +28,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm goto("1: nop\n\t"
+       asm_volatile_goto("1: nop\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align 3\n\t"
                 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm goto("1: b %l[l_yes]\n\t"
+       asm_volatile_goto("1: b %l[l_yes]\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align 3\n\t"
                 ".quad 1b, %l[l_yes], %c0\n\t"
index 3066328..9917b55 100644 (file)
@@ -40,6 +40,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+       return !(vcpu->arch.hcr_el2 & HCR_RW);
+}
+
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
index 9002555..c768daa 100644 (file)
 #define ID_AA64PFR0_ASIMD_SUPPORTED    0x0
 #define ID_AA64PFR0_EL1_64BIT_ONLY     0x1
 #define ID_AA64PFR0_EL0_64BIT_ONLY     0x1
+#define ID_AA64PFR0_EL0_32BIT_64BIT    0x2
 
 /* id_aa64mmfr0 */
 #define ID_AA64MMFR0_TGRAN4_SHIFT      28
index 8b326cb..98602e5 100644 (file)
@@ -753,6 +753,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .matches = unmap_kernel_at_el0,
        },
 #endif
+       {
+               .desc = "32-bit EL0 Support",
+               .capability = ARM64_HAS_32BIT_EL0,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64PFR0_EL1,
+               .field_pos = ID_AA64PFR0_EL0_SHIFT,
+               .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
+       },
        {},
 };
 
index e5ee888..fe0ecc3 100644 (file)
@@ -47,6 +47,45 @@ static u64 core_reg_offset_from_id(u64 id)
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+       u64 off = core_reg_offset_from_id(reg->id);
+       int size;
+
+       switch (off) {
+       case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+            KVM_REG_ARM_CORE_REG(regs.regs[30]):
+       case KVM_REG_ARM_CORE_REG(regs.sp):
+       case KVM_REG_ARM_CORE_REG(regs.pc):
+       case KVM_REG_ARM_CORE_REG(regs.pstate):
+       case KVM_REG_ARM_CORE_REG(sp_el1):
+       case KVM_REG_ARM_CORE_REG(elr_el1):
+       case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+            KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+               size = sizeof(__u64);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+            KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+               size = sizeof(__uint128_t);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+               size = sizeof(__u32);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if (KVM_REG_SIZE(reg->id) == size &&
+           IS_ALIGNED(off, size / sizeof(__u32)))
+               return 0;
+
+       return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
        /*
@@ -66,6 +105,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
                return -EFAULT;
 
@@ -88,6 +130,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
                return -EINVAL;
 
@@ -97,17 +142,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        }
 
        if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-               u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+               u64 mode = (*(u64 *)valp) & COMPAT_PSR_MODE_MASK;
                switch (mode) {
                case COMPAT_PSR_MODE_USR:
+                       if (!system_supports_32bit_el0())
+                               return -EINVAL;
+                       break;
                case COMPAT_PSR_MODE_FIQ:
                case COMPAT_PSR_MODE_IRQ:
                case COMPAT_PSR_MODE_SVC:
                case COMPAT_PSR_MODE_ABT:
                case COMPAT_PSR_MODE_UND:
+                       if (!vcpu_el1_is_32bit(vcpu))
+                               return -EINVAL;
+                       break;
                case PSR_MODE_EL0t:
                case PSR_MODE_EL1t:
                case PSR_MODE_EL1h:
+                       if (vcpu_el1_is_32bit(vcpu))
+                               return -EINVAL;
                        break;
                default:
                        err = -EINVAL;
index 5e4a59b..2691a18 100644 (file)
@@ -211,7 +211,7 @@ static inline long ffz(int x)
  * This is defined the same way as ffs.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline long fls(int x)
+static inline int fls(int x)
 {
        int r;
 
@@ -232,7 +232,7 @@ static inline long fls(int x)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static inline long ffs(int x)
+static inline int ffs(int x)
 {
        int r;
 
index 9e3ddf7..2704e0b 100644 (file)
@@ -68,7 +68,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
                        panic("Can't create %s() memory pool!", __func__);
                else
                        gen_pool_add(coherent_pool,
-                               pfn_to_virt(max_low_pfn),
+                               (unsigned long)pfn_to_virt(max_low_pfn),
                                hexagon_coherent_pool_size, -1);
        }
 
index c3c8352..ca3ad5e 100644 (file)
@@ -360,9 +360,9 @@ static int __init early_fadump_reserve_mem(char *p)
 }
 early_param("fadump_reserve_mem", early_fadump_reserve_mem);
 
-static void register_fw_dump(struct fadump_mem_struct *fdm)
+static int register_fw_dump(struct fadump_mem_struct *fdm)
 {
-       int rc;
+       int rc, err;
        unsigned int wait_time;
 
        pr_debug("Registering for firmware-assisted kernel dump...\n");
@@ -379,7 +379,11 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
 
        } while (wait_time);
 
+       err = -EIO;
        switch (rc) {
+       default:
+               pr_err("Failed to register. Unknown Error(%d).\n", rc);
+               break;
        case -1:
                printk(KERN_ERR "Failed to register firmware-assisted kernel"
                        " dump. Hardware Error(%d).\n", rc);
@@ -387,18 +391,22 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
        case -3:
                printk(KERN_ERR "Failed to register firmware-assisted kernel"
                        " dump. Parameter Error(%d).\n", rc);
+               err = -EINVAL;
                break;
        case -9:
                printk(KERN_ERR "firmware-assisted kernel dump is already "
                        " registered.");
                fw_dump.dump_registered = 1;
+               err = -EEXIST;
                break;
        case 0:
                printk(KERN_INFO "firmware-assisted kernel dump registration"
                        " is successful\n");
                fw_dump.dump_registered = 1;
+               err = 0;
                break;
        }
+       return err;
 }
 
 void crash_fadump(struct pt_regs *regs, const char *str)
@@ -997,7 +1005,7 @@ static unsigned long init_fadump_header(unsigned long addr)
        return addr;
 }
 
-static void register_fadump(void)
+static int register_fadump(void)
 {
        unsigned long addr;
        void *vaddr;
@@ -1008,7 +1016,7 @@ static void register_fadump(void)
         * assisted dump.
         */
        if (!fw_dump.reserve_dump_area_size)
-               return;
+               return -ENODEV;
 
        ret = fadump_setup_crash_memory_ranges();
        if (ret)
@@ -1023,7 +1031,7 @@ static void register_fadump(void)
        fadump_create_elfcore_headers(vaddr);
 
        /* register the future kernel dump with firmware. */
-       register_fw_dump(&fdm);
+       return register_fw_dump(&fdm);
 }
 
 static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
@@ -1208,7 +1216,6 @@ static ssize_t fadump_register_store(struct kobject *kobj,
        switch (buf[0]) {
        case '0':
                if (fw_dump.dump_registered == 0) {
-                       ret = -EINVAL;
                        goto unlock_out;
                }
                /* Un-register Firmware-assisted dump */
@@ -1216,11 +1223,11 @@ static ssize_t fadump_register_store(struct kobject *kobj,
                break;
        case '1':
                if (fw_dump.dump_registered == 1) {
-                       ret = -EINVAL;
+                       ret = -EEXIST;
                        goto unlock_out;
                }
                /* Register Firmware-assisted dump */
-               register_fadump();
+               ret = register_fadump();
                break;
        default:
                ret = -EINVAL;
index 015ae55..8dff2b3 100644 (file)
@@ -186,7 +186,12 @@ void __init reserve_crashkernel(void)
                        (unsigned long)(crashk_res.start >> 20),
                        (unsigned long)(memblock_phys_mem_size() >> 20));
 
-       memblock_reserve(crashk_res.start, crash_size);
+       if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
+           memblock_reserve(crashk_res.start, crash_size)) {
+               pr_err("Failed to reserve memory for crashkernel!\n");
+               crashk_res.start = crashk_res.end = 0;
+               return;
+       }
 }
 
 int overlaps_crashkernel(unsigned long start, unsigned long size)
index b7019b5..2d28607 100644 (file)
@@ -199,13 +199,27 @@ dont_backup_fp:
        std     r1, PACATMSCRATCH(r13)
        ld      r1, PACAR1(r13)
 
-       /* Store the PPR in r11 and reset to decent value */
        std     r11, GPR11(r1)                  /* Temporary stash */
 
+       /*
+        * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+        * clobbered by an exception once we turn on MSR_RI below.
+        */
+       ld      r11, PACATMSCRATCH(r13)
+       std     r11, GPR1(r1)
+
+       /*
+        * Store r13 away so we can free up the scratch SPR for the SLB fault
+        * handler (needed once we start accessing the thread_struct).
+        */
+       GET_SCRATCH0(r11)
+       std     r11, GPR13(r1)
+
        /* Reset MSR RI so we can take SLB faults again */
        li      r11, MSR_RI
        mtmsrd  r11, 1
 
+       /* Store the PPR in r11 and reset to decent value */
        mfspr   r11, SPRN_PPR
        HMT_MEDIUM
 
@@ -230,11 +244,11 @@ dont_backup_fp:
        SAVE_GPR(8, r7)                         /* user r8 */
        SAVE_GPR(9, r7)                         /* user r9 */
        SAVE_GPR(10, r7)                        /* user r10 */
-       ld      r3, PACATMSCRATCH(r13)          /* user r1 */
+       ld      r3, GPR1(r1)                    /* user r1 */
        ld      r4, GPR7(r1)                    /* user r7 */
        ld      r5, GPR11(r1)                   /* user r11 */
        ld      r6, GPR12(r1)                   /* user r12 */
-       GET_SCRATCH0(8)                         /* user r13 */
+       ld      r8, GPR13(r1)                   /* user r13 */
        std     r3, GPR1(r7)
        std     r4, GPR7(r7)
        std     r5, GPR11(r7)
index fb37290..366965a 100644 (file)
@@ -314,7 +314,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        unsigned long pp, key;
        unsigned long v, gr;
        __be64 *hptep;
-       int index;
+       long int index;
        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
 
        /* Get SLB entry */
index eac3b7c..ab7b259 100644 (file)
@@ -2270,7 +2270,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
        level_shift = entries_shift + 3;
        level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
 
-       if ((level_shift - 3) * levels + page_shift >= 60)
+       if ((level_shift - 3) * levels + page_shift >= 55)
                return -EINVAL;
 
        /* Allocate TCE table */
index 18fccc3..bfd75be 100644 (file)
@@ -79,7 +79,7 @@ struct qin64 {
 struct dcss_segment {
        struct list_head list;
        char dcss_name[8];
-       char res_name[15];
+       char res_name[16];
        unsigned long start_addr;
        unsigned long end;
        atomic_t ref_count;
@@ -434,7 +434,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
        memcpy(&seg->res_name, seg->dcss_name, 8);
        EBCASC(seg->res_name, 8);
        seg->res_name[8] = '\0';
-       strncat(seg->res_name, " (DCSS)", 7);
+       strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
        seg->res->name = seg->res_name;
        rc = seg->vm_segtype;
        if (rc == SEG_TYPE_SC ||
index 38dd546..c5ccb61 100644 (file)
@@ -206,7 +206,6 @@ CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_VIRTIO_BLK=y
 CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
index 715399b..c194d57 100644 (file)
 #ifdef CONFIG_X86_64
 /*
  * use carryless multiply version of crc32c when buffer
- * size is >= 512 (when eager fpu is enabled) or
- * >= 1024 (when eager fpu is disabled) to account
+ * size is >= 512 to account
  * for fpu state save/restore overhead.
  */
-#define CRC32C_PCL_BREAKEVEN_EAGERFPU  512
-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU        1024
+#define CRC32C_PCL_BREAKEVEN   512
 
 asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
                                unsigned int crc_init);
-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#define set_pcl_breakeven_point()                                      \
-do {                                                                   \
-       if (!use_eager_fpu())                                           \
-               crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
-} while (0)
 #endif /* CONFIG_X86_64 */
 
 static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -185,7 +177,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
         * use faster PCL version if datasize is large enough to
         * overcome kernel fpu state save/restore overhead
         */
-       if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+       if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
                kernel_fpu_begin();
                *crcp = crc_pcl(data, len, *crcp);
                kernel_fpu_end();
@@ -197,7 +189,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
 static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
                                u8 *out)
 {
-       if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+       if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
                kernel_fpu_begin();
                *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
                kernel_fpu_end();
@@ -256,7 +248,6 @@ static int __init crc32c_intel_mod_init(void)
                alg.update = crc32c_pcl_intel_update;
                alg.finup = crc32c_pcl_intel_finup;
                alg.digest = crc32c_pcl_intel_digest;
-               set_pcl_breakeven_point();
        }
 #endif
        return crypto_register_shash(&alg);
index a619254..9a66d73 100644 (file)
@@ -90,7 +90,7 @@ ENDPROC(native_usergs_sysret64)
 .endm
 
 .macro TRACE_IRQS_IRETQ_DEBUG
-       bt      $9, EFLAGS(%rsp)                /* interrupts off? */
+       btl     $9, EFLAGS(%rsp)                /* interrupts off? */
        jnc     1f
        TRACE_IRQS_ON_DEBUG
 1:
@@ -620,7 +620,7 @@ retint_kernel:
 #ifdef CONFIG_PREEMPT
        /* Interrupts are off */
        /* Check if we need preemption */
-       bt      $9, EFLAGS(%rsp)                /* were interrupts off? */
+       btl     $9, EFLAGS(%rsp)                /* were interrupts off? */
        jnc     1f
 0:     cmpl    $0, PER_CPU_VAR(__preempt_count)
        jnz     1f
index 5dd363d..049327e 100644 (file)
@@ -51,8 +51,9 @@ extern u8 pvclock_page
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+       asm ("syscall" : "=a" (ret), "=m" (*ts) :
+            "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+            "memory", "rcx", "r11");
        return ret;
 }
 
@@ -60,8 +61,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+       asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+            "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+            "memory", "rcx", "r11");
        return ret;
 }
 
@@ -143,13 +145,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
 
-       asm(
+       asm (
                "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
+               "mov %[clock], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+               : "=a" (ret), "=m" (*ts)
+               : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
                : "memory", "edx");
        return ret;
 }
@@ -158,13 +160,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm(
+       asm (
                "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
+               "mov %[tv], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+               : "=a" (ret), "=m" (*tv), "=m" (*tz)
+               : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
                : "memory", "edx");
        return ret;
 }
index dd2269d..a5fa319 100644 (file)
 #define X86_FEATURE_EXTD_APICID        ( 3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU     ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
index ec2aedb..16825dd 100644 (file)
@@ -57,11 +57,6 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
 /*
  * FPU related CPU feature flag helper routines:
  */
-static __always_inline __pure bool use_eager_fpu(void)
-{
-       return true;
-}
-
 static __always_inline __pure bool use_xsaveopt(void)
 {
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -498,24 +493,6 @@ static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
 }
 
 
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
-{
-       if (!use_eager_fpu())
-               clts();
-}
-
-static inline void __fpregs_deactivate_hw(void)
-{
-       if (!use_eager_fpu())
-               stts();
-}
-
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
 static inline void __fpregs_deactivate(struct fpu *fpu)
 {
        WARN_ON_FPU(!fpu->fpregs_active);
@@ -524,7 +501,6 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
        this_cpu_write(fpu_fpregs_owner_ctx, NULL);
 }
 
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
 static inline void __fpregs_activate(struct fpu *fpu)
 {
        WARN_ON_FPU(fpu->fpregs_active);
@@ -549,22 +525,17 @@ static inline int fpregs_active(void)
 }
 
 /*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
  * These generally need preemption protection to work,
  * do try to avoid using these on their own.
  */
 static inline void fpregs_activate(struct fpu *fpu)
 {
-       __fpregs_activate_hw();
        __fpregs_activate(fpu);
 }
 
 static inline void fpregs_deactivate(struct fpu *fpu)
 {
        __fpregs_deactivate(fpu);
-       __fpregs_deactivate_hw();
 }
 
 /*
@@ -591,8 +562,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
         * or if the past 5 consecutive context-switches used math.
         */
        fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
-                     new_fpu->fpstate_active &&
-                     (use_eager_fpu() || new_fpu->counter > 5);
+                     new_fpu->fpstate_active;
 
        if (old_fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(old_fpu))
@@ -605,17 +575,12 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 
                /* Don't change CR0.TS if we just switch! */
                if (fpu.preload) {
-                       new_fpu->counter++;
                        __fpregs_activate(new_fpu);
                        prefetch(&new_fpu->state);
-               } else {
-                       __fpregs_deactivate_hw();
                }
        } else {
-               old_fpu->counter = 0;
                old_fpu->last_cpu = -1;
                if (fpu.preload) {
-                       new_fpu->counter++;
                        if (fpu_want_lazy_restore(new_fpu, cpu))
                                fpu.preload = 0;
                        else
index 1c6f6ac..0d81c7d 100644 (file)
@@ -303,17 +303,6 @@ struct fpu {
        unsigned char                   fpregs_active;
 
        /*
-        * @counter:
-        *
-        * This counter contains the number of consecutive context switches
-        * during which the FPU stays used. If this is over a threshold, the
-        * lazy FPU restore logic becomes eager, to save the trap overhead.
-        * This is an unsigned char so that after 256 iterations the counter
-        * wraps and the context switch behavior turns lazy again; this is to
-        * deal with bursty apps that only use the FPU for a short time:
-        */
-       unsigned char                   counter;
-       /*
         * @state:
         *
         * In-memory copy of all FPU registers that we save/restore
@@ -321,29 +310,6 @@ struct fpu {
         * the registers in the FPU are more recent than this state
         * copy. If the task context-switches away then they get
         * saved here and represent the FPU state.
-        *
-        * After context switches there may be a (short) time period
-        * during which the in-FPU hardware registers are unchanged
-        * and still perfectly match this state, if the tasks
-        * scheduled afterwards are not using the FPU.
-        *
-        * This is the 'lazy restore' window of optimization, which
-        * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
-        *
-        * We detect whether a subsequent task uses the FPU via setting
-        * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
-        *
-        * During this window, if the task gets scheduled again, we
-        * might be able to skip having to do a restore from this
-        * memory buffer to the hardware registers - at the cost of
-        * incurring the overhead of #NM fault traps.
-        *
-        * Note that on modern CPUs that support the XSAVEOPT (or other
-        * optimized XSAVE instructions), we don't use #NM traps anymore,
-        * as the hardware can track whether FPU registers need saving
-        * or not. On such CPUs we activate the non-lazy ('eagerfpu')
-        * logic, which unconditionally saves/restores all FPU state
-        * across context switches. (if FPU state exists.)
         */
        union fpregs_state              state;
        /*
index 74fda1a..3a37cdb 100644 (file)
@@ -439,7 +439,6 @@ struct kvm_vcpu_arch {
        struct kvm_mmu_memory_cache mmu_page_header_cache;
 
        struct fpu guest_fpu;
-       bool eager_fpu;
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;
index 6aa0b51..b322325 100644 (file)
@@ -53,27 +53,9 @@ static bool kernel_fpu_disabled(void)
        return this_cpu_read(in_kernel_fpu);
 }
 
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
 static bool interrupted_kernel_fpu_idle(void)
 {
-       if (kernel_fpu_disabled())
-               return false;
-
-       if (use_eager_fpu())
-               return true;
-
-       return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+       return !kernel_fpu_disabled();
 }
 
 /*
@@ -121,7 +103,6 @@ void __kernel_fpu_begin(void)
                copy_fpregs_to_fpstate(fpu);
        } else {
                this_cpu_write(fpu_fpregs_owner_ctx, NULL);
-               __fpregs_activate_hw();
        }
 }
 EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -132,8 +113,6 @@ void __kernel_fpu_end(void)
 
        if (fpu->fpregs_active)
                copy_kernel_to_fpregs(&fpu->state);
-       else
-               __fpregs_deactivate_hw();
 
        kernel_fpu_enable();
 }
@@ -194,10 +173,7 @@ void fpu__save(struct fpu *fpu)
        preempt_disable();
        if (fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(fpu)) {
-                       if (use_eager_fpu())
-                               copy_kernel_to_fpregs(&fpu->state);
-                       else
-                               fpregs_deactivate(fpu);
+                       copy_kernel_to_fpregs(&fpu->state);
                }
        }
        preempt_enable();
@@ -245,8 +221,7 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
         * Don't let 'init optimized' areas of the XSAVE area
         * leak into the child task:
         */
-       if (use_eager_fpu())
-               memset(&dst_fpu->state.xsave, 0, xstate_size);
+       memset(&dst_fpu->state.xsave, 0, xstate_size);
 
        /*
         * Save current FPU registers directly into the child
@@ -268,17 +243,13 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
        if (!copy_fpregs_to_fpstate(dst_fpu)) {
                memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
 
-               if (use_eager_fpu())
-                       copy_kernel_to_fpregs(&src_fpu->state);
-               else
-                       fpregs_deactivate(src_fpu);
+               copy_kernel_to_fpregs(&src_fpu->state);
        }
        preempt_enable();
 }
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-       dst_fpu->counter = 0;
        dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;
 
@@ -381,7 +352,6 @@ void fpu__restore(struct fpu *fpu)
        kernel_fpu_disable();
        fpregs_activate(fpu);
        copy_kernel_to_fpregs(&fpu->state);
-       fpu->counter++;
        kernel_fpu_enable();
 }
 EXPORT_SYMBOL_GPL(fpu__restore);
@@ -398,7 +368,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
 void fpu__drop(struct fpu *fpu)
 {
        preempt_disable();
-       fpu->counter = 0;
 
        if (fpu->fpregs_active) {
                /* Ignore delayed exceptions from user space */
@@ -437,7 +406,7 @@ void fpu__clear(struct fpu *fpu)
 {
        WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
 
-       if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
+       if (!static_cpu_has(X86_FEATURE_FPU)) {
                /* FPU state will be reallocated lazily at the first use. */
                fpu__drop(fpu);
        } else {
index 3de0771..9be3e79 100644 (file)
@@ -319,11 +319,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                }
 
                fpu->fpstate_active = 1;
-               if (use_eager_fpu()) {
-                       preempt_disable();
-                       fpu__restore(fpu);
-                       preempt_enable();
-               }
+               preempt_disable();
+               fpu__restore(fpu);
+               preempt_enable();
 
                return err;
        } else {
index 6aa0f4d..0e37e36 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/setup.h>
 #include <asm/apic.h>
 #include <asm/param.h>
+#include <asm/tsc.h>
 
 /* CPU reference clock frequency: in KHz */
 #define FREQ_83                83200
index 83d6369..338d13d 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
-#include <asm/fpu/internal.h> /* For use_eager_fpu.  Ugh! */
 #include <asm/user.h>
 #include <asm/fpu/xstate.h>
 #include "cpuid.h"
@@ -104,9 +103,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
        if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
-       vcpu->arch.eager_fpu = use_eager_fpu();
-       if (vcpu->arch.eager_fpu)
-               kvm_x86_ops->fpu_activate(vcpu);
+       kvm_x86_ops->fpu_activate(vcpu);
 
        /*
         * The existing code assumes virtual address is 48-bit in the canonical
index 53d43d2..e6ab034 100644 (file)
@@ -7319,16 +7319,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
        __kernel_fpu_end();
        ++vcpu->stat.fpu_reload;
-       /*
-        * If using eager FPU mode, or if the guest is a frequent user
-        * of the FPU, just leave the FPU active for next time.
-        * Every 255 times fpu_counter rolls over to 0; a guest that uses
-        * the FPU in bursts will revert to loading it on demand.
-        */
-       if (!vcpu->arch.eager_fpu) {
-               if (++vcpu->fpu_counter < 5)
-                       kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
-       }
        trace_kvm_fpu(0);
 }
 
index a8f90ce..dc6d990 100644 (file)
@@ -60,7 +60,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
        eb->nid = nid;
 
        if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
-               emu_nid_to_phys[nid] = nid;
+               emu_nid_to_phys[nid] = pb->nid;
 
        pb->start += size;
        if (pb->start >= pb->end) {
index 724a087..9c73581 100644 (file)
@@ -477,7 +477,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
 {
        int err, ret = IRQ_NONE;
-       struct pt_regs regs;
+       struct pt_regs regs = {0};
        const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
        uint8_t xenpmu_flags = get_xenpmu_flags();
 
index 149e7a7..b524f70 100644 (file)
@@ -384,6 +384,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
        strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));
+       rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -465,6 +466,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
        strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
                sizeof(rblkcipher.geniv));
+       rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
index 97ccb31..a1150dc 100644 (file)
@@ -536,6 +536,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
        strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));
+       rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
index 6c5bc3f..38857ab 100644 (file)
@@ -1359,8 +1359,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
        dpm_wait_for_children(dev, async);
 
-       if (async_error)
+       if (async_error) {
+               dev->power.direct_complete = false;
                goto Complete;
+       }
 
        /*
         * If a device configured to wake up the system from sleep states
@@ -1375,6 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                pm_get_active_wakeup_sources(suspend_abort,
                        MAX_SUSPEND_ABORT_LEN);
                log_suspend_abort_reason(suspend_abort);
+               dev->power.direct_complete = false;
                async_error = -EBUSY;
                goto Complete;
        }
index 331363e..2daa5b8 100644 (file)
@@ -3459,6 +3459,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
                                          (struct floppy_struct **)&outparam);
                if (ret)
                        return ret;
+               memcpy(&inparam.g, outparam,
+                               offsetof(struct floppy_struct, name));
+               outparam = &inparam.g;
                break;
        case FDMSGON:
                UDP->flags |= FTD_MSG;
index 4a899b4..b0a12e6 100644 (file)
@@ -340,6 +340,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
 
        /* Additional Realtek 8723DE Bluetooth devices */
+       { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
        { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
 
        /* Additional Realtek 8821AE Bluetooth devices */
index c4215f3..9d3bf28 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -141,6 +141,11 @@ struct hdmi_8998_reg_cfg {
        u32 l3_pre_driver_1;
        u32 l3_pre_driver_2;
 
+       u32 l0_res_code_offset;
+       u32 l1_res_code_offset;
+       u32 l2_res_code_offset;
+       u32 l3_res_code_offset;
+
        bool debug;
 };
 
@@ -394,14 +399,14 @@ static int hdmi_8998_config_phy(unsigned long rate,
 
        if (ver == HDMI_VERSION_8998_3_3) {
                if (bclk > high_freq_bit_clk_threshold) {
-                       cfg->l0_tx_drv_lvl = 0xA;
+                       cfg->l0_tx_drv_lvl = 0xf;
                        cfg->l0_tx_emp_post1_lvl = 0x3;
-                       cfg->l1_tx_drv_lvl = 0xA;
-                       cfg->l1_tx_emp_post1_lvl = 0x3;
-                       cfg->l2_tx_drv_lvl = 0xA;
+                       cfg->l1_tx_drv_lvl = 0xf;
+                       cfg->l1_tx_emp_post1_lvl = 0x2;
+                       cfg->l2_tx_drv_lvl = 0xf;
                        cfg->l2_tx_emp_post1_lvl = 0x3;
-                       cfg->l3_tx_drv_lvl = 0x8;
-                       cfg->l3_tx_emp_post1_lvl = 0x3;
+                       cfg->l3_tx_drv_lvl = 0xf;
+                       cfg->l3_tx_emp_post1_lvl = 0x0;
                        cfg->l0_pre_driver_1 = 0x0;
                        cfg->l0_pre_driver_2 = 0x1C;
                        cfg->l1_pre_driver_1 = 0x0;
@@ -410,15 +415,19 @@ static int hdmi_8998_config_phy(unsigned long rate,
                        cfg->l2_pre_driver_2 = 0x1C;
                        cfg->l3_pre_driver_1 = 0x0;
                        cfg->l3_pre_driver_2 = 0x0;
+                       cfg->l0_res_code_offset = 0x3;
+                       cfg->l1_res_code_offset = 0x0;
+                       cfg->l2_res_code_offset = 0x0;
+                       cfg->l3_res_code_offset = 0x3;
                } else if (bclk > dig_freq_bit_clk_threshold) {
-                       cfg->l0_tx_drv_lvl = 0x9;
+                       cfg->l0_tx_drv_lvl = 0xf;
                        cfg->l0_tx_emp_post1_lvl = 0x3;
-                       cfg->l1_tx_drv_lvl = 0x9;
+                       cfg->l1_tx_drv_lvl = 0xf;
                        cfg->l1_tx_emp_post1_lvl = 0x3;
-                       cfg->l2_tx_drv_lvl = 0x9;
+                       cfg->l2_tx_drv_lvl = 0xf;
                        cfg->l2_tx_emp_post1_lvl = 0x3;
-                       cfg->l3_tx_drv_lvl = 0x8;
-                       cfg->l3_tx_emp_post1_lvl = 0x3;
+                       cfg->l3_tx_drv_lvl = 0xf;
+                       cfg->l3_tx_emp_post1_lvl = 0x0;
                        cfg->l0_pre_driver_1 = 0x0;
                        cfg->l0_pre_driver_2 = 0x16;
                        cfg->l1_pre_driver_1 = 0x0;
@@ -426,16 +435,20 @@ static int hdmi_8998_config_phy(unsigned long rate,
                        cfg->l2_pre_driver_1 = 0x0;
                        cfg->l2_pre_driver_2 = 0x16;
                        cfg->l3_pre_driver_1 = 0x0;
-                       cfg->l3_pre_driver_2 = 0x0;
+                       cfg->l3_pre_driver_2 = 0x18;
+                       cfg->l0_res_code_offset = 0x3;
+                       cfg->l1_res_code_offset = 0x0;
+                       cfg->l2_res_code_offset = 0x0;
+                       cfg->l3_res_code_offset = 0x0;
                } else if (bclk > mid_freq_bit_clk_threshold) {
-                       cfg->l0_tx_drv_lvl = 0x9;
-                       cfg->l0_tx_emp_post1_lvl = 0x3;
-                       cfg->l1_tx_drv_lvl = 0x9;
-                       cfg->l1_tx_emp_post1_lvl = 0x3;
-                       cfg->l2_tx_drv_lvl = 0x9;
-                       cfg->l2_tx_emp_post1_lvl = 0x3;
-                       cfg->l3_tx_drv_lvl = 0x8;
-                       cfg->l3_tx_emp_post1_lvl = 0x3;
+                       cfg->l0_tx_drv_lvl = 0xf;
+                       cfg->l0_tx_emp_post1_lvl = 0x5;
+                       cfg->l1_tx_drv_lvl = 0xf;
+                       cfg->l1_tx_emp_post1_lvl = 0x5;
+                       cfg->l2_tx_drv_lvl = 0xf;
+                       cfg->l2_tx_emp_post1_lvl = 0x5;
+                       cfg->l3_tx_drv_lvl = 0xf;
+                       cfg->l3_tx_emp_post1_lvl = 0x0;
                        cfg->l0_pre_driver_1 = 0x0;
                        cfg->l0_pre_driver_2 = 0x0E;
                        cfg->l1_pre_driver_1 = 0x0;
@@ -443,24 +456,32 @@ static int hdmi_8998_config_phy(unsigned long rate,
                        cfg->l2_pre_driver_1 = 0x0;
                        cfg->l2_pre_driver_2 = 0x0E;
                        cfg->l3_pre_driver_1 = 0x0;
-                       cfg->l3_pre_driver_2 = 0x0;
+                       cfg->l3_pre_driver_2 = 0x0E;
+                       cfg->l0_res_code_offset = 0x0;
+                       cfg->l1_res_code_offset = 0x0;
+                       cfg->l2_res_code_offset = 0x0;
+                       cfg->l3_res_code_offset = 0x0;
                } else {
-                       cfg->l0_tx_drv_lvl = 0x0;
+                       cfg->l0_tx_drv_lvl = 0x1;
                        cfg->l0_tx_emp_post1_lvl = 0x0;
-                       cfg->l1_tx_drv_lvl = 0x0;
+                       cfg->l1_tx_drv_lvl = 0x1;
                        cfg->l1_tx_emp_post1_lvl = 0x0;
-                       cfg->l2_tx_drv_lvl = 0x0;
+                       cfg->l2_tx_drv_lvl = 0x1;
                        cfg->l2_tx_emp_post1_lvl = 0x0;
                        cfg->l3_tx_drv_lvl = 0x0;
                        cfg->l3_tx_emp_post1_lvl = 0x0;
                        cfg->l0_pre_driver_1 = 0x0;
-                       cfg->l0_pre_driver_2 = 0x01;
+                       cfg->l0_pre_driver_2 = 0x16;
                        cfg->l1_pre_driver_1 = 0x0;
-                       cfg->l1_pre_driver_2 = 0x01;
+                       cfg->l1_pre_driver_2 = 0x16;
                        cfg->l2_pre_driver_1 = 0x0;
-                       cfg->l2_pre_driver_2 = 0x01;
+                       cfg->l2_pre_driver_2 = 0x16;
                        cfg->l3_pre_driver_1 = 0x0;
-                       cfg->l3_pre_driver_2 = 0x0;
+                       cfg->l3_pre_driver_2 = 0x18;
+                       cfg->l0_res_code_offset = 0x0;
+                       cfg->l1_res_code_offset = 0x0;
+                       cfg->l2_res_code_offset = 0x0;
+                       cfg->l3_res_code_offset = 0x0;
                }
        } else {
                cfg->l0_tx_drv_lvl = 0xF;
@@ -479,6 +500,10 @@ static int hdmi_8998_config_phy(unsigned long rate,
                cfg->l2_pre_driver_2 = 0x1E;
                cfg->l3_pre_driver_1 = 0x0;
                cfg->l3_pre_driver_2 = 0x10;
+               cfg->l0_res_code_offset = 0x3;
+               cfg->l1_res_code_offset = 0x0;
+               cfg->l2_res_code_offset = 0x0;
+               cfg->l3_res_code_offset = 0x3;
        }
 
        return rc;
@@ -564,10 +589,10 @@ static int hdmi_8998_pll_set_clk_rate(struct clk *c, unsigned long rate,
        _W(pll, PHY_TX_PRE_DRIVER_2(2), cfg.l2_pre_driver_2);
        _W(pll, PHY_TX_PRE_DRIVER_2(3), cfg.l3_pre_driver_2);
 
-       _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), 0x3);
-       _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(1), 0x0);
-       _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(2), 0x0);
-       _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), 0x3);
+       _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), cfg.l0_res_code_offset);
+       _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(1), cfg.l1_res_code_offset);
+       _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(2), cfg.l2_res_code_offset);
+       _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), cfg.l3_res_code_offset);
 
        _W(phy, PHY_MODE, cfg.phy_mode);
 
index 8518d9d..73c9908 100644 (file)
@@ -98,6 +98,9 @@ static void __init ti_32k_timer_init(struct device_node *np)
                return;
        }
 
+       if (!of_machine_is_compatible("ti,am43"))
+               ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
        ti_32k_timer.counter = ti_32k_timer.base;
 
        /*
index 59ed54e..fe8cfe2 100644 (file)
@@ -63,7 +63,7 @@ struct dcp {
        struct dcp_coherent_block       *coh;
 
        struct completion               completion[DCP_MAX_CHANS];
-       struct mutex                    mutex[DCP_MAX_CHANS];
+       spinlock_t                      lock[DCP_MAX_CHANS];
        struct task_struct              *thread[DCP_MAX_CHANS];
        struct crypto_queue             queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
 
        int ret;
 
-       do {
-               __set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-               mutex_lock(&sdcp->mutex[chan]);
+               spin_lock(&sdcp->lock[chan]);
                backlog = crypto_get_backlog(&sdcp->queue[chan]);
                arq = crypto_dequeue_request(&sdcp->queue[chan]);
-               mutex_unlock(&sdcp->mutex[chan]);
+               spin_unlock(&sdcp->lock[chan]);
+
+               if (!backlog && !arq) {
+                       schedule();
+                       continue;
+               }
+
+               set_current_state(TASK_RUNNING);
 
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
                if (arq) {
                        ret = mxs_dcp_aes_block_crypt(arq);
                        arq->complete(arq, ret);
-                       continue;
                }
-
-               schedule();
-       } while (!kthread_should_stop());
+       }
 
        return 0;
 }
@@ -407,9 +411,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
        rctx->ecb = ecb;
        actx->chan = DCP_CHAN_CRYPTO;
 
-       mutex_lock(&sdcp->mutex[actx->chan]);
+       spin_lock(&sdcp->lock[actx->chan]);
        ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-       mutex_unlock(&sdcp->mutex[actx->chan]);
+       spin_unlock(&sdcp->lock[actx->chan]);
 
        wake_up_process(sdcp->thread[actx->chan]);
 
@@ -645,13 +649,20 @@ static int dcp_chan_thread_sha(void *data)
        struct ahash_request *req;
        int ret, fini;
 
-       do {
-               __set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-               mutex_lock(&sdcp->mutex[chan]);
+               spin_lock(&sdcp->lock[chan]);
                backlog = crypto_get_backlog(&sdcp->queue[chan]);
                arq = crypto_dequeue_request(&sdcp->queue[chan]);
-               mutex_unlock(&sdcp->mutex[chan]);
+               spin_unlock(&sdcp->lock[chan]);
+
+               if (!backlog && !arq) {
+                       schedule();
+                       continue;
+               }
+
+               set_current_state(TASK_RUNNING);
 
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
@@ -663,12 +674,8 @@ static int dcp_chan_thread_sha(void *data)
                        ret = dcp_sha_req_to_buf(arq);
                        fini = rctx->fini;
                        arq->complete(arq, ret);
-                       if (!fini)
-                               continue;
                }
-
-               schedule();
-       } while (!kthread_should_stop());
+       }
 
        return 0;
 }
@@ -726,9 +733,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
                rctx->init = 1;
        }
 
-       mutex_lock(&sdcp->mutex[actx->chan]);
+       spin_lock(&sdcp->lock[actx->chan]);
        ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-       mutex_unlock(&sdcp->mutex[actx->chan]);
+       spin_unlock(&sdcp->lock[actx->chan]);
 
        wake_up_process(sdcp->thread[actx->chan]);
        mutex_unlock(&actx->mutex);
@@ -984,7 +991,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, sdcp);
 
        for (i = 0; i < DCP_MAX_CHANS; i++) {
-               mutex_init(&sdcp->mutex[i]);
+               spin_lock_init(&sdcp->lock[i]);
                init_completion(&sdcp->completion[i]);
                crypto_init_queue(&sdcp->queue[i], 50);
        }
index 792bdae..d14c8ff 100644 (file)
@@ -1187,15 +1187,14 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
 
        rc = device_add(pvt->addrmatch_dev);
        if (rc < 0)
-               return rc;
+               goto err_put_addrmatch;
 
        if (!pvt->is_registered) {
                pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
                                              GFP_KERNEL);
                if (!pvt->chancounts_dev) {
-                       put_device(pvt->addrmatch_dev);
-                       device_del(pvt->addrmatch_dev);
-                       return -ENOMEM;
+                       rc = -ENOMEM;
+                       goto err_del_addrmatch;
                }
 
                pvt->chancounts_dev->type = &all_channel_counts_type;
@@ -1209,9 +1208,18 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
 
                rc = device_add(pvt->chancounts_dev);
                if (rc < 0)
-                       return rc;
+                       goto err_put_chancounts;
        }
        return 0;
+
+err_put_chancounts:
+       put_device(pvt->chancounts_dev);
+err_del_addrmatch:
+       device_del(pvt->addrmatch_dev);
+err_put_addrmatch:
+       put_device(pvt->addrmatch_dev);
+
+       return rc;
 }
 
 static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
@@ -1221,11 +1229,11 @@ static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
        edac_dbg(1, "\n");
 
        if (!pvt->is_registered) {
-               put_device(pvt->chancounts_dev);
                device_del(pvt->chancounts_dev);
+               put_device(pvt->chancounts_dev);
        }
-       put_device(pvt->addrmatch_dev);
        device_del(pvt->addrmatch_dev);
+       put_device(pvt->addrmatch_dev);
 }
 
 /****************************************************************************
index ac55dd3..7f2acfa 100644 (file)
@@ -121,4 +121,3 @@ obj-$(CONFIG_GPIO_ZEVIO)    += gpio-zevio.o
 obj-$(CONFIG_GPIO_ZYNQ)                += gpio-zynq.o
 obj-$(CONFIG_GPIO_ZX)          += gpio-zx.o
 obj-$(CONFIG_MSM_SMP2P)                += gpio-msm-smp2p.o
-obj-$(CONFIG_MSM_SMP2P_TEST)   += gpio-msm-smp2p-test.o
index 984186e..f5f7b53 100644 (file)
@@ -41,6 +41,8 @@ struct adp5588_gpio {
        uint8_t int_en[3];
        uint8_t irq_mask[3];
        uint8_t irq_stat[3];
+       uint8_t int_input_en[3];
+       uint8_t int_lvl_cached[3];
 };
 
 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -177,12 +179,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
        struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
        int i;
 
-       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+               if (dev->int_input_en[i]) {
+                       mutex_lock(&dev->lock);
+                       dev->dir[i] &= ~dev->int_input_en[i];
+                       dev->int_input_en[i] = 0;
+                       adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+                                          dev->dir[i]);
+                       mutex_unlock(&dev->lock);
+               }
+
+               if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+                       dev->int_lvl_cached[i] = dev->int_lvl[i];
+                       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+                                          dev->int_lvl[i]);
+               }
+
                if (dev->int_en[i] ^ dev->irq_mask[i]) {
                        dev->int_en[i] = dev->irq_mask[i];
                        adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
                                           dev->int_en[i]);
                }
+       }
 
        mutex_unlock(&dev->irq_lock);
 }
@@ -225,9 +243,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
        else
                return -EINVAL;
 
-       adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
-       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
-                          dev->int_lvl[bank]);
+       dev->int_input_en[bank] |= bit;
 
        return 0;
 }
diff --git a/drivers/gpio/gpio-msm-smp2p-test.c b/drivers/gpio/gpio-msm-smp2p-test.c
deleted file mode 100644 (file)
index 5907513..0000000
+++ /dev/null
@@ -1,762 +0,0 @@
-/* drivers/gpio/gpio-msm-smp2p-test.c
- *
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
-#include <linux/of_irq.h>
-#include <linux/gpio.h>
-#include <linux/debugfs.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/bitmap.h>
-#include "../soc/qcom/smp2p_private.h"
-#include "../soc/qcom/smp2p_test_common.h"
-
-/* Interrupt callback data */
-struct gpio_info {
-       int gpio_base_id;
-       int irq_base_id;
-
-       bool initialized;
-       struct completion cb_completion;
-       int cb_count;
-       DECLARE_BITMAP(triggered_irqs, SMP2P_BITS_PER_ENTRY);
-};
-
-/* GPIO Inbound/Outbound callback info */
-struct gpio_inout {
-       struct gpio_info in;
-       struct gpio_info out;
-};
-
-static struct gpio_inout gpio_info[SMP2P_NUM_PROCS];
-
-/**
- * Init/reset the callback data.
- *
- * @info: Pointer to callback data
- */
-static void cb_data_reset(struct gpio_info *info)
-{
-       int n;
-
-       if (!info)
-               return;
-
-       if (!info->initialized) {
-               init_completion(&info->cb_completion);
-               info->initialized = true;
-       }
-       info->cb_count = 0;
-
-       for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n)
-               clear_bit(n,  info->triggered_irqs);
-
-       reinit_completion(&info->cb_completion);
-}
-
-static int smp2p_gpio_test_probe(struct platform_device *pdev)
-{
-       int id;
-       int cnt;
-       struct device_node *node = pdev->dev.of_node;
-       struct gpio_info *gpio_info_ptr = NULL;
-
-       /*
-        * NOTE:  This does a string-lookup of the GPIO pin name and doesn't
-        * actually directly link to the SMP2P GPIO driver since all
-        * GPIO/Interrupt access must be through standard
-        * Linux GPIO / Interrupt APIs.
-        */
-       if (strcmp("qcom,smp2pgpio_test_smp2p_1_in", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_MODEM_PROC].in;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_1_out", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_MODEM_PROC].out;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_2_in", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_AUDIO_PROC].in;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_2_out", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_AUDIO_PROC].out;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_3_in", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_SENSOR_PROC].in;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_3_out", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_SENSOR_PROC].out;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_4_in", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_WIRELESS_PROC].in;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_4_out", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_WIRELESS_PROC].out;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_5_in", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].in;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_5_out", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].out;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_7_in", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_TZ_PROC].in;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_7_out", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_TZ_PROC].out;
-       } else if (strcmp("qcom,smp2pgpio_test_smp2p_15_in", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
-       } else if (
-               strcmp("qcom,smp2pgpio_test_smp2p_15_out", node->name) == 0) {
-               gpio_info_ptr = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
-       } else {
-               pr_err("%s: unable to match device type '%s'\n",
-                               __func__, node->name);
-               return -ENODEV;
-       }
-
-       /* retrieve the GPIO and interrupt ID's */
-       cnt = of_gpio_count(node);
-       if (cnt && gpio_info_ptr) {
-               /*
-                * Instead of looping through all 32-bits, we can just get the
-                * first pin to get the base IDs.  This saves on the verbosity
-                * of the device tree nodes as well.
-                */
-               id = of_get_gpio(node, 0);
-               if (id == -EPROBE_DEFER)
-                       return id;
-               gpio_info_ptr->gpio_base_id = id;
-               gpio_info_ptr->irq_base_id = gpio_to_irq(id);
-       }
-       return 0;
-}
-
-/*
- * NOTE:  Instead of match table and device driver, you may be able to just
- * call of_find_compatible_node() in your init function.
- */
-static struct of_device_id msm_smp2p_match_table[] = {
-       /* modem */
-       {.compatible = "qcom,smp2pgpio_test_smp2p_1_out", },
-       {.compatible = "qcom,smp2pgpio_test_smp2p_1_in", },
-
-       /* audio (adsp) */
-       {.compatible = "qcom,smp2pgpio_test_smp2p_2_out", },
-       {.compatible = "qcom,smp2pgpio_test_smp2p_2_in", },
-
-       /* sensor */
-       {.compatible = "qcom,smp2pgpio_test_smp2p_3_out", },
-       {.compatible = "qcom,smp2pgpio_test_smp2p_3_in", },
-
-       /* wcnss */
-       {.compatible = "qcom,smp2pgpio_test_smp2p_4_out", },
-       {.compatible = "qcom,smp2pgpio_test_smp2p_4_in", },
-
-       /* CDSP */
-       {.compatible = "qcom,smp2pgpio_test_smp2p_5_out", },
-       {.compatible = "qcom,smp2pgpio_test_smp2p_5_in", },
-
-       /* TZ */
-       {.compatible = "qcom,smp2pgpio_test_smp2p_7_out", },
-       {.compatible = "qcom,smp2pgpio_test_smp2p_7_in", },
-
-       /* mock loopback */
-       {.compatible = "qcom,smp2pgpio_test_smp2p_15_out", },
-       {.compatible = "qcom,smp2pgpio_test_smp2p_15_in", },
-       {},
-};
-
-static struct platform_driver smp2p_gpio_driver = {
-       .probe = smp2p_gpio_test_probe,
-       .driver = {
-               .name = "smp2pgpio_test",
-               .owner = THIS_MODULE,
-               .of_match_table = msm_smp2p_match_table,
-       },
-};
-
-/**
- * smp2p_ut_local_gpio_out - Verify outbound functionality.
- *
- * @s:   pointer to output file
- */
-static void smp2p_ut_local_gpio_out(struct seq_file *s)
-{
-       int failed = 0;
-       struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
-       int ret;
-       int id;
-       struct msm_smp2p_remote_mock *mock;
-
-       seq_printf(s, "Running %s\n", __func__);
-       do {
-               /* initialize mock edge */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-
-               mock = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(mock, !=, NULL);
-
-               mock->rx_interrupt_count = 0;
-               memset(&mock->remote_item, 0,
-                       sizeof(struct smp2p_smem_item));
-               smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
-                       SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
-                       0, 1);
-               strlcpy(mock->remote_item.entries[0].name, "smp2p",
-                       SMP2P_MAX_ENTRY_NAME);
-               SMP2P_SET_ENT_VALID(
-                       mock->remote_item.header.valid_total_ent, 1);
-               msm_smp2p_set_remote_mock_exists(true);
-               mock->tx_interrupt();
-
-               /* open GPIO entry */
-               smp2p_gpio_open_test_entry("smp2p",
-                               SMP2P_REMOTE_MOCK_PROC, true);
-
-               /* verify set/get functions */
-               UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-                       int pin = cb_info->gpio_base_id + id;
-
-                       mock->rx_interrupt_count = 0;
-                       gpio_set_value(pin, 1);
-                       UT_ASSERT_INT(1, ==, mock->rx_interrupt_count);
-                       UT_ASSERT_INT(1, ==, gpio_get_value(pin));
-
-                       gpio_set_value(pin, 0);
-                       UT_ASSERT_INT(2, ==, mock->rx_interrupt_count);
-                       UT_ASSERT_INT(0, ==, gpio_get_value(pin));
-               }
-               if (failed)
-                       break;
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-
-       smp2p_gpio_open_test_entry("smp2p",
-                       SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_gpio_irq - Interrupt handler for inbound entries.
- *
- * @irq:         Virtual IRQ being triggered
- * @data:        Cookie data (struct gpio_info * in this case)
- * @returns:     Number of bytes written
- */
-static irqreturn_t smp2p_gpio_irq(int irq, void *data)
-{
-       struct gpio_info *gpio_ptr = (struct gpio_info *)data;
-       int offset;
-
-       if (!gpio_ptr) {
-               pr_err("%s: gpio_ptr is NULL for irq %d\n", __func__, irq);
-               return IRQ_HANDLED;
-       }
-
-       offset = irq - gpio_ptr->irq_base_id;
-       if (offset >= 0 &&  offset < SMP2P_BITS_PER_ENTRY)
-               set_bit(offset, gpio_ptr->triggered_irqs);
-       else
-               pr_err("%s: invalid irq offset base %d; irq %d\n",
-                       __func__, gpio_ptr->irq_base_id, irq);
-
-       ++gpio_ptr->cb_count;
-       complete(&gpio_ptr->cb_completion);
-       return IRQ_HANDLED;
-}
-
-/**
- * smp2p_ut_local_gpio_in - Verify inbound functionality.
- *
- * @s:   pointer to output file
- */
-static void smp2p_ut_local_gpio_in(struct seq_file *s)
-{
-       int failed = 0;
-       struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
-       int id;
-       int ret;
-       int virq;
-       struct msm_smp2p_remote_mock *mock;
-
-       seq_printf(s, "Running %s\n", __func__);
-
-       cb_data_reset(cb_info);
-       do {
-               /* initialize mock edge */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-
-               mock = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(mock, !=, NULL);
-
-               mock->rx_interrupt_count = 0;
-               memset(&mock->remote_item, 0,
-                       sizeof(struct smp2p_smem_item));
-               smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
-                       SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
-                       0, 1);
-               strlcpy(mock->remote_item.entries[0].name, "smp2p",
-                       SMP2P_MAX_ENTRY_NAME);
-               SMP2P_SET_ENT_VALID(
-                       mock->remote_item.header.valid_total_ent, 1);
-               msm_smp2p_set_remote_mock_exists(true);
-               mock->tx_interrupt();
-
-               smp2p_gpio_open_test_entry("smp2p",
-                               SMP2P_REMOTE_MOCK_PROC, true);
-
-               /* verify set/get functions locally */
-               UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-                       int pin;
-                       int current_value;
-
-                       /* verify pin value cannot be set */
-                       pin = cb_info->gpio_base_id + id;
-                       current_value = gpio_get_value(pin);
-
-                       gpio_set_value(pin, 0);
-                       UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
-                       gpio_set_value(pin, 1);
-                       UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
-
-                       /* verify no interrupts */
-                       UT_ASSERT_INT(0, ==, cb_info->cb_count);
-               }
-               if (failed)
-                       break;
-
-               /* register for interrupts */
-               UT_ASSERT_INT(0, <, cb_info->irq_base_id);
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-                       virq = cb_info->irq_base_id + id;
-                       UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
-                       ret = request_irq(virq,
-                                       smp2p_gpio_irq, IRQF_TRIGGER_RISING,
-                                       "smp2p_test", cb_info);
-                       UT_ASSERT_INT(0, ==, ret);
-               }
-               if (failed)
-                       break;
-
-               /* verify both rising and falling edge interrupts */
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-                       virq = cb_info->irq_base_id + id;
-                       irq_set_irq_type(virq, IRQ_TYPE_EDGE_BOTH);
-                       cb_data_reset(cb_info);
-
-                       /* verify rising-edge interrupt */
-                       mock->remote_item.entries[0].entry = 1 << id;
-                       mock->tx_interrupt();
-                       UT_ASSERT_INT(cb_info->cb_count, ==, 1);
-                       UT_ASSERT_INT(0, <,
-                               test_bit(id, cb_info->triggered_irqs));
-                       test_bit(id, cb_info->triggered_irqs);
-
-                       /* verify falling-edge interrupt */
-                       mock->remote_item.entries[0].entry = 0;
-                       mock->tx_interrupt();
-                       UT_ASSERT_INT(cb_info->cb_count, ==, 2);
-                       UT_ASSERT_INT(0, <,
-                                       test_bit(id, cb_info->triggered_irqs));
-               }
-               if (failed)
-                       break;
-
-               /* verify rising-edge interrupts */
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-                       virq = cb_info->irq_base_id + id;
-                       irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
-                       cb_data_reset(cb_info);
-
-                       /* verify only rising-edge interrupt is triggered */
-                       mock->remote_item.entries[0].entry = 1 << id;
-                       mock->tx_interrupt();
-                       UT_ASSERT_INT(cb_info->cb_count, ==, 1);
-                       UT_ASSERT_INT(0, <,
-                               test_bit(id, cb_info->triggered_irqs));
-                       test_bit(id, cb_info->triggered_irqs);
-
-                       mock->remote_item.entries[0].entry = 0;
-                       mock->tx_interrupt();
-                       UT_ASSERT_INT(cb_info->cb_count, ==, 1);
-                       UT_ASSERT_INT(0, <,
-                               test_bit(id, cb_info->triggered_irqs));
-               }
-               if (failed)
-                       break;
-
-               /* verify falling-edge interrupts */
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-                       virq = cb_info->irq_base_id + id;
-                       irq_set_irq_type(virq, IRQ_TYPE_EDGE_FALLING);
-                       cb_data_reset(cb_info);
-
-                       /* verify only rising-edge interrupt is triggered */
-                       mock->remote_item.entries[0].entry = 1 << id;
-                       mock->tx_interrupt();
-                       UT_ASSERT_INT(cb_info->cb_count, ==, 0);
-                       UT_ASSERT_INT(0, ==,
-                               test_bit(id, cb_info->triggered_irqs));
-
-                       mock->remote_item.entries[0].entry = 0;
-                       mock->tx_interrupt();
-                       UT_ASSERT_INT(cb_info->cb_count, ==, 1);
-                       UT_ASSERT_INT(0, <,
-                               test_bit(id, cb_info->triggered_irqs));
-               }
-               if (failed)
-                       break;
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-
-       /* unregister for interrupts */
-       if (cb_info->irq_base_id) {
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
-                       free_irq(cb_info->irq_base_id + id, cb_info);
-       }
-
-       smp2p_gpio_open_test_entry("smp2p",
-                       SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_ut_local_gpio_in_update_open - Verify combined open/update.
- *
- * @s:   pointer to output file
- *
- * If the remote side updates the SMP2P bits and sends before negotiation is
- * complete, then the UPDATE event will have to be delayed until negotiation is
- * complete.  This should result in both the OPEN and UPDATE events coming in
- * right after each other and the behavior should be transparent to the clients
- * of SMP2P GPIO.
- */
-static void smp2p_ut_local_gpio_in_update_open(struct seq_file *s)
-{
-       int failed = 0;
-       struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
-       int id;
-       int ret;
-       int virq;
-       struct msm_smp2p_remote_mock *mock;
-
-       seq_printf(s, "Running %s\n", __func__);
-
-       cb_data_reset(cb_info);
-       do {
-               /* initialize mock edge */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-
-               mock = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(mock, !=, NULL);
-
-               mock->rx_interrupt_count = 0;
-               memset(&mock->remote_item, 0,
-                       sizeof(struct smp2p_smem_item));
-               smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
-                       SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
-                       0, 1);
-               strlcpy(mock->remote_item.entries[0].name, "smp2p",
-                       SMP2P_MAX_ENTRY_NAME);
-               SMP2P_SET_ENT_VALID(
-                       mock->remote_item.header.valid_total_ent, 1);
-
-               /* register for interrupts */
-               smp2p_gpio_open_test_entry("smp2p",
-                               SMP2P_REMOTE_MOCK_PROC, true);
-
-               UT_ASSERT_INT(0, <, cb_info->irq_base_id);
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-                       virq = cb_info->irq_base_id + id;
-                       UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
-                       ret = request_irq(virq,
-                                       smp2p_gpio_irq, IRQ_TYPE_EDGE_BOTH,
-                                       "smp2p_test", cb_info);
-                       UT_ASSERT_INT(0, ==, ret);
-               }
-               if (failed)
-                       break;
-
-               /* update the state value and complete negotiation */
-               mock->remote_item.entries[0].entry = 0xDEADDEAD;
-               msm_smp2p_set_remote_mock_exists(true);
-               mock->tx_interrupt();
-
-               /* verify delayed state updates were processed */
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-                       virq = cb_info->irq_base_id + id;
-
-                       UT_ASSERT_INT(cb_info->cb_count, >, 0);
-                       if (0x1 & (0xDEADDEAD >> id)) {
-                               /* rising edge should have been triggered */
-                               if (!test_bit(id, cb_info->triggered_irqs)) {
-                                       seq_printf(s, "%s:%d bit %d clear, ",
-                                               __func__, __LINE__, id);
-                                       seq_puts(s, "expected set\n");
-                                       failed = 1;
-                                       break;
-                               }
-                       } else {
-                               /* edge should not have been triggered */
-                               if (test_bit(id, cb_info->triggered_irqs)) {
-                                       seq_printf(s, "%s:%d bit %d set, ",
-                                               __func__, __LINE__, id);
-                                       seq_puts(s, "expected clear\n");
-                                       failed = 1;
-                                       break;
-                               }
-                       }
-               }
-               if (failed)
-                       break;
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-
-       /* unregister for interrupts */
-       if (cb_info->irq_base_id) {
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
-                       free_irq(cb_info->irq_base_id + id, cb_info);
-       }
-
-       smp2p_gpio_open_test_entry("smp2p",
-                       SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_gpio_write_bits - writes value to each GPIO pin specified in mask.
- *
- * @gpio: gpio test structure
- * @mask: 1 = write gpio_value to this GPIO pin
- * @gpio_value: value to write to GPIO pin
- */
-static void smp2p_gpio_write_bits(struct gpio_info *gpio, uint32_t mask,
-       int gpio_value)
-{
-       int n;
-
-       for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n) {
-               if (mask & 0x1)
-                       gpio_set_value(gpio->gpio_base_id + n, gpio_value);
-               mask >>= 1;
-       }
-}
-
-static void smp2p_gpio_set_bits(struct gpio_info *gpio, uint32_t mask)
-{
-       smp2p_gpio_write_bits(gpio, mask, 1);
-}
-
-static void smp2p_gpio_clr_bits(struct gpio_info *gpio, uint32_t mask)
-{
-       smp2p_gpio_write_bits(gpio, mask, 0);
-}
-
-/**
- * smp2p_gpio_get_value - reads entire 32-bits of GPIO
- *
- * @gpio: gpio structure
- * @returns: 32 bit value of GPIO pins
- */
-static uint32_t smp2p_gpio_get_value(struct gpio_info *gpio)
-{
-       int n;
-       uint32_t value = 0;
-
-       for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n) {
-               if (gpio_get_value(gpio->gpio_base_id + n))
-                       value |= 1 << n;
-       }
-       return value;
-}
-
-/**
- * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
- *
- * @s:   pointer to output file
- * @remote_pid:  Remote processor to test
- * @name:        Name of the test for reporting
- *
- * This test verifies inbound/outbound functionality for the remote processor.
- */
-static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid,
-               const char *name)
-{
-       int failed = 0;
-       uint32_t request;
-       uint32_t response;
-       struct gpio_info *cb_in;
-       struct gpio_info *cb_out;
-       int id;
-       int ret;
-
-       seq_printf(s, "Running %s for '%s' remote pid %d\n",
-                  __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
-       cb_in = &gpio_info[remote_pid].in;
-       cb_out = &gpio_info[remote_pid].out;
-       cb_data_reset(cb_in);
-       cb_data_reset(cb_out);
-       do {
-               /* open test entries */
-               msm_smp2p_deinit_rmt_lpb_proc(remote_pid);
-               smp2p_gpio_open_test_entry("smp2p", remote_pid, true);
-
-               /* register for interrupts */
-               UT_ASSERT_INT(0, <, cb_in->gpio_base_id);
-               UT_ASSERT_INT(0, <, cb_in->irq_base_id);
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-                       int virq = cb_in->irq_base_id + id;
-                       UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
-                       ret = request_irq(virq,
-                               smp2p_gpio_irq,
-                               IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                               "smp2p_test", cb_in);
-                       UT_ASSERT_INT(0, ==, ret);
-               }
-               if (failed)
-                       break;
-
-               /* write echo of data value 0 */
-               UT_ASSERT_INT(0, <, cb_out->gpio_base_id);
-               request = 0x0;
-               SMP2P_SET_RMT_CMD_TYPE(request, 1);
-               SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
-               SMP2P_SET_RMT_DATA(request, 0x0);
-
-               smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-               smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
-               smp2p_gpio_set_bits(cb_out, request);
-
-               UT_ASSERT_INT(cb_in->cb_count, ==, 0);
-               smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-
-               /* verify response */
-               do {
-                       /* wait for up to 32 changes */
-                       if (wait_for_completion_timeout(
-                                       &cb_in->cb_completion, HZ / 2) == 0)
-                               break;
-                       reinit_completion(&cb_in->cb_completion);
-               } while (cb_in->cb_count < 32);
-               UT_ASSERT_INT(cb_in->cb_count, >, 0);
-               response = smp2p_gpio_get_value(cb_in);
-               SMP2P_SET_RMT_CMD_TYPE(request, 0);
-               UT_ASSERT_HEX(request, ==, response);
-
-               /* write echo of data value of all 1's */
-               request = 0x0;
-               SMP2P_SET_RMT_CMD_TYPE(request, 1);
-               SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
-               SMP2P_SET_RMT_DATA(request, ~0);
-
-               smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-               cb_data_reset(cb_in);
-               smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
-               smp2p_gpio_set_bits(cb_out, request);
-
-               UT_ASSERT_INT(cb_in->cb_count, ==, 0);
-               smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-
-               /* verify response including 24 interrupts */
-               do {
-                       UT_ASSERT_INT(
-                               (int)wait_for_completion_timeout(
-                                       &cb_in->cb_completion, HZ / 2),
-                          >, 0);
-                       reinit_completion(&cb_in->cb_completion);
-               } while (cb_in->cb_count < 24);
-               response = smp2p_gpio_get_value(cb_in);
-               SMP2P_SET_RMT_CMD_TYPE(request, 0);
-               UT_ASSERT_HEX(request, ==, response);
-               UT_ASSERT_INT(24, ==, cb_in->cb_count);
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", name);
-               seq_puts(s, "\tFailed\n");
-       }
-
-       /* unregister for interrupts */
-       if (cb_in->irq_base_id) {
-               for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
-                       free_irq(cb_in->irq_base_id + id, cb_in);
-       }
-
-       smp2p_gpio_open_test_entry("smp2p",     remote_pid, false);
-       msm_smp2p_init_rmt_lpb_proc(remote_pid);
-}
-
-/**
- * smp2p_ut_remote_inout - Verify inbound/outbound functionality for all.
- *
- * @s:   pointer to output file
- *
- * This test verifies inbound and outbound functionality for all
- * configured remote processor.
- */
-static void smp2p_ut_remote_inout(struct seq_file *s)
-{
-       struct smp2p_interrupt_config *int_cfg;
-       int pid;
-
-       int_cfg = smp2p_get_interrupt_config();
-       if (!int_cfg) {
-               seq_puts(s, "Remote processor config unavailable\n");
-               return;
-       }
-
-       for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
-               if (!int_cfg[pid].is_configured)
-                       continue;
-
-               smp2p_ut_remote_inout_core(s, pid, __func__);
-       }
-}
-
-static int __init smp2p_debugfs_init(void)
-{
-       /* register GPIO pins */
-       (void)platform_driver_register(&smp2p_gpio_driver);
-
-       /*
-        * Add Unit Test entries.
-        *
-        * The idea with unit tests is that you can run all of them
-        * from ADB shell by doing:
-        *  adb shell
-        *  cat ut*
-        *
-        * And if particular tests fail, you can then repeatedly run the
-        * failing tests as you debug and resolve the failing test.
-        */
-       smp2p_debug_create("ut_local_gpio_out", smp2p_ut_local_gpio_out);
-       smp2p_debug_create("ut_local_gpio_in", smp2p_ut_local_gpio_in);
-       smp2p_debug_create("ut_local_gpio_in_update_open",
-               smp2p_ut_local_gpio_in_update_open);
-       smp2p_debug_create("ut_remote_gpio_inout", smp2p_ut_remote_inout);
-       return 0;
-}
-late_initcall(smp2p_debugfs_init);
index b233cf8..2e1e84c 100644 (file)
@@ -504,7 +504,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 
        while (true) {
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
-               if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+               if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (timeout == 0)
                        return -ETIME;
index 48bddbf..ca60c86 100644 (file)
@@ -591,6 +591,22 @@ static void sde_hdmi_tx_hdcp_cb(void *ptr, enum sde_hdcp_states status)
        queue_delayed_work(hdmi->workq, &hdmi_ctrl->hdcp_cb_work, HZ/4);
 }
 
+static void sde_hdmi_tx_set_avmute(void *ptr)
+{
+       struct sde_hdmi *hdmi_ctrl = (struct sde_hdmi *)ptr;
+       struct hdmi *hdmi;
+
+       if (!hdmi_ctrl) {
+               DEV_ERR("%s: invalid input\n", __func__);
+               return;
+       }
+
+       pr_err("setting avmute to true\n");
+
+       hdmi = hdmi_ctrl->ctrl.ctrl;
+       sde_hdmi_config_avmute(hdmi, true);
+}
+
 void sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl)
 {
 
@@ -645,10 +661,6 @@ static void sde_hdmi_tx_hdcp_cb_work(struct work_struct *work)
 
                hdmi_ctrl->auth_state = false;
 
-               if (sde_hdmi_tx_is_encryption_set(hdmi_ctrl) ||
-                       !sde_hdmi_tx_is_stream_shareable(hdmi_ctrl))
-                       rc = sde_hdmi_config_avmute(hdmi, true);
-
                if (sde_hdmi_tx_is_panel_on(hdmi_ctrl)) {
                        pr_debug("%s: Reauthenticating\n", __func__);
                        if (hdmi_ctrl->hdcp_ops && hdmi_ctrl->hdcp_data) {
@@ -666,7 +678,7 @@ static void sde_hdmi_tx_hdcp_cb_work(struct work_struct *work)
                }
 
                break;
-               case HDCP_STATE_AUTH_FAIL_NOREAUTH:
+       case HDCP_STATE_AUTH_FAIL_NOREAUTH:
                if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
                        if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
                                hdcp1_set_enc(false);
@@ -2472,6 +2484,7 @@ static int _sde_hdmi_init_hdcp(struct sde_hdmi *hdmi_ctrl)
        hdcp_init_data.mutex         = &hdmi_ctrl->hdcp_mutex;
        hdcp_init_data.workq         = hdmi->workq;
        hdcp_init_data.notify_status = sde_hdmi_tx_hdcp_cb;
+       hdcp_init_data.avmute_sink   = sde_hdmi_tx_set_avmute;
        hdcp_init_data.cb_data       = (void *)hdmi_ctrl;
        hdcp_init_data.hdmi_tx_ver   = hdmi_ctrl->hdmi_tx_major_version;
        hdcp_init_data.sec_access    = true;
index a4f4775..fbb8bd1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -321,11 +321,27 @@ int min_enc_lvl)
 
        SDE_HDCP_DEBUG("enc level changed %d\n", min_enc_lvl);
 
+       /* notify the client first about the new level */
+       if (enc_notify && ctrl->init_data.notify_status)
+               ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl);
+
        cdata.context = ctrl->lib_ctx;
        sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
 
-       if (enc_notify && ctrl->init_data.notify_status)
-               ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl);
+static void sde_hdmi_hdcp2p2_mute_sink(void *client_ctx)
+{
+       struct sde_hdmi_hdcp2p2_ctrl *ctrl =
+               (struct sde_hdmi_hdcp2p2_ctrl *)client_ctx;
+
+       if (!ctrl) {
+               SDE_ERROR("invalid input\n");
+               return;
+       }
+
+       /* call into client to send avmute to the sink */
+       if (ctrl->init_data.avmute_sink)
+               ctrl->init_data.avmute_sink(ctrl->init_data.cb_data);
 }
 
 static void sde_hdmi_hdcp2p2_auth_failed(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
@@ -930,6 +946,7 @@ void *sde_hdmi_hdcp2p2_init(struct sde_hdcp_init_data *init_data)
                .wakeup = sde_hdmi_hdcp2p2_wakeup,
                .notify_lvl_change = sde_hdmi_hdcp2p2_min_level_change,
                .srm_cb = sde_hdmi_hdcp2p2_srm_cb,
+               .mute_sink = sde_hdmi_hdcp2p2_mute_sink,
        };
 
        static struct hdcp_txmtr_ops txmtr_ops;
index ff53071..e50577e 100644 (file)
@@ -2143,6 +2143,12 @@ static inline void _sde_plane_set_scaler_v2(struct sde_phy_plane *pp,
                return;
        }
 
+       /* detach/ignore user data if 'disabled' */
+       if (!scale_v2.enable) {
+               SDE_DEBUG_PLANE(psde, "scale data removed\n");
+               return;
+       }
+
        /* populate from user space */
        pe = &(pp->pixel_ext);
        memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
index 415e846..90c0d1c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -59,6 +59,7 @@ struct sde_hdcp_init_data {
        struct workqueue_struct *workq;
        void *cb_data;
        void (*notify_status)(void *cb_data, enum sde_hdcp_states status);
+       void (*avmute_sink)(void *cb_data);
        struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
        u8 sink_rx_status;
        u16 *version;
index 6c69cd5..ddbe54f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1359,6 +1359,9 @@ static void sde_hdcp_1x_notify_topology(void)
 
 static void sde_hdcp_1x_update_auth_status(struct sde_hdcp_1x *hdcp)
 {
+       if (sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL))
+               hdcp->init_data.avmute_sink(hdcp->init_data.cb_data);
+
        if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
                sde_hdcp_1x_cache_topology(hdcp);
                sde_hdcp_1x_notify_topology();
@@ -1853,7 +1856,8 @@ void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data)
 
        if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
                !init_data->mutex || !init_data->notify_status ||
-               !init_data->workq || !init_data->cb_data) {
+               !init_data->workq || !init_data->cb_data ||
+               !init_data->avmute_sink) {
                pr_err("invalid input\n");
                goto error;
        }
index ababdaa..1855b47 100644 (file)
@@ -253,12 +253,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
                nv_connector->edid = NULL;
        }
 
-       /* Outputs are only polled while runtime active, so acquiring a
-        * runtime PM ref here is unnecessary (and would deadlock upon
-        * runtime suspend because it waits for polling to finish).
+       /* Outputs are only polled while runtime active, so resuming the
+        * device here is unnecessary (and would deadlock upon runtime suspend
+        * because it waits for polling to finish). We do however, want to
+        * prevent the autosuspend timer from elapsing during this operation
+        * if possible.
         */
-       if (!drm_kms_helper_is_poll_worker()) {
-               ret = pm_runtime_get_sync(connector->dev->dev);
+       if (drm_kms_helper_is_poll_worker()) {
+               pm_runtime_get_noresume(dev->dev);
+       } else {
+               ret = pm_runtime_get_sync(dev->dev);
                if (ret < 0 && ret != -EACCES)
                        return conn_status;
        }
@@ -329,10 +333,8 @@ detect_analog:
 
  out:
 
-       if (!drm_kms_helper_is_poll_worker()) {
-               pm_runtime_mark_last_busy(connector->dev->dev);
-               pm_runtime_put_autosuspend(connector->dev->dev);
-       }
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_runtime_put_autosuspend(dev->dev);
 
        return conn_status;
 }
index 2b9c3f1..ba42ed8 100644 (file)
@@ -161,7 +161,8 @@ gm204_devinit_post(struct nvkm_devinit *base, bool post)
        }
 
        /* load and execute some other ucode image (bios therm?) */
-       return pmu_load(init, 0x01, post, NULL, NULL);
+       pmu_load(init, 0x01, post, NULL, NULL);
+       return 0;
 }
 
 static const struct nvkm_devinit_func
index 02f0cb7..55ebc0a 100644 (file)
@@ -1403,6 +1403,45 @@ long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
                kgsl_context_put(context);
                break;
        }
+       case KGSL_PROP_SECURE_BUFFER_ALIGNMENT:
+       {
+               unsigned int align;
+
+               if (param->sizebytes != sizeof(unsigned int)) {
+                       result = -EINVAL;
+                       break;
+               }
+               /*
+                * XPUv2 impose the constraint of 1MB memory alignment,
+                * on the other hand Hypervisor does not have such
+                * constraints. So driver should fulfill such
+                * requirements when allocating secure memory.
+                */
+               align = MMU_FEATURE(&dev_priv->device->mmu,
+                               KGSL_MMU_HYP_SECURE_ALLOC) ? PAGE_SIZE : SZ_1M;
+
+               if (copy_to_user(param->value, &align, sizeof(align)))
+                       result = -EFAULT;
+
+               break;
+       }
+       case KGSL_PROP_SECURE_CTXT_SUPPORT:
+       {
+               unsigned int secure_ctxt;
+
+               if (param->sizebytes != sizeof(unsigned int)) {
+                       result = -EINVAL;
+                       break;
+               }
+
+               secure_ctxt = dev_priv->device->mmu.secured ? 1 : 0;
+
+               if (copy_to_user(param->value, &secure_ctxt,
+                               sizeof(secure_ctxt)))
+                       result = -EFAULT;
+
+               break;
+       }
        default:
                if (is_compat_task())
                        result = dev_priv->device->ftbl->getproperty_compat(
index d1d399c..7f6c137 100644 (file)
@@ -2012,6 +2012,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
index 60e2c9f..00d8366 100644 (file)
 #define USB_DEVICE_ID_SONY_PS3_BDREMOTE                0x0306
 #define USB_DEVICE_ID_SONY_PS3_CONTROLLER      0x0268
 #define USB_DEVICE_ID_SONY_PS4_CONTROLLER      0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2    0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE       0x0ba0
 #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER   0x03d5
 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER       0x042f
 #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER             0x0002
index 756d1ef..6124fd6 100644 (file)
@@ -955,6 +955,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        ret = sysfs_create_group(&hdev->dev.kobj,
                        &ntrig_attribute_group);
+       if (ret)
+               hid_err(hdev, "cannot create sysfs group\n");
 
        return 0;
 err_free:
index 21febbb..6f3d471 100644 (file)
@@ -2460,6 +2460,12 @@ static const struct hid_device_id sony_devices[] = {
                .driver_data = DUALSHOCK4_CONTROLLER_USB },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
                .driver_data = DUALSHOCK4_CONTROLLER_BT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+               .driver_data = DUALSHOCK4_CONTROLLER_USB },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+               .driver_data = DUALSHOCK4_CONTROLLER_BT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+               .driver_data = DUALSHOCK4_CONTROLLER_USB },
        { }
 };
 MODULE_DEVICE_TABLE(hid, sony_devices);
index 12dcbd8..2cce48d 100644 (file)
@@ -256,7 +256,6 @@ void hv_fcopy_onchannelcallback(void *context)
                 */
 
                fcopy_transaction.recv_len = recvlen;
-               fcopy_transaction.recv_channel = channel;
                fcopy_transaction.recv_req_id = requestid;
                fcopy_transaction.fcopy_msg = fcopy_msg;
 
@@ -323,6 +322,7 @@ static void fcopy_on_reset(void)
 int hv_fcopy_init(struct hv_util_service *srv)
 {
        recv_buffer = srv->recv_buffer;
+       fcopy_transaction.recv_channel = srv->channel;
 
        init_completion(&release_event);
        /*
index ce4d3a9..1771a96 100644 (file)
@@ -78,9 +78,11 @@ static void kvp_send_key(struct work_struct *dummy);
 
 static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
 static void kvp_timeout_func(struct work_struct *dummy);
+static void kvp_host_handshake_func(struct work_struct *dummy);
 static void kvp_register(int);
 
 static DECLARE_DELAYED_WORK(kvp_timeout_work, kvp_timeout_func);
+static DECLARE_DELAYED_WORK(kvp_host_handshake_work, kvp_host_handshake_func);
 static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
 
 static const char kvp_devname[] = "vmbus/hv_kvp";
@@ -131,6 +133,11 @@ static void kvp_timeout_func(struct work_struct *dummy)
        hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
 }
 
+static void kvp_host_handshake_func(struct work_struct *dummy)
+{
+       hv_poll_channel(kvp_transaction.recv_channel, hv_kvp_onchannelcallback);
+}
+
 static int kvp_handle_handshake(struct hv_kvp_msg *msg)
 {
        switch (msg->kvp_hdr.operation) {
@@ -155,7 +162,13 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
        pr_debug("KVP: userspace daemon ver. %d registered\n",
                 KVP_OP_REGISTER);
        kvp_register(dm_reg_value);
-       kvp_transaction.state = HVUTIL_READY;
+
+       /*
+        * If we're still negotiating with the host cancel the timeout
+        * work to not poll the channel twice.
+        */
+       cancel_delayed_work_sync(&kvp_host_handshake_work);
+       hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
 
        return 0;
 }
@@ -595,10 +608,26 @@ void hv_kvp_onchannelcallback(void *context)
        struct icmsg_negotiate *negop = NULL;
        int util_fw_version;
        int kvp_srv_version;
+       static enum {NEGO_NOT_STARTED,
+                    NEGO_IN_PROGRESS,
+                    NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
 
+       if (kvp_transaction.state < HVUTIL_READY) {
+               /*
+                * If userspace daemon is not connected and host is asking
+                * us to negotiate we need to delay to not lose messages.
+                * This is important for Failover IP setting.
+                */
+               if (host_negotiatied == NEGO_NOT_STARTED) {
+                       host_negotiatied = NEGO_IN_PROGRESS;
+                       schedule_delayed_work(&kvp_host_handshake_work,
+                                     HV_UTIL_NEGO_TIMEOUT * HZ);
+               }
+               return;
+       }
        if (kvp_transaction.state > HVUTIL_READY)
                return;
-
+recheck:
        vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
                         &requestid);
 
@@ -640,7 +669,6 @@ void hv_kvp_onchannelcallback(void *context)
                         */
 
                        kvp_transaction.recv_len = recvlen;
-                       kvp_transaction.recv_channel = channel;
                        kvp_transaction.recv_req_id = requestid;
                        kvp_transaction.kvp_msg = kvp_msg;
 
@@ -674,6 +702,10 @@ void hv_kvp_onchannelcallback(void *context)
                vmbus_sendpacket(channel, recv_buffer,
                                       recvlen, requestid,
                                       VM_PKT_DATA_INBAND, 0);
+
+               host_negotiatied = NEGO_FINISHED;
+
+               goto recheck;
        }
 
 }
@@ -690,6 +722,7 @@ int
 hv_kvp_init(struct hv_util_service *srv)
 {
        recv_buffer = srv->recv_buffer;
+       kvp_transaction.recv_channel = srv->channel;
 
        init_completion(&release_event);
        /*
@@ -711,6 +744,7 @@ hv_kvp_init(struct hv_util_service *srv)
 void hv_kvp_deinit(void)
 {
        kvp_transaction.state = HVUTIL_DEVICE_DYING;
+       cancel_delayed_work_sync(&kvp_host_handshake_work);
        cancel_delayed_work_sync(&kvp_timeout_work);
        cancel_work_sync(&kvp_sendkey_work);
        hvutil_transport_destroy(hvt);
index faad79a..b0feddb 100644 (file)
@@ -114,7 +114,7 @@ static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
        default:
                return -EINVAL;
        }
-       vss_transaction.state = HVUTIL_READY;
+       hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
        pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value);
        return 0;
 }
@@ -264,7 +264,6 @@ void hv_vss_onchannelcallback(void *context)
                         */
 
                        vss_transaction.recv_len = recvlen;
-                       vss_transaction.recv_channel = channel;
                        vss_transaction.recv_req_id = requestid;
                        vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
 
@@ -340,6 +339,7 @@ hv_vss_init(struct hv_util_service *srv)
                return -ENOTSUPP;
        }
        recv_buffer = srv->recv_buffer;
+       vss_transaction.recv_channel = srv->channel;
 
        /*
         * When this driver loads, the user level daemon that
index 41f5896..9dc6372 100644 (file)
@@ -326,6 +326,7 @@ static int util_probe(struct hv_device *dev,
        srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
        if (!srv->recv_buffer)
                return -ENOMEM;
+       srv->channel = dev->channel;
        if (srv->util_init) {
                ret = srv->util_init(srv);
                if (ret) {
index 75e383e..15e0649 100644 (file)
 #define HV_UTIL_TIMEOUT 30
 
 /*
+ * Timeout for guest-host handshake for services.
+ */
+#define HV_UTIL_NEGO_TIMEOUT 60
+
+/*
  * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
  * is set by CPUID(HVCPUID_VERSION_FEATURES).
  */
index 3cefd1a..9c262d9 100644 (file)
@@ -274,14 +274,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
        return clamp_val(reg, 0, 1023) & (0xff << 2);
 }
 
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
 {
-       u16 val;
+       int val1, val2;
 
-       val = i2c_smbus_read_byte_data(client, reg);
-       val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+       val1 = i2c_smbus_read_byte_data(client, reg);
+       if (val1 < 0)
+               return val1;
+       val2 = i2c_smbus_read_byte_data(client, reg + 1);
+       if (val2 < 0)
+               return val2;
 
-       return val;
+       return val1 | (val2 << 8);
 }
 
 static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
index ac63e56..9ac6e16 100644 (file)
@@ -17,7 +17,7 @@
  * Bi-directional Current/Power Monitor with I2C Interface
  * Datasheet: http://www.ti.com/product/ina230
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  * Thanks to Jan Volkering
  *
  * This program is free software; you can redistribute it and/or modify
@@ -328,6 +328,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
        return 0;
 }
 
+static ssize_t ina2xx_show_shunt(struct device *dev,
+                             struct device_attribute *da,
+                             char *buf)
+{
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
 static ssize_t ina2xx_store_shunt(struct device *dev,
                                  struct device_attribute *da,
                                  const char *buf, size_t count)
@@ -402,7 +411,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
 
 /* shunt resistance */
 static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
-                         ina2xx_show_value, ina2xx_store_shunt,
+                         ina2xx_show_shunt, ina2xx_store_shunt,
                          INA2XX_CALIBRATION);
 
 /* update interval (ina226 only) */
index 9f9dd57..6bd4c89 100644 (file)
@@ -2196,6 +2196,42 @@ static struct attribute *coresight_etmv4_attrs[] = {
        NULL,
 };
 
+struct etmv4_reg {
+       void __iomem *addr;
+       u32 data;
+};
+
+static void do_smp_cross_read(void *data)
+{
+       struct etmv4_reg *reg = data;
+
+       reg->data = readl_relaxed(reg->addr);
+}
+
+static u32 etmv4_cross_read(const struct device *dev, u32 offset)
+{
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+       struct etmv4_reg reg;
+
+       reg.addr = drvdata->base + offset;
+
+       smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
+       return reg.data;
+}
+#define coresight_cross_read(name, offset)                             \
+static ssize_t name##_show(struct device *_dev,                                \
+                          struct device_attribute *attr, char *buf)    \
+{                                                                      \
+       u32 val;                                                        \
+       pm_runtime_get_sync(_dev->parent);                              \
+                                                                       \
+       val = etmv4_cross_read(_dev->parent, offset);                   \
+                                                                       \
+       pm_runtime_put_sync(_dev->parent);                              \
+       return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);                \
+}                                                                      \
+static DEVICE_ATTR_RO(name)
+
 #define coresight_simple_func(name, offset)                            \
 static ssize_t name##_show(struct device *_dev,                                \
                           struct device_attribute *attr, char *buf)    \
@@ -2206,17 +2242,17 @@ static ssize_t name##_show(struct device *_dev,                         \
 }                                                                      \
 DEVICE_ATTR_RO(name)
 
-coresight_simple_func(trcoslsr, TRCOSLSR);
-coresight_simple_func(trcpdcr, TRCPDCR);
-coresight_simple_func(trcpdsr, TRCPDSR);
-coresight_simple_func(trclsr, TRCLSR);
-coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
-coresight_simple_func(trcdevid, TRCDEVID);
-coresight_simple_func(trcdevtype, TRCDEVTYPE);
-coresight_simple_func(trcpidr0, TRCPIDR0);
-coresight_simple_func(trcpidr1, TRCPIDR1);
-coresight_simple_func(trcpidr2, TRCPIDR2);
-coresight_simple_func(trcpidr3, TRCPIDR3);
+coresight_cross_read(trcoslsr, TRCOSLSR);
+coresight_cross_read(trcpdcr, TRCPDCR);
+coresight_cross_read(trcpdsr, TRCPDSR);
+coresight_cross_read(trclsr, TRCLSR);
+coresight_cross_read(trcauthstatus, TRCAUTHSTATUS);
+coresight_cross_read(trcdevid, TRCDEVID);
+coresight_cross_read(trcdevtype, TRCDEVTYPE);
+coresight_cross_read(trcpidr0, TRCPIDR0);
+coresight_cross_read(trcpidr1, TRCPIDR1);
+coresight_cross_read(trcpidr2, TRCPIDR2);
+coresight_cross_read(trcpidr3, TRCPIDR3);
 
 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
        &dev_attr_trcoslsr.attr,
@@ -2233,19 +2269,19 @@ static struct attribute *coresight_etmv4_mgmt_attrs[] = {
        NULL,
 };
 
-coresight_simple_func(trcidr0, TRCIDR0);
-coresight_simple_func(trcidr1, TRCIDR1);
-coresight_simple_func(trcidr2, TRCIDR2);
-coresight_simple_func(trcidr3, TRCIDR3);
-coresight_simple_func(trcidr4, TRCIDR4);
-coresight_simple_func(trcidr5, TRCIDR5);
+coresight_cross_read(trcidr0, TRCIDR0);
+coresight_cross_read(trcidr1, TRCIDR1);
+coresight_cross_read(trcidr2, TRCIDR2);
+coresight_cross_read(trcidr3, TRCIDR3);
+coresight_cross_read(trcidr4, TRCIDR4);
+coresight_cross_read(trcidr5, TRCIDR5);
 /* trcidr[6,7] are reserved */
-coresight_simple_func(trcidr8, TRCIDR8);
-coresight_simple_func(trcidr9, TRCIDR9);
-coresight_simple_func(trcidr10, TRCIDR10);
-coresight_simple_func(trcidr11, TRCIDR11);
-coresight_simple_func(trcidr12, TRCIDR12);
-coresight_simple_func(trcidr13, TRCIDR13);
+coresight_cross_read(trcidr8, TRCIDR8);
+coresight_cross_read(trcidr9, TRCIDR9);
+coresight_cross_read(trcidr10, TRCIDR10);
+coresight_cross_read(trcidr11, TRCIDR11);
+coresight_cross_read(trcidr12, TRCIDR12);
+coresight_cross_read(trcidr13, TRCIDR13);
 
 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
        &dev_attr_trcidr0.attr,
index 47581c3..f78069c 100644 (file)
@@ -1272,6 +1272,13 @@ static void i801_add_tco(struct i801_priv *priv)
 }
 
 #ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+                                     acpi_physical_address address)
+{
+       return address >= priv->smba &&
+              address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
 static acpi_status
 i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
                     u64 *value, void *handler_context, void *region_context)
@@ -1287,7 +1294,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
         */
        mutex_lock(&priv->acpi_lock);
 
-       if (!priv->acpi_reserved) {
+       if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
                priv->acpi_reserved = true;
 
                dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
index d72953f..198e558 100644 (file)
@@ -2330,6 +2330,12 @@ i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        struct i2c_msm_ctrl      *ctrl = i2c_get_adapdata(adap);
        struct i2c_msm_xfer      *xfer = &ctrl->xfer;
 
+       if (num < 1) {
+               dev_err(ctrl->dev,
+               "error on number of msgs(%d) received\n", num);
+               return -EINVAL;
+       }
+
        if (IS_ERR_OR_NULL(msgs)) {
                dev_err(ctrl->dev, " error on msgs Accessing invalid  pointer location\n");
                return PTR_ERR(msgs);
index 7aa7b9c..efefcfa 100644 (file)
@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
                        mt_params[3].type = ACPI_TYPE_INTEGER;
                        mt_params[3].integer.value = len;
                        mt_params[4].type = ACPI_TYPE_BUFFER;
+                       mt_params[4].buffer.length = len;
                        mt_params[4].buffer.pointer = data->block + 1;
                }
                break;
index e8d03bc..3f6b43f 100644 (file)
@@ -394,11 +394,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
                return ret;
 
        for (msg = msgs; msg < emsg; msg++) {
-               /* If next message is read, skip the stop condition */
-               bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-               /* but, force it if I2C_M_STOP is set */
-               if (msg->flags & I2C_M_STOP)
-                       stop = true;
+               /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+               bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
                ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
                if (ret)
index e3c3861..ad5eb8b 100644 (file)
@@ -247,11 +247,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
                return ret;
 
        for (msg = msgs; msg < emsg; msg++) {
-               /* If next message is read, skip the stop condition */
-               bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-               /* but, force it if I2C_M_STOP is set */
-               if (msg->flags & I2C_M_STOP)
-                       stop = true;
+               /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+               bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
                ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
                if (ret)
index 55aa8d3..7525e9f 100644 (file)
@@ -123,6 +123,8 @@ static DEFINE_MUTEX(mut);
 static DEFINE_IDR(ctx_idr);
 static DEFINE_IDR(multicast_idr);
 
+static const struct file_operations ucma_fops;
+
 static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
 {
@@ -1535,6 +1537,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
        f = fdget(cmd.fd);
        if (!f.file)
                return -ENOENT;
+       if (f.file->f_op != &ucma_fops) {
+               ret = -EINVAL;
+               goto file_put;
+       }
 
        /* Validate current fd and prevent destruction of id. */
        ctx = ucma_get_ctx(f.file->private_data, cmd.id);
@@ -1703,6 +1709,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
                mutex_lock(&mut);
                if (!ctx->closing) {
                        mutex_unlock(&mut);
+                       ucma_put_ctx(ctx);
+                       wait_for_completion(&ctx->comp);
                        /* rdma_destroy_id ensures that no event handlers are
                         * inflight for that id before releasing it.
                         */
index 53aa751..04206c6 100644 (file)
@@ -1183,6 +1183,12 @@ static void flush_qp(struct c4iw_qp *qhp)
 
        t4_set_wq_in_error(&qhp->wq);
        if (qhp->ibqp.uobject) {
+
+               /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+               if (qhp->wq.flushed)
+                       return;
+
+               qhp->wq.flushed = 1;
                t4_set_cq_in_error(&rchp->cq);
                spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
index 4fd2892..1897c40 100644 (file)
@@ -2594,7 +2594,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_rdma_ch *ch;
-       int i;
+       int i, j;
        u8 status;
 
        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2608,8 +2608,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
-               for (i = 0; i < target->req_ring_size; ++i) {
-                       struct srp_request *req = &ch->req_ring[i];
+               for (j = 0; j < target->req_ring_size; ++j) {
+                       struct srp_request *req = &ch->req_ring[j];
 
                        srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
                }
index f123583..fdeda0b 100644 (file)
@@ -79,8 +79,7 @@ MODULE_LICENSE("GPL");
  */
 
 
-static unsigned char atakbd_keycode[0x72] = {  /* American layout */
-       [0]      = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = {  /* American layout */
        [1]      = KEY_ESC,
        [2]      = KEY_1,
        [3]      = KEY_2,
@@ -121,9 +120,9 @@ static unsigned char atakbd_keycode[0x72] = {       /* American layout */
        [38]     = KEY_L,
        [39]     = KEY_SEMICOLON,
        [40]     = KEY_APOSTROPHE,
-       [41]     = KEY_BACKSLASH,       /* FIXME, '#' */
+       [41]     = KEY_GRAVE,
        [42]     = KEY_LEFTSHIFT,
-       [43]     = KEY_GRAVE,           /* FIXME: '~' */
+       [43]     = KEY_BACKSLASH,
        [44]     = KEY_Z,
        [45]     = KEY_X,
        [46]     = KEY_C,
@@ -149,45 +148,34 @@ static unsigned char atakbd_keycode[0x72] = {     /* American layout */
        [66]     = KEY_F8,
        [67]     = KEY_F9,
        [68]     = KEY_F10,
-       [69]     = KEY_ESC,
-       [70]     = KEY_DELETE,
-       [71]     = KEY_KP7,
-       [72]     = KEY_KP8,
-       [73]     = KEY_KP9,
+       [71]     = KEY_HOME,
+       [72]     = KEY_UP,
        [74]     = KEY_KPMINUS,
-       [75]     = KEY_KP4,
-       [76]     = KEY_KP5,
-       [77]     = KEY_KP6,
+       [75]     = KEY_LEFT,
+       [77]     = KEY_RIGHT,
        [78]     = KEY_KPPLUS,
-       [79]     = KEY_KP1,
-       [80]     = KEY_KP2,
-       [81]     = KEY_KP3,
-       [82]     = KEY_KP0,
-       [83]     = KEY_KPDOT,
-       [90]     = KEY_KPLEFTPAREN,
-       [91]     = KEY_KPRIGHTPAREN,
-       [92]     = KEY_KPASTERISK,      /* FIXME */
-       [93]     = KEY_KPASTERISK,
-       [94]     = KEY_KPPLUS,
-       [95]     = KEY_HELP,
+       [80]     = KEY_DOWN,
+       [82]     = KEY_INSERT,
+       [83]     = KEY_DELETE,
        [96]     = KEY_102ND,
-       [97]     = KEY_KPASTERISK,      /* FIXME */
-       [98]     = KEY_KPSLASH,
+       [97]     = KEY_UNDO,
+       [98]     = KEY_HELP,
        [99]     = KEY_KPLEFTPAREN,
        [100]    = KEY_KPRIGHTPAREN,
        [101]    = KEY_KPSLASH,
        [102]    = KEY_KPASTERISK,
-       [103]    = KEY_UP,
-       [104]    = KEY_KPASTERISK,      /* FIXME */
-       [105]    = KEY_LEFT,
-       [106]    = KEY_RIGHT,
-       [107]    = KEY_KPASTERISK,      /* FIXME */
-       [108]    = KEY_DOWN,
-       [109]    = KEY_KPASTERISK,      /* FIXME */
-       [110]    = KEY_KPASTERISK,      /* FIXME */
-       [111]    = KEY_KPASTERISK,      /* FIXME */
-       [112]    = KEY_KPASTERISK,      /* FIXME */
-       [113]    = KEY_KPASTERISK       /* FIXME */
+       [103]    = KEY_KP7,
+       [104]    = KEY_KP8,
+       [105]    = KEY_KP9,
+       [106]    = KEY_KP4,
+       [107]    = KEY_KP5,
+       [108]    = KEY_KP6,
+       [109]    = KEY_KP1,
+       [110]    = KEY_KP2,
+       [111]    = KEY_KP3,
+       [112]    = KEY_KP0,
+       [113]    = KEY_KPDOT,
+       [114]    = KEY_KPENTER,
 };
 
 static struct input_dev *atakbd_dev;
@@ -195,21 +183,15 @@ static struct input_dev *atakbd_dev;
 static void atakbd_interrupt(unsigned char scancode, char down)
 {
 
-       if (scancode < 0x72) {          /* scancodes < 0xf2 are keys */
+       if (scancode < 0x73) {          /* scancodes < 0xf3 are keys */
 
                // report raw events here?
 
                scancode = atakbd_keycode[scancode];
 
-               if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
-                       input_report_key(atakbd_dev, scancode, 1);
-                       input_report_key(atakbd_dev, scancode, 0);
-                       input_sync(atakbd_dev);
-               } else {
-                       input_report_key(atakbd_dev, scancode, down);
-                       input_sync(atakbd_dev);
-               }
-       } else                          /* scancodes >= 0xf2 are mouse data, most likely */
+               input_report_key(atakbd_dev, scancode, down);
+               input_sync(atakbd_dev);
+       } else                          /* scancodes >= 0xf3 are mouse data, most likely */
                printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
 
        return;
index 174bb52..84aead1 100644 (file)
@@ -1180,6 +1180,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
 static const char * const middle_button_pnp_ids[] = {
        "LEN2131", /* ThinkPad P52 w/ NFC */
        "LEN2132", /* ThinkPad P52 */
+       "LEN2133", /* ThinkPad P72 w/ NFC */
+       "LEN2134", /* ThinkPad P72 */
        NULL
 };
 
index 54e50fc..9d85e57 100644 (file)
@@ -3388,8 +3388,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
 
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
-       if (from_cblock(new_size) > from_cblock(cache->cache_size))
-               return true;
+       if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+               if (cache->sized) {
+                       DMERR("%s: unable to extend cache due to missing cache table reload",
+                             cache_device_name(cache));
+                       return false;
+               }
+       }
 
        /*
         * We can't drop a dirty block when shrinking the cache.
index e339f42..2711aa9 100644 (file)
@@ -190,6 +190,12 @@ struct dm_pool_metadata {
        sector_t data_block_size;
 
        /*
+        * We reserve a section of the metadata for commit overhead.
+        * All reported space does *not* include this.
+        */
+       dm_block_t metadata_reserve;
+
+       /*
         * Set if a transaction has to be aborted but the attempt to roll back
         * to the previous (good) transaction failed.  The only pool metadata
         * operation possible in this state is the closing of the device.
@@ -827,6 +833,20 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
        return dm_tm_commit(pmd->tm, sblock);
 }
 
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+       int r;
+       dm_block_t total;
+       dm_block_t max_blocks = 4096; /* 16M */
+
+       r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+       if (r) {
+               DMERR("could not get size of metadata device");
+               pmd->metadata_reserve = max_blocks;
+       } else
+               pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
+}
+
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
                                               sector_t data_block_size,
                                               bool format_device)
@@ -860,6 +880,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
                return ERR_PTR(r);
        }
 
+       __set_metadata_reserve(pmd);
+
        return pmd;
 }
 
@@ -1763,6 +1785,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
        down_read(&pmd->root_lock);
        if (!pmd->fail_io)
                r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+       if (!r) {
+               if (*result < pmd->metadata_reserve)
+                       *result = 0;
+               else
+                       *result -= pmd->metadata_reserve;
+       }
        up_read(&pmd->root_lock);
 
        return r;
@@ -1875,8 +1904,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
        int r = -EINVAL;
 
        down_write(&pmd->root_lock);
-       if (!pmd->fail_io)
+       if (!pmd->fail_io) {
                r = __resize_space_map(pmd->metadata_sm, new_count);
+               if (!r)
+                       __set_metadata_reserve(pmd);
+       }
        up_write(&pmd->root_lock);
 
        return r;
index 87454a7..3ca7164 100644 (file)
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
 enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
        PM_OUT_OF_DATA_SPACE,   /* metadata may be changed, though data may not be allocated */
+
+       /*
+        * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
+        */
+       PM_OUT_OF_METADATA_SPACE,
        PM_READ_ONLY,           /* metadata may not be changed */
+
        PM_FAIL,                /* all I/O fails */
 };
 
@@ -1301,7 +1307,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
 static void requeue_bios(struct pool *pool);
 
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+       return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+       return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+       int r;
+       const char *ooms_reason = NULL;
+       dm_block_t nr_free;
+
+       r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+       if (r)
+               ooms_reason = "Could not get free metadata blocks";
+       else if (!nr_free)
+               ooms_reason = "No free metadata blocks";
+
+       if (ooms_reason && !is_read_only(pool)) {
+               DMERR("%s", ooms_reason);
+               set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+       }
+}
+
+static void check_for_data_space(struct pool *pool)
 {
        int r;
        dm_block_t nr_free;
@@ -1327,14 +1361,16 @@ static int commit(struct pool *pool)
 {
        int r;
 
-       if (get_pool_mode(pool) >= PM_READ_ONLY)
+       if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
                return -EINVAL;
 
        r = dm_pool_commit_metadata(pool->pmd);
        if (r)
                metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
-       else
-               check_for_space(pool);
+       else {
+               check_for_metadata_space(pool);
+               check_for_data_space(pool);
+       }
 
        return r;
 }
@@ -1400,6 +1436,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
                return r;
        }
 
+       r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+       if (r) {
+               metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+               return r;
+       }
+
+       if (!free_blocks) {
+               /* Let's commit before we use up the metadata reserve. */
+               r = commit(pool);
+               if (r)
+                       return r;
+       }
+
        return 0;
 }
 
@@ -1431,6 +1480,7 @@ static int should_error_unserviceable_bio(struct pool *pool)
        case PM_OUT_OF_DATA_SPACE:
                return pool->pf.error_if_no_space ? -ENOSPC : 0;
 
+       case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
        case PM_FAIL:
                return -EIO;
@@ -2401,8 +2451,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                error_retry_list(pool);
                break;
 
+       case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
-               if (old_mode != new_mode)
+               if (!is_read_only_pool_mode(old_mode))
                        notify_of_pool_mode_change(pool, "read-only");
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_read_only;
@@ -3333,6 +3384,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
                DMINFO("%s: growing the metadata device from %llu to %llu blocks",
                       dm_device_name(pool->pool_md),
                       sb_metadata_dev_size, metadata_dev_size);
+
+               if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+                       set_pool_mode(pool, PM_WRITE);
+
                r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
                if (r) {
                        metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3636,7 +3691,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
-       if (get_pool_mode(pool) >= PM_READ_ONLY) {
+       if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
                DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
                      dm_device_name(pool->pool_md));
                return -EOPNOTSUPP;
@@ -3710,6 +3765,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
        dm_block_t nr_blocks_data;
        dm_block_t nr_blocks_metadata;
        dm_block_t held_root;
+       enum pool_mode mode;
        char buf[BDEVNAME_SIZE];
        char buf2[BDEVNAME_SIZE];
        struct pool_c *pt = ti->private;
@@ -3780,9 +3836,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
                else
                        DMEMIT("- ");
 
-               if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+               mode = get_pool_mode(pool);
+               if (mode == PM_OUT_OF_DATA_SPACE)
                        DMEMIT("out_of_data_space ");
-               else if (pool->pf.mode == PM_READ_ONLY)
+               else if (is_read_only_pool_mode(mode))
                        DMEMIT("ro ");
                else
                        DMEMIT("rw ");
index a7a561a..617a0ae 100644 (file)
@@ -239,15 +239,6 @@ static void recover_bitmaps(struct md_thread *thread)
        while (cinfo->recovery_map) {
                slot = fls64((u64)cinfo->recovery_map) - 1;
 
-               /* Clear suspend_area associated with the bitmap */
-               spin_lock_irq(&cinfo->suspend_lock);
-               list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
-                       if (slot == s->slot) {
-                               list_del(&s->list);
-                               kfree(s);
-                       }
-               spin_unlock_irq(&cinfo->suspend_lock);
-
                snprintf(str, 64, "bitmap%04d", slot);
                bm_lockres = lockres_init(mddev, str, NULL, 1);
                if (!bm_lockres) {
@@ -266,6 +257,16 @@ static void recover_bitmaps(struct md_thread *thread)
                        pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
                        goto dlm_unlock;
                }
+
+               /* Clear suspend_area associated with the bitmap */
+               spin_lock_irq(&cinfo->suspend_lock);
+               list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+                       if (slot == s->slot) {
+                               list_del(&s->list);
+                               kfree(s);
+                       }
+               spin_unlock_irq(&cinfo->suspend_lock);
+
                if (hi > 0) {
                        /* TODO:Wait for current resync to get over */
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
index 9620e1d..21f4c8f 100644 (file)
@@ -4336,11 +4336,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
                allow_barrier(conf);
        }
 
+       raise_barrier(conf, 0);
 read_more:
        /* Now schedule reads for blocks from sector_nr to last */
        r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
        r10_bio->state = 0;
-       raise_barrier(conf, sectors_done != 0);
+       raise_barrier(conf, 1);
        atomic_set(&r10_bio->remaining, 0);
        r10_bio->mddev = mddev;
        r10_bio->sector = sector_nr;
@@ -4445,6 +4446,8 @@ bio_full:
        if (sector_nr <= last)
                goto read_more;
 
+       lower_barrier(conf);
+
        /* Now that we have done the whole section we can
         * update reshape_progress
         */
index f150a8b..70f1a80 100644 (file)
@@ -834,7 +834,7 @@ static int ov772x_set_params(struct ov772x_priv *priv,
         * set COM8
         */
        if (priv->band_filter) {
-               ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1);
+               ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
                if (!ret)
                        ret = ov772x_mask_set(client, BDBASE,
                                              0xff, 256 - priv->band_filter);
index 6e66484..667d372 100644 (file)
@@ -391,12 +391,17 @@ static void __isp_video_try_fmt(struct fimc_isp *isp,
                                struct v4l2_pix_format_mplane *pixm,
                                const struct fimc_fmt **fmt)
 {
-       *fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+       const struct fimc_fmt *__fmt;
+
+       __fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+       if (fmt)
+               *fmt = __fmt;
 
        pixm->colorspace = V4L2_COLORSPACE_SRGB;
        pixm->field = V4L2_FIELD_NONE;
-       pixm->num_planes = (*fmt)->memplanes;
-       pixm->pixelformat = (*fmt)->fourcc;
+       pixm->num_planes = __fmt->memplanes;
+       pixm->pixelformat = __fmt->fourcc;
        /*
         * TODO: double check with the docmentation these width/height
         * constraints are correct.
index ae8c6b3..7f0ed5a 100644 (file)
@@ -1417,7 +1417,7 @@ static int viu_of_probe(struct platform_device *op)
                                     sizeof(struct viu_reg), DRV_NAME)) {
                dev_err(&op->dev, "Error while requesting mem region\n");
                ret = -EBUSY;
-               goto err;
+               goto err_irq;
        }
 
        /* remap registers */
@@ -1425,7 +1425,7 @@ static int viu_of_probe(struct platform_device *op)
        if (!viu_regs) {
                dev_err(&op->dev, "Can't map register set\n");
                ret = -ENOMEM;
-               goto err;
+               goto err_irq;
        }
 
        /* Prepare our private structure */
@@ -1433,7 +1433,7 @@ static int viu_of_probe(struct platform_device *op)
        if (!viu_dev) {
                dev_err(&op->dev, "Can't allocate private structure\n");
                ret = -ENOMEM;
-               goto err;
+               goto err_irq;
        }
 
        viu_dev->vr = viu_regs;
@@ -1449,16 +1449,21 @@ static int viu_of_probe(struct platform_device *op)
        ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
        if (ret < 0) {
                dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
-               goto err;
+               goto err_irq;
        }
 
        ad = i2c_get_adapter(0);
+       if (!ad) {
+               ret = -EFAULT;
+               dev_err(&op->dev, "couldn't get i2c adapter\n");
+               goto err_v4l2;
+       }
 
        v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
        if (viu_dev->hdl.error) {
                ret = viu_dev->hdl.error;
                dev_err(&op->dev, "couldn't register control\n");
-               goto err_vdev;
+               goto err_i2c;
        }
        /* This control handler will inherit the control(s) from the
           sub-device(s). */
@@ -1476,7 +1481,7 @@ static int viu_of_probe(struct platform_device *op)
        vdev = video_device_alloc();
        if (vdev == NULL) {
                ret = -ENOMEM;
-               goto err_vdev;
+               goto err_hdl;
        }
 
        *vdev = viu_template;
@@ -1497,7 +1502,7 @@ static int viu_of_probe(struct platform_device *op)
        ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
        if (ret < 0) {
                video_device_release(viu_dev->vdev);
-               goto err_vdev;
+               goto err_unlock;
        }
 
        /* enable VIU clock */
@@ -1505,12 +1510,12 @@ static int viu_of_probe(struct platform_device *op)
        if (IS_ERR(clk)) {
                dev_err(&op->dev, "failed to lookup the clock!\n");
                ret = PTR_ERR(clk);
-               goto err_clk;
+               goto err_vdev;
        }
        ret = clk_prepare_enable(clk);
        if (ret) {
                dev_err(&op->dev, "failed to enable the clock!\n");
-               goto err_clk;
+               goto err_vdev;
        }
        viu_dev->clk = clk;
 
@@ -1521,7 +1526,7 @@ static int viu_of_probe(struct platform_device *op)
        if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
                dev_err(&op->dev, "Request VIU IRQ failed.\n");
                ret = -ENODEV;
-               goto err_irq;
+               goto err_clk;
        }
 
        mutex_unlock(&viu_dev->lock);
@@ -1529,16 +1534,19 @@ static int viu_of_probe(struct platform_device *op)
        dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
        return ret;
 
-err_irq:
-       clk_disable_unprepare(viu_dev->clk);
 err_clk:
-       video_unregister_device(viu_dev->vdev);
+       clk_disable_unprepare(viu_dev->clk);
 err_vdev:
-       v4l2_ctrl_handler_free(&viu_dev->hdl);
+       video_unregister_device(viu_dev->vdev);
+err_unlock:
        mutex_unlock(&viu_dev->lock);
+err_hdl:
+       v4l2_ctrl_handler_free(&viu_dev->hdl);
+err_i2c:
        i2c_put_adapter(ad);
+err_v4l2:
        v4l2_device_unregister(&viu_dev->v4l2_dev);
-err:
+err_irq:
        irq_dispose_mapping(viu_irq);
        return ret;
 }
index 91e02c1..136ea18 100644 (file)
@@ -303,7 +303,7 @@ static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
 static int isp_xclk_init(struct isp_device *isp)
 {
        struct device_node *np = isp->dev->of_node;
-       struct clk_init_data init;
+       struct clk_init_data init = { 0 };
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
index fa6af4a..f97f4bc 100644 (file)
@@ -117,6 +117,8 @@ static int sensor_set_power(struct camif_dev *camif, int on)
 
        if (camif->sensor.power_count == !on)
                err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+       if (err == -ENOIOCTLCMD)
+               err = 0;
        if (!err)
                sensor->power_count += on ? 1 : -1;
 
index 6e02a15..abddb62 100644 (file)
@@ -389,8 +389,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
                            msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
                                reg |= 0x100000;
 
-                       ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
-                                       msg[0].len - 3);
+                       ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+                                                                &msg[0].buf[3],
+                                                                msg[0].len - 3)
+                                               : -EOPNOTSUPP;
                } else {
                        /* I2C write */
                        u8 buf[MAX_XFER_SIZE];
index 095f5db..4f317e2 100644 (file)
@@ -275,6 +275,11 @@ static int register_dvb(struct tm6000_core *dev)
 
        ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
                                        THIS_MODULE, &dev->udev->dev, adapter_nr);
+       if (ret < 0) {
+               pr_err("tm6000: couldn't register the adapter!\n");
+               goto err;
+       }
+
        dvb->adapter.priv = dev;
 
        if (dvb->frontend) {
index 2b276ab..a4048a0 100644 (file)
@@ -163,14 +163,27 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
        }
 }
 
+static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
+{
+       /*
+        * Return the size of the video probe and commit controls, which depends
+        * on the protocol version.
+        */
+       if (stream->dev->uvc_version < 0x0110)
+               return 26;
+       else if (stream->dev->uvc_version < 0x0150)
+               return 34;
+       else
+               return 48;
+}
+
 static int uvc_get_video_ctrl(struct uvc_streaming *stream,
        struct uvc_streaming_control *ctrl, int probe, __u8 query)
 {
+       __u16 size = uvc_video_ctrl_size(stream);
        __u8 *data;
-       __u16 size;
        int ret;
 
-       size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
        if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
                        query == UVC_GET_DEF)
                return -EIO;
@@ -225,7 +238,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
        ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
        ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
 
-       if (size == 34) {
+       if (size >= 34) {
                ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
                ctrl->bmFramingInfo = data[30];
                ctrl->bPreferedVersion = data[31];
@@ -254,11 +267,10 @@ out:
 static int uvc_set_video_ctrl(struct uvc_streaming *stream,
        struct uvc_streaming_control *ctrl, int probe)
 {
+       __u16 size = uvc_video_ctrl_size(stream);
        __u8 *data;
-       __u16 size;
        int ret;
 
-       size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;
@@ -275,7 +287,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
        put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
        put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
 
-       if (size == 34) {
+       if (size >= 34) {
                put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
                data[30] = ctrl->bmFramingInfo;
                data[31] = ctrl->bPreferedVersion;
index 8d3171c..b47ac4e 100644 (file)
@@ -119,14 +119,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
        if (sev == NULL)
                return;
 
-       /*
-        * If the event has been added to the fh->subscribed list, but its
-        * add op has not completed yet elems will be 0, treat this as
-        * not being subscribed.
-        */
-       if (!sev->elems)
-               return;
-
        /* Increase event sequence number on fh. */
        fh->sequence++;
 
@@ -212,6 +204,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        struct v4l2_subscribed_event *sev, *found_ev;
        unsigned long flags;
        unsigned i;
+       int ret = 0;
 
        if (sub->type == V4L2_EVENT_ALL)
                return -EINVAL;
@@ -229,31 +222,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;
+       sev->elems = elems;
+
+       mutex_lock(&fh->subscribe_lock);
 
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-       if (!found_ev)
-               list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
        if (found_ev) {
+               /* Already listening */
                kfree(sev);
-               return 0; /* Already listening */
+               goto out_unlock;
        }
 
        if (sev->ops && sev->ops->add) {
-               int ret = sev->ops->add(sev, elems);
+               ret = sev->ops->add(sev, elems);
                if (ret) {
-                       sev->ops = NULL;
-                       v4l2_event_unsubscribe(fh, sub);
-                       return ret;
+                       kfree(sev);
+                       goto out_unlock;
                }
        }
 
-       /* Mark as ready for use */
-       sev->elems = elems;
+       spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+       list_add(&sev->list, &fh->subscribed);
+       spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-       return 0;
+out_unlock:
+       mutex_unlock(&fh->subscribe_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 
@@ -292,6 +290,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                return 0;
        }
 
+       mutex_lock(&fh->subscribe_lock);
+
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -310,6 +310,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                sev->ops->del(sev);
 
        kfree(sev);
+       mutex_unlock(&fh->subscribe_lock);
 
        return 0;
 }
index c97067a..1d076de 100644 (file)
@@ -49,6 +49,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
        INIT_LIST_HEAD(&fh->available);
        INIT_LIST_HEAD(&fh->subscribed);
        fh->sequence = -1;
+       mutex_init(&fh->subscribe_lock);
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_init);
 
@@ -93,6 +94,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
        if (fh->vdev == NULL)
                return;
        v4l2_event_unsubscribe_all(fh);
+       mutex_destroy(&fh->subscribe_lock);
        fh->vdev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_exit);
index 1d924d1..b9dc2fc 100644 (file)
@@ -548,8 +548,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
 }
 
 static const struct of_device_id usbhs_child_match_table[] = {
-       { .compatible = "ti,omap-ehci", },
-       { .compatible = "ti,omap-ohci", },
+       { .compatible = "ti,ehci-omap", },
+       { .compatible = "ti,ohci-omap3", },
        { }
 };
 
@@ -875,6 +875,7 @@ static struct platform_driver usbhs_omap_driver = {
                .pm             = &usbhsomap_dev_pm_ops,
                .of_match_table = usbhs_omap_dt_ids,
        },
+       .probe          = usbhs_omap_probe,
        .remove         = usbhs_omap_remove,
 };
 
@@ -884,9 +885,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
 
-static int __init omap_usbhs_drvinit(void)
+static int omap_usbhs_drvinit(void)
 {
-       return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
+       return platform_driver_register(&usbhs_omap_driver);
 }
 
 /*
@@ -898,7 +899,7 @@ static int __init omap_usbhs_drvinit(void)
  */
 fs_initcall_sync(omap_usbhs_drvinit);
 
-static void __exit omap_usbhs_drvexit(void)
+static void omap_usbhs_drvexit(void)
 {
        platform_driver_unregister(&usbhs_omap_driver);
 }
index 09c7c1e..cc11ede 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,6 +56,9 @@
 #define SKE_SEND_EKS_MESSAGE_SIZE \
        (MESSAGE_ID_SIZE+BITS_128_IN_BYTES+BITS_64_IN_BYTES)
 
+#define HDCP2_0_REPEATER_DOWNSTREAM BIT(1)
+#define HDCP1_DEVICE_DOWNSTREAM BIT(0)
+
 /* all message IDs */
 #define INVALID_MESSAGE_ID               0
 #define AKE_INIT_MESSAGE_ID              2
 #define LC_INIT_MESSAGE_ID               9
 #define LC_SEND_L_PRIME_MESSAGE_ID      10
 #define SKE_SEND_EKS_MESSAGE_ID         11
-#define REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID 12
-#define REPEATER_AUTH_SEND_ACK_MESSAGE_ID      15
-#define REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID 16
-#define REPEATER_AUTH_STREAM_READY_MESSAGE_ID  17
+#define REP_SEND_RECV_ID_LIST_ID 12
+#define REP_SEND_ACK_ID      15
+#define REP_STREAM_MANAGE_ID     16
+#define REP_STREAM_READY_ID  17
 #define SKE_SEND_TYPE_ID                       18
 #define HDCP2P2_MAX_MESSAGES                   19
 
@@ -195,18 +198,18 @@ static const struct hdcp_msg_data hdcp_msg_lookup[HDCP2P2_MAX_MESSAGES] = {
        [SKE_SEND_TYPE_ID] = { 1,
                { {"type", 0x69494, 1} },
                0 },
-       [REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID] = { 4,
+       [REP_SEND_RECV_ID_LIST_ID] = { 4,
                { {"RxInfo", 0x69330, 2}, {"seq_num_V", 0x69332, 3},
                        {"V'", 0x69335, 16}, {"ridlist", 0x69345, 155} },
                (1 << 0) },
-       [REPEATER_AUTH_SEND_ACK_MESSAGE_ID] = { 1,
+       [REP_SEND_ACK_ID] = { 1,
                { {"V", 0x693E0, 16} },
                0 },
-       [REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID] = { 3,
+       [REP_STREAM_MANAGE_ID] = { 3,
                { {"seq_num_M", 0x693F0, 3}, {"k", 0x693F3, 2},
                        {"streamID_Type", 0x693F5, 126} },
                0 },
-       [REPEATER_AUTH_STREAM_READY_MESSAGE_ID] = { 1,
+       [REP_STREAM_READY_ID] = { 1,
                { {"M'", 0x69473, 32} },
                0 }
 };
@@ -552,6 +555,7 @@ struct hdcp_lib_handle {
        enum hdcp_state hdcp_state;
        enum hdcp_lib_wakeup_cmd wakeup_cmd;
        bool repeater_flag;
+       bool non_2p2_present;
        bool update_stream;
        bool tethered;
        struct qseecom_handle *qseecom_handle;
@@ -684,19 +688,19 @@ static int hdcp_lib_get_next_message(struct hdcp_lib_handle *handle,
                        handle->device_type == HDCP_TXMTR_DP)
                        return SKE_SEND_TYPE_ID;
        case SKE_SEND_TYPE_ID:
-       case REPEATER_AUTH_STREAM_READY_MESSAGE_ID:
-       case REPEATER_AUTH_SEND_ACK_MESSAGE_ID:
+       case REP_STREAM_READY_ID:
+       case REP_SEND_ACK_ID:
                if (!handle->repeater_flag)
                        return INVALID_MESSAGE_ID;
 
                if (data->cmd == HDMI_HDCP_WKUP_CMD_SEND_MESSAGE)
-                       return REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID;
+                       return REP_STREAM_MANAGE_ID;
                else
-                       return REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID;
-       case REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID:
-               return REPEATER_AUTH_SEND_ACK_MESSAGE_ID;
-       case REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID:
-               return REPEATER_AUTH_STREAM_READY_MESSAGE_ID;
+                       return REP_SEND_RECV_ID_LIST_ID;
+       case REP_SEND_RECV_ID_LIST_ID:
+               return REP_SEND_ACK_ID;
+       case REP_STREAM_MANAGE_ID:
+               return REP_STREAM_READY_ID;
        default:
                pr_err("Uknown message ID (%d)", handle->last_msg);
                return -EINVAL;
@@ -716,7 +720,7 @@ static void hdcp_lib_wait_for_response(struct hdcp_lib_handle *handle,
        case AKE_SEND_PAIRING_INFO_MESSAGE_ID:
                handle->wait_timeout = HZ / 4;
                break;
-       case REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID:
+       case REP_SEND_RECV_ID_LIST_ID:
                if (!handle->authenticated)
                        handle->wait_timeout = HZ * 3;
                else
@@ -1791,6 +1795,7 @@ static int hdcp_lib_wakeup_thread(struct hdcp_lib_wakeup_data *data)
        case HDCP_LIB_WKUP_CMD_START:
                handle->no_stored_km_flag = 0;
                handle->repeater_flag = false;
+               handle->non_2p2_present = false;
                handle->update_stream = false;
                handle->last_msg_sent = 0;
                handle->last_msg = INVALID_MESSAGE_ID;
@@ -1898,7 +1903,7 @@ static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle)
                        cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
                }
                break;
-       case REPEATER_AUTH_SEND_ACK_MESSAGE_ID:
+       case REP_SEND_ACK_ID:
                pr_debug("Repeater authentication successful\n");
 
                if (handle->update_stream) {
@@ -2033,12 +2038,13 @@ static void hdcp_lib_timeout(struct hdcp_lib_handle *handle)
        }
 
        /*
-        * if the response contains LC_Init message
-        * send the message again to TZ
+        * if the response contains LC_Init OR RepeaterAuth_Stream_Manage
+        * message send the message again to the sink as this means that
+        * TZ would like to try again
         */
        if ((rsp_buf->commandid == HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE) &&
-           ((int)rsp_buf->message[0] == LC_INIT_MESSAGE_ID) &&
-           (rsp_buf->msglen == LC_INIT_MESSAGE_SIZE)) {
+           ((int)rsp_buf->message[0] == LC_INIT_MESSAGE_ID ||
+                (int)rsp_buf->message[0] == REP_STREAM_MANAGE_ID)) {
                if (!atomic_read(&handle->hdcp_off)) {
                        /* keep local copy of TZ response */
                        memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
@@ -2078,6 +2084,9 @@ static void hdcp_lib_clean(struct hdcp_lib_handle *handle)
 
        handle->authenticated = false;
 
+       /* AV mute the sink first to avoid artifacts */
+       handle->client_ops->mute_sink(handle->client_ctx);
+
        hdcp_lib_txmtr_deinit(handle);
        if (!handle->legacy_app)
                hdcp_lib_session_deinit(handle);
@@ -2188,6 +2197,14 @@ static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle)
                                  QSEECOM_ALIGN(sizeof
                                                (struct hdcp_rcvd_msg_rsp)));
 
+       if (msg[0] == REP_SEND_RECV_ID_LIST_ID) {
+               if ((msg[2] & HDCP2_0_REPEATER_DOWNSTREAM) ||
+                  (msg[2] & HDCP1_DEVICE_DOWNSTREAM))
+                       handle->non_2p2_present = true;
+               else
+                       handle->non_2p2_present = false;
+       }
+
        /* get next message from sink if we receive H PRIME on no store km */
        if ((msg[0] == AKE_SEND_H_PRIME_MESSAGE_ID) &&
            handle->no_stored_km_flag) {
@@ -2199,7 +2216,7 @@ static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle)
                goto exit;
        }
 
-       if ((msg[0] == REPEATER_AUTH_STREAM_READY_MESSAGE_ID) &&
+       if ((msg[0] == REP_STREAM_READY_ID) &&
            (rc == 0) && (rsp_buf->status == 0)) {
                pr_debug("Got Auth_Stream_Ready, nothing sent to rx\n");
 
@@ -2814,6 +2831,20 @@ static ssize_t hdmi_hdcp2p2_sysfs_wta_min_level_change(struct device *dev,
 
        handle = hdcp_drv_mgr->handle;
 
+       /*
+        * if the stream type from TZ is type 1
+        * ignore subsequent writes to the min_enc_level
+        * to avoid state transitions which can potentially
+        * cause visual artifacts because the stream type
+        * is already at the highest level and for a HDCP 2.2
+        * capable sink, we do not need to reduce the stream type
+        */
+       if (handle &&
+               !handle->non_2p2_present) {
+               pr_info("stream type is 1 returning\n");
+               return ret;
+       }
+
        rc = kstrtoint(buf, 10, &min_enc_lvl);
        if (rc) {
                pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
index 87a1337..eb57610 100644 (file)
@@ -177,7 +177,7 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
                } else
                        lux = 0;
        else
-               return -EAGAIN;
+               return 0;
 
        /* LUX range check */
        return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
index cc277f7..3877f53 100644 (file)
@@ -755,7 +755,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
        retval = get_user_pages_fast((uintptr_t) produce_uva,
                                     produce_q->kernel_if->num_pages, 1,
                                     produce_q->kernel_if->u.h.header_page);
-       if (retval < produce_q->kernel_if->num_pages) {
+       if (retval < (int)produce_q->kernel_if->num_pages) {
                pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
                        retval);
                qp_release_pages(produce_q->kernel_if->u.h.header_page,
@@ -767,7 +767,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
        retval = get_user_pages_fast((uintptr_t) consume_uva,
                                     consume_q->kernel_if->num_pages, 1,
                                     consume_q->kernel_if->u.h.header_page);
-       if (retval < consume_q->kernel_if->num_pages) {
+       if (retval < (int)consume_q->kernel_if->num_pages) {
                pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
                        retval);
                qp_release_pages(consume_q->kernel_if->u.h.header_page,
index 2e46496..4e98e5a 100644 (file)
@@ -284,8 +284,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                 case SIOCFINDIPDDPRT:
                        spin_lock_bh(&ipddp_route_lock);
                        rp = __ipddp_find_route(&rcp);
-                       if (rp)
-                               memcpy(&rcp2, rp, sizeof(rcp2));
+                       if (rp) {
+                               memset(&rcp2, 0, sizeof(rcp2));
+                               rcp2.ip    = rp->ip;
+                               rcp2.at    = rp->at;
+                               rcp2.flags = rp->flags;
+                       }
                        spin_unlock_bh(&ipddp_route_lock);
 
                        if (rp) {
index 339118f..78da1b7 100644 (file)
@@ -216,6 +216,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
 static void bond_slave_arr_handler(struct work_struct *work);
 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
                                  int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -1237,6 +1238,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
                        return NULL;
                }
        }
+       INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
        return slave;
 }
 
@@ -1244,6 +1247,7 @@ static void bond_free_slave(struct slave *slave)
 {
        struct bonding *bond = bond_get_bond_by_slave(slave);
 
+       cancel_delayed_work_sync(&slave->notify_work);
        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                kfree(SLAVE_AD_INFO(slave));
 
@@ -1265,39 +1269,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
        info->link_failure_count = slave->link_failure_count;
 }
 
-static void bond_netdev_notify(struct net_device *dev,
-                              struct netdev_bonding_info *info)
-{
-       rtnl_lock();
-       netdev_bonding_info_change(dev, info);
-       rtnl_unlock();
-}
-
 static void bond_netdev_notify_work(struct work_struct *_work)
 {
-       struct netdev_notify_work *w =
-               container_of(_work, struct netdev_notify_work, work.work);
+       struct slave *slave = container_of(_work, struct slave,
+                                          notify_work.work);
+
+       if (rtnl_trylock()) {
+               struct netdev_bonding_info binfo;
 
-       bond_netdev_notify(w->dev, &w->bonding_info);
-       dev_put(w->dev);
-       kfree(w);
+               bond_fill_ifslave(slave, &binfo.slave);
+               bond_fill_ifbond(slave->bond, &binfo.master);
+               netdev_bonding_info_change(slave->dev, &binfo);
+               rtnl_unlock();
+       } else {
+               queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+       }
 }
 
 void bond_queue_slave_event(struct slave *slave)
 {
-       struct bonding *bond = slave->bond;
-       struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
-       if (!nnw)
-               return;
-
-       dev_hold(slave->dev);
-       nnw->dev = slave->dev;
-       bond_fill_ifslave(slave, &nnw->bonding_info.slave);
-       bond_fill_ifbond(bond, &nnw->bonding_info.master);
-       INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
-       queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+       queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
 }
 
 /* enslave device <slave> to bond device <master> */
index af9ec57..7a6dd5e 100644 (file)
@@ -850,14 +850,22 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
        u32 reg;
 
-       /* Stop monitoring MPD interrupt */
-       intrl2_0_mask_set(priv, INTRL2_0_MPD);
-
        /* Clear the MagicPacket detection logic */
        reg = umac_readl(priv, UMAC_MPD_CTRL);
        reg &= ~MPD_EN;
        umac_writel(priv, reg, UMAC_MPD_CTRL);
 
+       reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+       if (reg & INTRL2_0_MPD)
+               netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+       if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+               reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+                                 RXCHK_BRCM_TAG_MATCH_MASK;
+               netdev_info(priv->netdev,
+                           "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+       }
+
        netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }
 
@@ -890,11 +898,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
                bcm_sysport_tx_reclaim_all(priv);
 
-       if (priv->irq0_stat & INTRL2_0_MPD) {
-               netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
-               bcm_sysport_resume_from_wol(priv);
-       }
-
        return IRQ_HANDLED;
 }
 
@@ -1915,9 +1918,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
        /* UniMAC receive needs to be turned on */
        umac_enable_set(priv, CMD_RX_EN, 1);
 
-       /* Enable the interrupt wake-up source */
-       intrl2_0_mask_clear(priv, INTRL2_0_MPD);
-
        netif_dbg(priv, wol, ndev, "entered WOL mode\n");
 
        return 0;
index 4ffacaf..fea8116 100644 (file)
@@ -1343,8 +1343,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
-                       if (unlikely(tx_pkts > bp->tx_wake_thresh))
+                       if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
                                rx_pkts = budget;
+                               raw_cons = NEXT_RAW_CMP(raw_cons);
+                               break;
+                       }
                } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
                        rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
                        if (likely(rc >= 0))
@@ -1362,7 +1365,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                }
                raw_cons = NEXT_RAW_CMP(raw_cons);
 
-               if (rx_pkts == budget)
+               if (rx_pkts && rx_pkts == budget)
                        break;
        }
 
@@ -1404,8 +1407,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
        while (1) {
                work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
 
-               if (work_done >= budget)
+               if (work_done >= budget) {
+                       if (!budget)
+                               BNXT_CP_DB_REARM(cpr->cp_doorbell,
+                                                cpr->cp_raw_cons);
                        break;
+               }
 
                if (!bnxt_has_work(bp, cpr)) {
                        napi_complete(napi);
index 8d54e7b..085f77f 100644 (file)
@@ -523,7 +523,7 @@ static int macb_halt_tx(struct macb *bp)
                if (!(status & MACB_BIT(TGO)))
                        return 0;
 
-               usleep_range(10, 250);
+               udelay(250);
        } while (time_before(halt_time, timeout));
 
        return -ETIMEDOUT;
@@ -2743,6 +2743,13 @@ static const struct macb_config at91sam9260_config = {
        .init = macb_init,
 };
 
+static const struct macb_config sama5d3macb_config = {
+       .caps = MACB_CAPS_SG_DISABLED
+             | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+};
+
 static const struct macb_config pc302gem_config = {
        .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
        .dma_burst_length = 16,
@@ -2801,6 +2808,7 @@ static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "cdns,gem", .data = &pc302gem_config },
        { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
        { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+       { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
        { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
        { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
        { .compatible = "cdns,emac", .data = &emac_config },
index cec95ac..fe37fc7 100644 (file)
@@ -171,10 +171,10 @@ struct hnae_desc_cb {
 
        /* priv data for the desc, e.g. skb when use with ip stack*/
        void *priv;
-       u16 page_offset;
-       u16 reuse_flag;
+       u32 page_offset;
+       u32 length;     /* length of the buffer */
 
-       u16 length;     /* length of the buffer */
+       u16 reuse_flag;
 
        /* desc type, used by the ring user to mark the type of the priv data */
        u16 type;
index 3daf2d4..884aa80 100644 (file)
@@ -2636,7 +2636,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
                /* Wait for link to drop */
                time = jiffies + (HZ / 10);
                do {
-                       if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+                       if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
                                break;
                        if (!in_interrupt())
                                schedule_timeout_interruptible(1);
index 83e557c..5ae8874 100644 (file)
@@ -645,14 +645,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
                adapter->tx_ring = tx_old;
                e1000_free_all_rx_resources(adapter);
                e1000_free_all_tx_resources(adapter);
-               kfree(tx_old);
-               kfree(rx_old);
                adapter->rx_ring = rxdr;
                adapter->tx_ring = txdr;
                err = e1000_up(adapter);
                if (err)
                        goto err_setup;
        }
+       kfree(tx_old);
+       kfree(rx_old);
 
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return 0;
@@ -665,7 +665,8 @@ err_setup_rx:
 err_alloc_rx:
        kfree(txdr);
 err_alloc_tx:
-       e1000_up(adapter);
+       if (netif_running(adapter->netdev))
+               e1000_up(adapter);
 err_setup:
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return err;
index ac92685..42305f3 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/clk.h>
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
+#include <linux/if_vlan.h>
 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -4268,7 +4269,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
 }
 
 /* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
                               int ip_hdr_len, int l4_proto)
 {
        u32 command;
@@ -5032,14 +5033,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int ip_hdr_len = 0;
                u8 l4_proto;
+               __be16 l3_proto = vlan_get_protocol(skb);
 
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (l3_proto == htons(ETH_P_IP)) {
                        struct iphdr *ip4h = ip_hdr(skb);
 
                        /* Calculate IPv4 checksum and L4 checksum */
                        ip_hdr_len = ip4h->ihl;
                        l4_proto = ip4h->protocol;
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (l3_proto == htons(ETH_P_IPV6)) {
                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
                        /* Read l4_protocol from one of IPv6 extra headers */
@@ -5051,7 +5053,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
                }
 
                return mvpp2_txq_desc_csum(skb_network_offset(skb),
-                               skb->protocol, ip_hdr_len, l4_proto);
+                                          l3_proto, ip_hdr_len, l4_proto);
        }
 
        return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
index ff77b8b..7417605 100644 (file)
@@ -228,7 +228,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
        struct mlx4_dev *dev = &priv->dev;
        struct mlx4_eq *eq = &priv->eq_table.eq[vec];
 
-       if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+       if (!cpumask_available(eq->affinity_mask) ||
+           cpumask_empty(eq->affinity_mask))
                return;
 
        hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
index 55007f1..12cd8ae 100644 (file)
@@ -1802,7 +1802,8 @@ struct qlcnic_hardware_ops {
        int (*config_loopback) (struct qlcnic_adapter *, u8);
        int (*clear_loopback) (struct qlcnic_adapter *, u8);
        int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
-       void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+       void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+                                u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
        int (*get_board_info) (struct qlcnic_adapter *);
        void (*set_mac_filter_count) (struct qlcnic_adapter *);
        void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2044,9 +2045,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
 }
 
 static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-                                       u64 *addr, u16 id)
+                                       u64 *addr, u16 vlan,
+                                       struct qlcnic_host_tx_ring *tx_ring)
 {
-       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
 }
 
 static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
index b4f3cb5..7f7aea9 100644 (file)
@@ -2132,7 +2132,8 @@ out:
 }
 
 void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
-                                 u16 vlan_id)
+                                 u16 vlan_id,
+                                 struct qlcnic_host_tx_ring *tx_ring)
 {
        u8 mac[ETH_ALEN];
        memcpy(&mac, addr, ETH_ALEN);
index 331ae2c..c8e012b 100644 (file)
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+                                 u16 vlan, struct qlcnic_host_tx_ring *ring);
 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
 int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
 void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
index 4bb33af..56a3bd9 100644 (file)
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
                         struct net_device *netdev);
 void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
-                              u64 *uaddr, u16 vlan_id);
+                              u64 *uaddr, u16 vlan_id,
+                              struct qlcnic_host_tx_ring *tx_ring);
 int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
                                     struct ethtool_coalesce *);
 int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
index d4b5085..98042a3 100644 (file)
@@ -269,13 +269,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
 }
 
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
-                              u16 vlan_id)
+                              u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
 {
        struct cmd_desc_type0 *hwdesc;
        struct qlcnic_nic_req *req;
        struct qlcnic_mac_req *mac_req;
        struct qlcnic_vlan_req *vlan_req;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
        u32 producer;
        u64 word;
 
@@ -302,7 +301,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
 
 static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                               struct cmd_desc_type0 *first_desc,
-                              struct sk_buff *skb)
+                              struct sk_buff *skb,
+                              struct qlcnic_host_tx_ring *tx_ring)
 {
        struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -336,7 +336,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                    tmp_fil->vlan_id == vlan_id) {
                        if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
                                qlcnic_change_filter(adapter, &src_addr,
-                                                    vlan_id);
+                                                    vlan_id, tx_ring);
                        tmp_fil->ftime = jiffies;
                        return;
                }
@@ -351,7 +351,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
        if (!fil)
                return;
 
-       qlcnic_change_filter(adapter, &src_addr, vlan_id);
+       qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
        fil->ftime = jiffies;
        fil->vlan_id = vlan_id;
        memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -767,7 +767,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (adapter->drv_mac_learn)
-               qlcnic_send_filter(adapter, first_desc, skb);
+               qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
 
        tx_ring->tx_stats.tx_bytes += skb->len;
        tx_ring->tx_stats.xmit_called++;
index 8b4069e..c6782eb 100644 (file)
@@ -759,7 +759,7 @@ struct rtl8169_tc_offsets {
 };
 
 enum rtl_flag {
-       RTL_FLAG_TASK_ENABLED,
+       RTL_FLAG_TASK_ENABLED = 0,
        RTL_FLAG_TASK_SLOW_PENDING,
        RTL_FLAG_TASK_RESET_PENDING,
        RTL_FLAG_TASK_PHY_PENDING,
@@ -7618,7 +7618,8 @@ static int rtl8169_close(struct net_device *dev)
        rtl8169_update_counters(dev);
 
        rtl_lock_work(tp);
-       clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+       /* Clear all task flags */
+       bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
 
        rtl8169_down(dev);
        rtl_unlock_work(tp);
@@ -7795,7 +7796,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
 
        rtl_lock_work(tp);
        napi_disable(&tp->napi);
-       clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+       /* Clear all task flags */
+       bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
        rtl_unlock_work(tp);
 
        rtl_pll_power_down(tp);
index d02691b..20aa34f 100644 (file)
@@ -71,7 +71,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
  * Description:
  * This function validates the number of Unicast address entries supported
  * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 132, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
  * logic. This function validates a valid, supported configuration is
  * selected, and defaults to 1 Unicast address if an unsupported
  * configuration is selected.
@@ -81,8 +81,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
        int x = ucast_entries;
 
        switch (x) {
-       case 1:
-       case 32:
+       case 1 ... 32:
        case 64:
        case 128:
                break;
index 4917483..33ffb57 100644 (file)
@@ -1142,6 +1142,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                return -EBUSY;
        }
 
+       if (dev == port_dev) {
+               netdev_err(dev, "Cannot enslave team device to itself\n");
+               return -EINVAL;
+       }
+
        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
index 4789374..8dbe086 100644 (file)
@@ -1506,6 +1506,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
        if (pdata) {
+               cancel_work_sync(&pdata->set_multicast);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
                pdata = NULL;
index 184da61..f569826 100644 (file)
@@ -218,11 +218,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
                                              htt->rx_ring.fill_cnt));
-       spin_unlock_bh(&htt->rx_ring.lock);
 
        if (ret)
                ath10k_htt_rx_ring_free(htt);
 
+       spin_unlock_bh(&htt->rx_ring.lock);
+
        return ret;
 }
 
@@ -234,7 +235,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
        skb_queue_purge(&htt->rx_in_ord_compl_q);
        skb_queue_purge(&htt->tx_fetch_ind_q);
 
+       spin_lock_bh(&htt->rx_ring.lock);
        ath10k_htt_rx_ring_free(htt);
+       spin_unlock_bh(&htt->rx_ring.lock);
 
        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
index e0d00ce..5b974bb 100644 (file)
@@ -152,10 +152,9 @@ TRACE_EVENT(ath10k_log_dbg_dump,
 );
 
 TRACE_EVENT(ath10k_wmi_cmd,
-       TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
-                int ret),
+       TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
 
-       TP_ARGS(ar, id, buf, buf_len, ret),
+       TP_ARGS(ar, id, buf, buf_len),
 
        TP_STRUCT__entry(
                __string(device, dev_name(ar->dev))
@@ -163,7 +162,6 @@ TRACE_EVENT(ath10k_wmi_cmd,
                __field(unsigned int, id)
                __field(size_t, buf_len)
                __dynamic_array(u8, buf, buf_len)
-               __field(int, ret)
        ),
 
        TP_fast_assign(
@@ -171,17 +169,15 @@ TRACE_EVENT(ath10k_wmi_cmd,
                __assign_str(driver, dev_driver_string(ar->dev));
                __entry->id = id;
                __entry->buf_len = buf_len;
-               __entry->ret = ret;
                memcpy(__get_dynamic_array(buf), buf, buf_len);
        ),
 
        TP_printk(
-               "%s %s id %d len %zu ret %d",
+               "%s %s id %d len %zu",
                __get_str(driver),
                __get_str(device),
                __entry->id,
-               __entry->buf_len,
-               __entry->ret
+               __entry->buf_len
        )
 );
 
index 3d323f3..4608b69 100644 (file)
@@ -1575,10 +1575,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
        bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
        ie_len = roundup(arg->ie_len, 4);
        len = (sizeof(*tlv) + sizeof(*cmd)) +
-             (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
-             (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
-             (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
-             (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+             sizeof(*tlv) + chan_len +
+             sizeof(*tlv) + ssid_len +
+             sizeof(*tlv) + bssid_len +
+             sizeof(*tlv) + ie_len;
 
        skb = ath10k_wmi_alloc_skb(ar, len);
        if (!skb)
index 938fc84..2be8e76 100644 (file)
@@ -1711,8 +1711,8 @@ int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
        cmd_hdr->cmd_id = __cpu_to_le32(cmd);
 
        memset(skb_cb, 0, sizeof(*skb_cb));
+       trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
        ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
-       trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
 
        if (ret)
                goto err_pull;
index 318076f..a4b177e 100644 (file)
@@ -4,6 +4,7 @@ cnss2-y := main.o
 cnss2-y += bus.o
 cnss2-y += debug.o
 cnss2-y += pci.o
+cnss2-y += usb.o
 cnss2-y += power.o
 cnss2-y += qmi.o
 cnss2-y += wlan_firmware_service_v01.o
index c0810df..17da902 100644 (file)
@@ -13,6 +13,7 @@
 #include "bus.h"
 #include "debug.h"
 #include "pci.h"
+#include "usb.h"
 
 enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev)
 {
@@ -36,12 +37,33 @@ enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id)
        case QCA6290_DEVICE_ID:
        case QCN7605_DEVICE_ID:
                return CNSS_BUS_PCI;
+       case QCN7605_COMPOSITE_DEVICE_ID:
+       case QCN7605_STANDALONE_DEVICE_ID:
+               return CNSS_BUS_USB;
        default:
                cnss_pr_err("Unknown device_id: 0x%lx\n", device_id);
                return CNSS_BUS_NONE;
        }
 }
 
+bool cnss_bus_req_mem_ind_valid(struct cnss_plat_data *plat_priv)
+{
+       if (cnss_get_bus_type(plat_priv->device_id) == CNSS_BUS_USB)
+               return false;
+       else
+               return true;
+}
+
+bool cnss_bus_dev_cal_rep_valid(struct cnss_plat_data *plat_priv)
+{
+       bool ret = false;
+
+       if (cnss_get_bus_type(plat_priv->device_id) == CNSS_BUS_USB)
+               ret = true;
+
+       return ret;
+}
+
 void *cnss_bus_dev_to_bus_priv(struct device *dev)
 {
        if (!dev)
@@ -69,6 +91,8 @@ struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev)
        switch (cnss_get_dev_bus_type(dev)) {
        case CNSS_BUS_PCI:
                return cnss_pci_priv_to_plat_priv(bus_priv);
+       case CNSS_BUS_USB:
+               return cnss_usb_priv_to_plat_priv(bus_priv);
        default:
                return NULL;
        }
@@ -82,6 +106,8 @@ int cnss_bus_init(struct cnss_plat_data *plat_priv)
        switch (plat_priv->bus_type) {
        case CNSS_BUS_PCI:
                return cnss_pci_init(plat_priv);
+       case CNSS_BUS_USB:
+               return cnss_usb_init(plat_priv);
        default:
                cnss_pr_err("Unsupported bus type: %d\n",
                            plat_priv->bus_type);
@@ -97,6 +123,10 @@ void cnss_bus_deinit(struct cnss_plat_data *plat_priv)
        switch (plat_priv->bus_type) {
        case CNSS_BUS_PCI:
                cnss_pci_deinit(plat_priv);
+               break;
+       case CNSS_BUS_USB:
+               cnss_usb_deinit(plat_priv);
+               break;
        default:
                cnss_pr_err("Unsupported bus type: %d\n",
                            plat_priv->bus_type);
@@ -212,6 +240,8 @@ int cnss_bus_call_driver_probe(struct cnss_plat_data *plat_priv)
        switch (plat_priv->bus_type) {
        case CNSS_BUS_PCI:
                return cnss_pci_call_driver_probe(plat_priv->bus_priv);
+       case CNSS_BUS_USB:
+               return cnss_usb_call_driver_probe(plat_priv->bus_priv);
        default:
                cnss_pr_err("Unsupported bus type: %d\n",
                            plat_priv->bus_type);
@@ -227,6 +257,8 @@ int cnss_bus_call_driver_remove(struct cnss_plat_data *plat_priv)
        switch (plat_priv->bus_type) {
        case CNSS_BUS_PCI:
                return cnss_pci_call_driver_remove(plat_priv->bus_priv);
+       case CNSS_BUS_USB:
+               return cnss_usb_call_driver_remove(plat_priv->bus_priv);
        default:
                cnss_pr_err("Unsupported bus type: %d\n",
                            plat_priv->bus_type);
@@ -242,6 +274,8 @@ int cnss_bus_dev_powerup(struct cnss_plat_data *plat_priv)
        switch (plat_priv->bus_type) {
        case CNSS_BUS_PCI:
                return cnss_pci_dev_powerup(plat_priv->bus_priv);
+       case CNSS_BUS_USB:
+               return 0;
        default:
                cnss_pr_err("Unsupported bus type: %d\n",
                            plat_priv->bus_type);
@@ -257,6 +291,8 @@ int cnss_bus_dev_shutdown(struct cnss_plat_data *plat_priv)
        switch (plat_priv->bus_type) {
        case CNSS_BUS_PCI:
                return cnss_pci_dev_shutdown(plat_priv->bus_priv);
+       case CNSS_BUS_USB:
+               return 0;
        default:
                cnss_pr_err("Unsupported bus type: %d\n",
                            plat_priv->bus_type);
@@ -302,6 +338,8 @@ int cnss_bus_register_driver_hdlr(struct cnss_plat_data *plat_priv, void *data)
        switch (plat_priv->bus_type) {
        case CNSS_BUS_PCI:
                return cnss_pci_register_driver_hdlr(plat_priv->bus_priv, data);
+       case CNSS_BUS_USB:
+               return cnss_usb_register_driver_hdlr(plat_priv->bus_priv, data);
        default:
                cnss_pr_err("Unsupported bus type: %d\n",
                            plat_priv->bus_type);
@@ -317,6 +355,8 @@ int cnss_bus_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
        switch (plat_priv->bus_type) {
        case CNSS_BUS_PCI:
                return cnss_pci_unregister_driver_hdlr(plat_priv->bus_priv);
+       case CNSS_BUS_USB:
+               return cnss_usb_unregister_driver_hdlr(plat_priv->bus_priv);
        default:
                cnss_pr_err("Unsupported bus type: %d\n",
                            plat_priv->bus_type);
index bd32a94..415f1ae 100644 (file)
 #define QCN7605_VENDOR_ID              0x17CB
 #define QCN7605_DEVICE_ID              0x1102
 
+#define QCN7605_USB_VENDOR_ID             0x05C6
+#define QCN7605_COMPOSITE_DEVICE_ID     QCN7605_COMPOSITE_PRODUCT_ID
+#define QCN7605_STANDALONE_DEVICE_ID    QCN7605_STANDALONE_PRODUCT_ID
+
+#define QCN7605_STANDALONE_PRODUCT_ID    0x9900
+#define QCN7605_COMPOSITE_PRODUCT_ID     0x9901
+
 enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev);
 enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id);
 void *cnss_bus_dev_to_bus_priv(struct device *dev);
@@ -50,4 +57,6 @@ int cnss_bus_unregister_driver_hdlr(struct cnss_plat_data *plat_priv);
 int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv,
                                      int modem_current_status);
 int cnss_bus_recovery_update_status(struct cnss_plat_data *plat_priv);
+bool cnss_bus_req_mem_ind_valid(struct cnss_plat_data *plat_priv);
+bool cnss_bus_dev_cal_rep_valid(struct cnss_plat_data *plat_priv);
 #endif /* _CNSS_BUS_H */
index 8aa9eab..9c1b29f 100644 (file)
@@ -37,6 +37,7 @@
 #define FW_READY_TIMEOUT               20000
 #define FW_ASSERT_TIMEOUT              5000
 #define CNSS_EVENT_PENDING             2989
+#define CE_MSI_NAME                    "CE"
 
 static struct cnss_plat_data *plat_env;
 
@@ -249,7 +250,7 @@ int cnss_wlan_enable(struct device *dev,
 {
        struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
        struct wlfw_wlan_cfg_req_msg_v01 req;
-       u32 i;
+       u32 i, ce_id, num_vectors, user_base_data, base_vector;
        int ret = 0;
 
        if (plat_priv->device_id == QCA6174_DEVICE_ID)
@@ -299,6 +300,19 @@ int cnss_wlan_enable(struct device *dev,
                req.svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
        }
 
+       if (config->num_shadow_reg_cfg) {
+               req.shadow_reg_valid = 1;
+
+               if (config->num_shadow_reg_cfg >
+                   QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
+                       req.shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
+               else
+                       req.shadow_reg_len = config->num_shadow_reg_cfg;
+               memcpy(req.shadow_reg, config->shadow_reg_cfg,
+                      sizeof(struct wlfw_shadow_reg_cfg_s_v01)
+                      * req.shadow_reg_len);
+       }
+
        req.shadow_reg_v2_valid = 1;
        if (config->num_shadow_reg_v2_cfg >
            QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
@@ -310,6 +324,30 @@ int cnss_wlan_enable(struct device *dev,
               sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01)
               * req.shadow_reg_v2_len);
 
+       if (config->rri_over_ddr_cfg_valid) {
+               req.rri_over_ddr_cfg_valid = 1;
+               req.rri_over_ddr_cfg.base_addr_low =
+                       config->rri_over_ddr_cfg.base_addr_low;
+               req.rri_over_ddr_cfg.base_addr_high =
+                       config->rri_over_ddr_cfg.base_addr_high;
+       }
+
+       if (plat_priv->device_id == QCN7605_DEVICE_ID) {
+               ret = cnss_get_user_msi_assignment(dev, CE_MSI_NAME,
+                                                  &num_vectors,
+                                                  &user_base_data,
+                                                  &base_vector);
+               if (!ret) {
+                       req.msi_cfg_valid = 1;
+                       req.msi_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+                       for (ce_id = 0; ce_id < QMI_WLFW_MAX_NUM_CE_V01;
+                               ce_id++) {
+                               req.msi_cfg[ce_id].ce_id = ce_id;
+                               req.msi_cfg[ce_id].msi_vector =
+                                       (ce_id % num_vectors) + base_vector;
+                       }
+               }
+       }
        ret = cnss_wlfw_wlan_cfg_send_sync(plat_priv, &req);
        if (ret)
                goto out;
@@ -521,6 +559,15 @@ out:
        return ret;
 }
 
+static int cnss_cal_update_hdlr(struct cnss_plat_data *plat_priv)
+{
+       /* QCN7605 store the cal data sent by FW to calDB memory area
+        * get out of this after complete data is uploaded. FW is expected
+        * to send cal done
+       */
+       return 0;
+}
+
 static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
 {
        switch (type) {
@@ -538,6 +585,10 @@ static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
                return "COLD_BOOT_CAL_START";
        case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
                return "COLD_BOOT_CAL_DONE";
+       case CNSS_DRIVER_EVENT_CAL_UPDATE:
+               return "COLD_BOOT_CAL_DATA_UPDATE";
+       case CNSS_DRIVER_EVENT_CAL_DOWNLOAD:
+               return "COLD_BOOT_CAL_DATA_DOWNLOAD";
        case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
                return "REGISTER_DRIVER";
        case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
@@ -1089,6 +1140,30 @@ int cnss_force_fw_assert(struct device *dev)
 }
 EXPORT_SYMBOL(cnss_force_fw_assert);
 
+static int cnss_wlfw_server_arrive_hdlr(struct cnss_plat_data *plat_priv)
+{
+       int ret;
+
+       ret = cnss_wlfw_server_arrive(plat_priv);
+       if (ret)
+               goto out;
+
+       if (!cnss_bus_req_mem_ind_valid(plat_priv)) {
+               ret = cnss_wlfw_tgt_cap_send_sync(plat_priv);
+               if (ret)
+                       goto out;
+
+               ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv);
+               if (ret)
+                       goto out;
+               /*cnss driver sends  meta data report and waits for FW_READY*/
+               if (cnss_bus_dev_cal_rep_valid(plat_priv))
+                       ret = cnss_wlfw_cal_report_send_sync(plat_priv);
+       }
+out:
+       return ret;
+}
+
 static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
 {
        int ret = 0;
@@ -1108,9 +1183,9 @@ static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
        if (plat_priv->device_id == QCN7605_DEVICE_ID)
                goto skip_shutdown;
        cnss_bus_dev_shutdown(plat_priv);
+
 skip_shutdown:
        clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
-
        return 0;
 }
 
@@ -1156,7 +1231,7 @@ static void cnss_driver_event_work(struct work_struct *work)
 
                switch (event->type) {
                case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
-                       ret = cnss_wlfw_server_arrive(plat_priv);
+                       ret = cnss_wlfw_server_arrive_hdlr(plat_priv);
                        break;
                case CNSS_DRIVER_EVENT_SERVER_EXIT:
                        ret = cnss_wlfw_server_exit(plat_priv);
@@ -1176,6 +1251,9 @@ static void cnss_driver_event_work(struct work_struct *work)
                case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
                        ret = cnss_cold_boot_cal_start_hdlr(plat_priv);
                        break;
+               case CNSS_DRIVER_EVENT_CAL_UPDATE:
+                       ret = cnss_cal_update_hdlr(plat_priv);
+                       break;
                case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
                        ret = cnss_cold_boot_cal_done_hdlr(plat_priv);
                        break;
@@ -1240,6 +1318,8 @@ int cnss_register_subsys(struct cnss_plat_data *plat_priv)
                subsys_info->subsys_desc.name = "QCA6290";
                break;
        case QCN7605_DEVICE_ID:
+       case QCN7605_STANDALONE_DEVICE_ID:
+       case QCN7605_COMPOSITE_DEVICE_ID:
                subsys_info->subsys_desc.name = "QCN7605";
                break;
        default:
@@ -1460,6 +1540,10 @@ int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
        case QCN7605_DEVICE_ID:
                ret = cnss_register_ramdump_v2(plat_priv);
                break;
+       case QCN7605_COMPOSITE_DEVICE_ID:
+       case QCN7605_STANDALONE_DEVICE_ID:
+               break;
+
        default:
                cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
                ret = -ENODEV;
@@ -1478,6 +1562,9 @@ void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
        case QCA6290_DEVICE_ID:
                cnss_unregister_ramdump_v2(plat_priv);
                break;
+       case QCN7605_COMPOSITE_DEVICE_ID:
+       case QCN7605_STANDALONE_DEVICE_ID:
+               break;
        default:
                cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
                break;
index 9dc64e0..ec67d31 100644 (file)
@@ -33,6 +33,7 @@
 enum cnss_dev_bus_type {
        CNSS_BUS_NONE = -1,
        CNSS_BUS_PCI,
+       CNSS_BUS_USB,
 };
 
 struct cnss_vreg_info {
@@ -124,6 +125,8 @@ enum cnss_driver_event_type {
        CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
        CNSS_DRIVER_EVENT_POWER_UP,
        CNSS_DRIVER_EVENT_POWER_DOWN,
+       CNSS_DRIVER_EVENT_CAL_UPDATE,
+       CNSS_DRIVER_EVENT_CAL_DOWNLOAD,
        CNSS_DRIVER_EVENT_MAX,
 };
 
@@ -245,4 +248,5 @@ void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv);
 void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
 u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv);
 bool *cnss_get_qmi_bypass(void);
+bool is_qcn7605_device(u16 device_id);
 #endif /* _CNSS_MAIN_H */
index 1d4b5b9..427b42c 100644 (file)
@@ -1297,6 +1297,94 @@ int cnss_pm_request_resume(struct cnss_pci_data *pci_priv)
        return pm_request_resume(&pci_dev->dev);
 }
 
+#ifdef CONFIG_CNSS_QCA6390
+int cnss_pci_force_wake_request(struct device *dev)
+{
+       struct pci_dev *pci_dev = to_pci_dev(dev);
+       struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+       struct mhi_controller *mhi_ctrl;
+
+       if (!pci_priv)
+               return -ENODEV;
+
+       if (pci_priv->device_id != QCA6390_DEVICE_ID)
+               return 0;
+
+       mhi_ctrl = pci_priv->mhi_ctrl;
+       if (!mhi_ctrl)
+               return -EINVAL;
+
+       read_lock_bh(&mhi_ctrl->pm_lock);
+       mhi_ctrl->wake_get(mhi_ctrl, true);
+       read_unlock_bh(&mhi_ctrl->pm_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_request);
+
+int cnss_pci_is_device_awake(struct device *dev)
+{
+       struct pci_dev *pci_dev = to_pci_dev(dev);
+       struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+       struct mhi_controller *mhi_ctrl;
+
+       if (!pci_priv)
+               return -ENODEV;
+
+       if (pci_priv->device_id != QCA6390_DEVICE_ID)
+               return true;
+
+       mhi_ctrl = pci_priv->mhi_ctrl;
+       if (!mhi_ctrl)
+               return -EINVAL;
+
+       return mhi_ctrl->dev_state == MHI_STATE_M0;
+}
+EXPORT_SYMBOL(cnss_pci_is_device_awake);
+
+int cnss_pci_force_wake_release(struct device *dev)
+{
+       struct pci_dev *pci_dev = to_pci_dev(dev);
+       struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+       struct mhi_controller *mhi_ctrl;
+
+       if (!pci_priv)
+               return -ENODEV;
+
+       if (pci_priv->device_id != QCA6390_DEVICE_ID)
+               return 0;
+
+       mhi_ctrl = pci_priv->mhi_ctrl;
+       if (!mhi_ctrl)
+               return -EINVAL;
+
+       read_lock_bh(&mhi_ctrl->pm_lock);
+       mhi_ctrl->wake_put(mhi_ctrl, false);
+       read_unlock_bh(&mhi_ctrl->pm_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_release);
+#else
+int cnss_pci_force_wake_request(struct device *dev)
+{
+       return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_request);
+
+int cnss_pci_is_device_awake(struct device *dev)
+{
+       return true;
+}
+EXPORT_SYMBOL(cnss_pci_is_device_awake);
+
+int cnss_pci_force_wake_release(struct device *dev)
+{
+       return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_release);
+#endif
+
 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
 {
        struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
index 669816c..fb28413 100644 (file)
@@ -238,6 +238,10 @@ static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
        req.fw_init_done_enable = 1;
        req.pin_connect_result_enable_valid = 1;
        req.pin_connect_result_enable = 1;
+       req.initiate_cal_download_enable_valid = 1;
+       req.initiate_cal_download_enable = 1;
+       req.initiate_cal_update_enable_valid = 1;
+       req.initiate_cal_update_enable = 1;
 
        req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN;
        req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01;
@@ -269,6 +273,13 @@ out:
        return ret;
 }
 
+static int cnss_qmi_initiate_cal_update_ind_hdlr(
+                                        struct cnss_plat_data *plat_priv,
+                                        void *msg, unsigned int msg_len)
+{
+       return 0;
+}
+
 static int cnss_wlfw_request_mem_ind_hdlr(struct cnss_plat_data *plat_priv,
                                          void *msg, unsigned int msg_len)
 {
@@ -941,6 +952,11 @@ out:
        return ret;
 }
 
+int cnss_wlfw_cal_report_send_sync(struct cnss_plat_data *plat_priv)
+{
+       return 0;
+}
+
 static void cnss_wlfw_clnt_ind(struct qmi_handle *handle,
                               unsigned int msg_id, void *msg,
                               unsigned int msg_len, void *ind_cb_priv)
@@ -977,6 +993,9 @@ static void cnss_wlfw_clnt_ind(struct qmi_handle *handle,
        case QMI_WLFW_PIN_CONNECT_RESULT_IND_V01:
                cnss_qmi_pin_result_ind_hdlr(plat_priv, msg, msg_len);
                break;
+       case QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01:
+               cnss_qmi_initiate_cal_update_ind_hdlr(plat_priv, msg, msg_len);
+               break;
        default:
                cnss_pr_err("Invalid QMI WLFW indication, msg_id: 0x%x\n",
                            msg_id);
index 70d8d40..c6a1e67 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -37,5 +37,5 @@ int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
                                      u32 data_len, u8 *data);
 int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
                            u8 fw_log_mode);
-
+int cnss_wlfw_cal_report_send_sync(struct cnss_plat_data *plat_priv);
 #endif /* _CNSS_QMI_H */
diff --git a/drivers/net/wireless/cnss2/usb.c b/drivers/net/wireless/cnss2/usb.c
new file mode 100644 (file)
index 0000000..60d81b1
--- /dev/null
@@ -0,0 +1,350 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "main.h"
+#include "bus.h"
+#include "debug.h"
+#include "usb.h"
+
+int cnss_usb_dev_powerup(struct cnss_usb_data *usb_priv)
+{
+       int ret = 0;
+
+       if (!usb_priv) {
+               cnss_pr_err("usb_priv is NULL\n");
+               return -ENODEV;
+       }
+       return ret;
+}
+
+int cnss_usb_wlan_register_driver(struct cnss_usb_wlan_driver *driver_ops)
+{
+       int ret = 0;
+       struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+       struct cnss_usb_data *usb_priv;
+
+       if (!plat_priv) {
+               cnss_pr_err("plat_priv is NULL\n");
+               return -ENODEV;
+       }
+
+       usb_priv = plat_priv->bus_priv;
+       if (!usb_priv) {
+               cnss_pr_err("usb_priv is NULL\n");
+               return -ENODEV;
+       }
+
+       if (usb_priv->driver_ops) {
+               cnss_pr_err("Driver has already registered\n");
+               return -EEXIST;
+       }
+
+       ret = cnss_driver_event_post(plat_priv,
+                                    CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+                                    CNSS_EVENT_SYNC_UNINTERRUPTIBLE,
+                                    driver_ops);
+       return ret;
+}
+EXPORT_SYMBOL(cnss_usb_wlan_register_driver);
+
+void cnss_usb_wlan_unregister_driver(struct cnss_usb_wlan_driver *driver_ops)
+{
+       struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+       if (!plat_priv) {
+               cnss_pr_err("plat_priv is NULL\n");
+               return;
+       }
+
+       cnss_driver_event_post(plat_priv,
+                              CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+                              CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+}
+EXPORT_SYMBOL(cnss_usb_wlan_unregister_driver);
+
+int cnss_usb_register_driver_hdlr(struct cnss_usb_data *usb_priv,
+                                 void *data)
+{
+       int ret = 0;
+       struct cnss_plat_data *plat_priv = usb_priv->plat_priv;
+
+       set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+       usb_priv->driver_ops = data;
+
+       ret = cnss_bus_call_driver_probe(plat_priv);
+
+       return ret;
+}
+
+int cnss_usb_unregister_driver_hdlr(struct cnss_usb_data *usb_priv)
+{
+       struct cnss_plat_data *plat_priv = usb_priv->plat_priv;
+
+       set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+       cnss_usb_dev_shutdown(usb_priv);
+       usb_priv->driver_ops = NULL;
+
+       return 0;
+}
+
+int cnss_usb_dev_shutdown(struct cnss_usb_data *usb_priv)
+{
+       int ret = 0;
+
+       if (!usb_priv) {
+               cnss_pr_err("usb_priv is NULL\n");
+               return -ENODEV;
+       }
+
+       switch (usb_priv->device_id) {
+       case QCN7605_COMPOSITE_DEVICE_ID:
+       case QCN7605_STANDALONE_DEVICE_ID:
+               break;
+       default:
+               cnss_pr_err("Unknown device_id found: 0x%x\n",
+                           usb_priv->device_id);
+               ret = -ENODEV;
+       }
+       return ret;
+}
+
+int cnss_usb_call_driver_probe(struct cnss_usb_data *usb_priv)
+{
+       int ret = 0;
+       struct cnss_plat_data *plat_priv = usb_priv->plat_priv;
+
+       if (!usb_priv->driver_ops) {
+               cnss_pr_err("driver_ops is NULL\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+           test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+               ret = usb_priv->driver_ops->reinit(usb_priv->usb_intf,
+                                                  usb_priv->usb_device_id);
+               if (ret) {
+                       cnss_pr_err("Failed to reinit host driver, err = %d\n",
+                                   ret);
+                       goto out;
+               }
+               clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+       } else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
+               ret = usb_priv->driver_ops->probe(usb_priv->usb_intf,
+                                                 usb_priv->usb_device_id);
+               if (ret) {
+                       cnss_pr_err("Failed to probe host driver, err = %d\n",
+                                   ret);
+                       goto out;
+               }
+               clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+               clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+               set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+       }
+
+       return 0;
+
+out:
+       return ret;
+}
+
+int cnss_usb_call_driver_remove(struct cnss_usb_data *usb_priv)
+{
+       struct cnss_plat_data *plat_priv = usb_priv->plat_priv;
+
+       if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+           test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
+           test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+               cnss_pr_dbg("Skip driver remove\n");
+               return 0;
+       }
+
+       if (!usb_priv->driver_ops) {
+               cnss_pr_err("driver_ops is NULL\n");
+               return -EINVAL;
+       }
+
+       if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+           test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+               usb_priv->driver_ops->shutdown(usb_priv->usb_intf);
+       } else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+               usb_priv->driver_ops->remove(usb_priv->usb_intf);
+               clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+       }
+
+       return 0;
+}
+
+static struct usb_driver cnss_usb_driver;
+#define QCN7605_WLAN_STANDALONE_INTERFACE_NUM  0x0000
+#define QCN7605_WLAN_COMPOSITE_INTERFACE_NUM   0x0002
+
+static int cnss_usb_probe(struct usb_interface *interface,
+                         const struct usb_device_id *id)
+{
+       int ret = 0;
+       struct cnss_usb_data *usb_priv;
+       struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+       struct usb_device *usb_dev = interface_to_usbdev(interface);
+       unsigned short bcd_device;
+
+       cnss_pr_dbg("USB probe, vendor ID: 0x%x, product ID: 0x%x\n",
+                   id->idVendor, id->idProduct);
+
+       if (!plat_priv)
+               return -ENODEV;
+
+       usb_priv = devm_kzalloc(&usb_dev->dev, sizeof(*usb_priv), GFP_KERNEL);
+       if (!usb_priv) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       bcd_device = le16_to_cpu(usb_dev->descriptor.bcdDevice);
+       usb_priv->plat_priv = plat_priv;
+       usb_priv->usb_intf = interface;
+       usb_priv->usb_device_id = id;
+       usb_priv->device_id = id->idProduct;
+       usb_priv->target_version = bcd_device;
+       cnss_set_usb_priv(interface, usb_priv);
+       plat_priv->device_id = usb_priv->device_id;
+       plat_priv->bus_priv = usb_priv;
+
+       ret = cnss_register_subsys(plat_priv);
+       if (ret)
+               goto reset_ctx;
+
+       ret = cnss_register_ramdump(plat_priv);
+       if (ret)
+               goto unregister_subsys;
+
+       switch (usb_priv->device_id) {
+       case QCN7605_COMPOSITE_DEVICE_ID:
+       case QCN7605_STANDALONE_DEVICE_ID:
+               break;
+       default:
+               cnss_pr_err("Unknown USB device found: 0x%x\n",
+                           usb_priv->device_id);
+               ret = -ENODEV;
+               goto unregister_ramdump;
+       }
+
+       /*increment the ref count of usb dev structure*/
+       usb_get_dev(usb_dev);
+       return 0;
+
+unregister_ramdump:
+       cnss_unregister_ramdump(plat_priv);
+unregister_subsys:
+       cnss_unregister_subsys(plat_priv);
+reset_ctx:
+       plat_priv->bus_priv = NULL;
+       devm_kfree(&usb_dev->dev, usb_priv);
+out:
+       return ret;
+}
+
+static void cnss_usb_remove(struct usb_interface *interface)
+{
+       struct usb_device *usb_dev = interface_to_usbdev(interface);
+       struct cnss_usb_data *usb_priv = cnss_get_usb_priv(interface);
+
+       if (!usb_priv)
+               return;
+       if (usb_priv->plat_priv)
+               usb_priv->plat_priv->bus_priv = NULL;
+       usb_put_dev(usb_dev);
+       devm_kfree(&usb_dev->dev, usb_priv);
+}
+
+static int cnss_usb_suspend(struct usb_interface *interface, pm_message_t state)
+{
+       int ret = 0;
+       struct cnss_usb_data *usb_priv;
+
+       usb_priv = cnss_get_usb_priv(interface);
+       if (!usb_priv || !usb_priv->driver_ops) {
+               cnss_pr_err("usb_priv or driver_ops is NULL\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = usb_priv->driver_ops->suspend(usb_priv->usb_intf,
+                                           state);
+out:
+       return ret;
+}
+
+static int cnss_usb_resume(struct usb_interface *interface)
+{
+       int ret = 0;
+       struct cnss_usb_data *usb_priv;
+
+       usb_priv = cnss_get_usb_priv(interface);
+       if (!usb_priv || !usb_priv->driver_ops) {
+               cnss_pr_err("usb_priv or driver_ops is NULL\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = usb_priv->driver_ops->resume(usb_priv->usb_intf);
+
+out:
+       return ret;
+}
+
+static int cnss_usb_reset_resume(struct usb_interface *interface)
+{
+       return 0;
+}
+
+static struct usb_device_id cnss_usb_id_table[] = {
+       { USB_DEVICE_INTERFACE_NUMBER(QCN7605_USB_VENDOR_ID,
+                                     QCN7605_COMPOSITE_PRODUCT_ID,
+                                     QCN7605_WLAN_COMPOSITE_INTERFACE_NUM) },
+       { USB_DEVICE_INTERFACE_NUMBER(QCN7605_USB_VENDOR_ID,
+                                     QCN7605_STANDALONE_PRODUCT_ID,
+                                     QCN7605_WLAN_STANDALONE_INTERFACE_NUM) },
+       {}                      /* Terminating entry */
+};
+
+static struct usb_driver cnss_usb_driver = {
+       .name       = "cnss_usb",
+       .id_table   = cnss_usb_id_table,
+       .probe      = cnss_usb_probe,
+       .disconnect = cnss_usb_remove,
+       .suspend    = cnss_usb_suspend,
+       .resume     = cnss_usb_resume,
+       .reset_resume = cnss_usb_reset_resume,
+       .supports_autosuspend = true,
+};
+
+int cnss_usb_init(struct cnss_plat_data *plat_priv)
+{
+       int ret = 0;
+
+       ret = usb_register(&cnss_usb_driver);
+       if (ret) {
+               cnss_pr_err("Failed to register to Linux USB framework, err = %d\n",
+                           ret);
+               goto out;
+       }
+
+       return 0;
+out:
+       return ret;
+}
+
+void cnss_usb_deinit(struct cnss_plat_data *plat_priv)
+{
+       usb_deregister(&cnss_usb_driver);
+}
diff --git a/drivers/net/wireless/cnss2/usb.h b/drivers/net/wireless/cnss2/usb.h
new file mode 100644 (file)
index 0000000..b285dc2
--- /dev/null
@@ -0,0 +1,66 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_USB_H
+#define _CNSS_USB_H
+
+#include <linux/usb.h>
+
+#include "main.h"
+
+struct cnss_usb_data {
+       struct usb_interface *usb_intf;
+       struct cnss_plat_data *plat_priv;
+       const struct usb_device_id *usb_device_id;
+       u16 device_id; /*USB PID*/
+       u16 target_version; /* [QCN7605] - from bcdDevice*/
+       struct cnss_usb_wlan_driver *driver_ops;
+};
+
+static inline void cnss_set_usb_priv(struct usb_interface *usb_intf, void *data)
+{
+       usb_set_intfdata(usb_intf, data);
+}
+
+static inline struct cnss_usb_data *cnss_get_usb_priv(struct usb_interface
+                                                     *usb_intf)
+{
+       return usb_get_intfdata(usb_intf);
+}
+
+static inline struct cnss_plat_data *cnss_usb_priv_to_plat_priv(void *bus_priv)
+{
+       struct cnss_usb_data *usb_priv = bus_priv;
+
+       return usb_priv->plat_priv;
+}
+
+int cnss_usb_init(struct cnss_plat_data *plat_priv);
+void cnss_usb_deinit(struct cnss_plat_data *plat_priv);
+void cnss_usb_collect_dump_info(struct cnss_usb_data *usb_priv, bool in_panic);
+void cnss_usb_clear_dump_info(struct cnss_usb_data *usb_priv);
+int cnss_usb_force_fw_assert_hdlr(struct cnss_usb_data *usb_priv);
+void cnss_usb_fw_boot_timeout_hdlr(struct cnss_usb_data *usb_priv);
+int cnss_usb_call_driver_probe(struct cnss_usb_data *usb_priv);
+int cnss_usb_call_driver_remove(struct cnss_usb_data *usb_priv);
+int cnss_usb_dev_powerup(struct cnss_usb_data *usb_priv);
+int cnss_usb_dev_shutdown(struct cnss_usb_data *usb_priv);
+int cnss_usb_dev_crash_shutdown(struct cnss_usb_data *usb_priv);
+int cnss_usb_dev_ramdump(struct cnss_usb_data *usb_priv);
+
+int cnss_usb_register_driver_hdlr(struct cnss_usb_data *usb_priv, void *data);
+
+int cnss_usb_unregister_driver_hdlr(struct cnss_usb_data *usb_priv);
+int cnss_usb_call_driver_modem_status(struct cnss_usb_data *usb_priv,
+                                     int modem_current_status);
+
+#endif /* _CNSS_USB_H */
index bbf707b..be66fd6 100644 (file)
@@ -144,6 +144,60 @@ static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
        },
 };
 
+static struct elem_info wlfw_rri_over_ddr_cfg_s_v01_ei[] = {
+       {
+                       .data_type      = QMI_UNSIGNED_4_BYTE,
+                       .elem_len       = 1,
+                       .elem_size      = sizeof(u32),
+                       .is_array       = NO_ARRAY,
+                       .tlv_type       = 0,
+                       .offset         = offsetof(struct
+                                                  wlfw_rri_over_ddr_cfg_s_v01,
+                                                  base_addr_low),
+       },
+       {
+                       .data_type      = QMI_UNSIGNED_4_BYTE,
+                       .elem_len       = 1,
+                       .elem_size      = sizeof(u32),
+                       .is_array       = NO_ARRAY,
+                       .tlv_type       = 0,
+                       .offset         = offsetof(struct
+                                                  wlfw_rri_over_ddr_cfg_s_v01,
+                                                  base_addr_high),
+       },
+       {
+                       .data_type      = QMI_EOTI,
+                       .is_array       = NO_ARRAY,
+                       .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
+static struct elem_info wlfw_msi_cfg_s_v01_ei[] = {
+       {
+                       .data_type      = QMI_UNSIGNED_2_BYTE,
+                       .elem_len       = 1,
+                       .elem_size      = sizeof(u16),
+                       .is_array       = NO_ARRAY,
+                       .tlv_type       = 0,
+                       .offset         = offsetof(struct wlfw_msi_cfg_s_v01,
+                                                  ce_id),
+       },
+       {
+                       .data_type      = QMI_UNSIGNED_2_BYTE,
+                       .elem_len       = 1,
+                       .elem_size      = sizeof(u16),
+                       .is_array       = NO_ARRAY,
+                       .tlv_type       = 0,
+                       .offset         = offsetof(struct wlfw_msi_cfg_s_v01,
+                                                  msi_vector),
+       },
+       {
+                       .data_type      = QMI_EOTI,
+                       .is_array       = NO_ARRAY,
+                       .tlv_type       = QMI_COMMON_TLV_TYPE,
+       },
+};
+
 static struct elem_info wlfw_memory_region_info_s_v01_ei[] = {
        {
                .data_type      = QMI_UNSIGNED_8_BYTE,
@@ -922,6 +976,53 @@ struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
                .ei_array      = wlfw_shadow_reg_v2_cfg_s_v01_ei,
        },
        {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u8),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+                                          rri_over_ddr_cfg_valid),
+       },
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = 1,
+               .elem_size      = sizeof(struct wlfw_rri_over_ddr_cfg_s_v01),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+                                          rri_over_ddr_cfg),
+               .ei_array      = wlfw_rri_over_ddr_cfg_s_v01_ei,
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u8),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x16,
+               .offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+                                          msi_cfg_valid),
+       },
+       {
+               .data_type      = QMI_DATA_LEN,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u8),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x16,
+               .offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+                                          msi_cfg_len),
+       },
+       {
+               .data_type      = QMI_STRUCT,
+               .elem_len       = QMI_WLFW_MAX_NUM_CE_V01,
+               .elem_size      = sizeof(struct wlfw_msi_cfg_s_v01),
+               .is_array       = VAR_LEN_ARRAY,
+               .tlv_type       = 0x16,
+               .offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+                                          msi_cfg),
+               .ei_array      = wlfw_msi_cfg_s_v01_ei,
+       },
+       {
                .data_type      = QMI_EOTI,
                .is_array       = NO_ARRAY,
                .tlv_type       = QMI_COMMON_TLV_TYPE,
@@ -1311,6 +1412,46 @@ struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
                                  cal_id),
        },
        {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u8),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(
+                                 struct wlfw_initiate_cal_download_ind_msg_v01,
+                                 total_size_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u32),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(
+                                 struct wlfw_initiate_cal_download_ind_msg_v01,
+                                 total_size),
+       },
+       {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u8),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x11,
+               .offset         = offsetof(
+                                 struct wlfw_initiate_cal_download_ind_msg_v01,
+                                 cal_data_location_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u32),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x11,
+               .offset         = offsetof(
+                                 struct wlfw_initiate_cal_download_ind_msg_v01,
+                                 cal_data_location),
+       },
+       {
                .data_type      = QMI_EOTI,
                .is_array       = NO_ARRAY,
                .tlv_type       = QMI_COMMON_TLV_TYPE,
@@ -1427,6 +1568,24 @@ struct elem_info wlfw_cal_download_req_msg_v01_ei[] = {
                                           end),
        },
        {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u8),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+                                          cal_data_location_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u32),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+                                          cal_data_location),
+       },
+       {
                .data_type      = QMI_EOTI,
                .is_array       = NO_ARRAY,
                .tlv_type       = QMI_COMMON_TLV_TYPE,
@@ -1474,6 +1633,26 @@ struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
                                           total_size),
        },
        {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u8),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct
+                                          wlfw_initiate_cal_update_ind_msg_v01,
+                                          cal_data_location_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u32),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x10,
+               .offset         = offsetof(struct
+                                          wlfw_initiate_cal_update_ind_msg_v01,
+                                          cal_data_location),
+       },
+       {
                .data_type      = QMI_EOTI,
                .is_array       = NO_ARRAY,
                .tlv_type       = QMI_COMMON_TLV_TYPE,
@@ -1617,6 +1796,24 @@ struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
                                           end),
        },
        {
+               .data_type      = QMI_OPT_FLAG,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u8),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+                                          cal_data_location_valid),
+       },
+       {
+               .data_type      = QMI_UNSIGNED_4_BYTE,
+               .elem_len       = 1,
+               .elem_size      = sizeof(u32),
+               .is_array       = NO_ARRAY,
+               .tlv_type       = 0x15,
+               .offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+                                          cal_data_location),
+       },
+       {
                .data_type      = QMI_EOTI,
                .is_array       = NO_ARRAY,
                .tlv_type       = QMI_COMMON_TLV_TYPE,
index 00a873d..c264373 100644 (file)
@@ -170,6 +170,16 @@ struct wlfw_shadow_reg_v2_cfg_s_v01 {
        u32 addr;
 };
 
+struct wlfw_rri_over_ddr_cfg_s_v01 {
+       u32 base_addr_low;
+       u32 base_addr_high;
+};
+
+struct wlfw_msi_cfg_s_v01 {
+       u16 ce_id;
+       u16 msi_vector;
+};
+
 struct wlfw_memory_region_info_s_v01 {
        u64 region_addr;
        u32 size;
@@ -312,6 +322,11 @@ struct wlfw_wlan_cfg_req_msg_v01 {
        u32 shadow_reg_v2_len;
        struct wlfw_shadow_reg_v2_cfg_s_v01
                shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01];
+       u8 rri_over_ddr_cfg_valid;
+       struct wlfw_rri_over_ddr_cfg_s_v01 rri_over_ddr_cfg;
+       u8 msi_cfg_valid;
+       u32 msi_cfg_len;
+       struct wlfw_msi_cfg_s_v01 msi_cfg[QMI_WLFW_MAX_NUM_CE_V01];
 };
 
 #define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
@@ -396,9 +411,13 @@ extern struct elem_info wlfw_cal_report_resp_msg_v01_ei[];
 
 struct wlfw_initiate_cal_download_ind_msg_v01 {
        enum wlfw_cal_temp_id_enum_v01 cal_id;
+       u8 total_size_valid;
+       u32 total_size;
+       u8 cal_data_location_valid;
+       u32 cal_data_location;
 };
 
-#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 21
 extern struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
 
 struct wlfw_cal_download_req_msg_v01 {
@@ -414,9 +433,11 @@ struct wlfw_cal_download_req_msg_v01 {
        u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
        u8 end_valid;
        u8 end;
+       u8 cal_data_location_valid;
+       u32 cal_data_location;
 };
 
-#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6185
 extern struct elem_info wlfw_cal_download_req_msg_v01_ei[];
 
 struct wlfw_cal_download_resp_msg_v01 {
@@ -429,9 +450,11 @@ extern struct elem_info wlfw_cal_download_resp_msg_v01_ei[];
 struct wlfw_initiate_cal_update_ind_msg_v01 {
        enum wlfw_cal_temp_id_enum_v01 cal_id;
        u32 total_size;
+       u8 cal_data_location_valid;
+       u32 cal_data_location;
 };
 
-#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 21
 extern struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
 
 struct wlfw_cal_update_req_msg_v01 {
@@ -455,9 +478,11 @@ struct wlfw_cal_update_resp_msg_v01 {
        u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
        u8 end_valid;
        u8 end;
+       u8 cal_data_location_valid;
+       u32 cal_data_location;
 };
 
-#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6188
 extern struct elem_info wlfw_cal_update_resp_msg_v01_ei[];
 
 struct wlfw_msa_info_req_msg_v01 {
index e8b770a..c98cb96 100644 (file)
@@ -2453,9 +2453,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                                IEEE80211_VHT_CAP_SHORT_GI_80 |
                                IEEE80211_VHT_CAP_SHORT_GI_160 |
                                IEEE80211_VHT_CAP_TXSTBC |
-                               IEEE80211_VHT_CAP_RXSTBC_1 |
-                               IEEE80211_VHT_CAP_RXSTBC_2 |
-                               IEEE80211_VHT_CAP_RXSTBC_3 |
                                IEEE80211_VHT_CAP_RXSTBC_4 |
                                IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                        sband->vht_cap.vht_mcs.rx_mcs_map =
index 2595900..c76e0cf 100644 (file)
@@ -2919,6 +2919,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
 
        while (buflen >= sizeof(*auth_req)) {
                auth_req = (void *)buf;
+               if (buflen < le32_to_cpu(auth_req->length))
+                       return;
                type = "unknown";
                flags = le32_to_cpu(auth_req->flags);
                pairwise_error = false;
index f01d24b..15dc7a3 100644 (file)
@@ -35,6 +35,7 @@
 #include "wl12xx_80211.h"
 #include "cmd.h"
 #include "event.h"
+#include "ps.h"
 #include "tx.h"
 #include "hw_ops.h"
 
@@ -191,6 +192,10 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
 
        timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
 
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               return ret;
+
        do {
                if (time_after(jiffies, timeout_time)) {
                        wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@@ -222,6 +227,7 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
        } while (!event);
 
 out:
+       wl1271_ps_elp_sleep(wl);
        kfree(events_vector);
        return ret;
 }
index 13ae5c3..b97e550 100644 (file)
@@ -2368,6 +2368,12 @@ static void wcnss_nvbin_dnld(void)
                goto out;
        }
 
+       if (nv->size <= 4) {
+               pr_err("wcnss: %s: request_firmware failed for %s (file size = %zu)\n",
+                      __func__, NVBIN_FILE, nv->size);
+               goto out;
+       }
+
        /* First 4 bytes in nv blob is validity bitmap.
         * We cannot validate nv, so skip those 4 bytes.
         */
index 3270b43..0a4bd73 100644 (file)
@@ -892,7 +892,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                        BUG_ON(pull_to <= skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
-               BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+               if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+                       queue->rx.rsp_cons = ++cons;
+                       kfree_skb(nskb);
+                       return ~0U;
+               }
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                skb_frag_page(nfrag),
@@ -1029,6 +1033,8 @@ err:
                skb->len += rx->status;
 
                i = xennet_fill_frags(queue, skb, &tmpq);
+               if (unlikely(i == ~0U))
+                       goto err;
 
                if (rx->flags & XEN_NETRXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
index 2a547ca..2eac3df 100644 (file)
@@ -553,6 +553,9 @@ static void __init of_unittest_parse_interrupts(void)
        struct of_phandle_args args;
        int i, rc;
 
+       if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+               return;
+
        np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
        if (!np) {
                pr_err("missing testcase data\n");
@@ -627,6 +630,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
        struct of_phandle_args args;
        int i, rc;
 
+       if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+               return;
+
        np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
        if (!np) {
                pr_err("missing testcase data\n");
@@ -778,15 +784,19 @@ static void __init of_unittest_platform_populate(void)
        pdev = of_find_device_by_node(np);
        unittest(pdev, "device 1 creation failed\n");
 
-       irq = platform_get_irq(pdev, 0);
-       unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
-
-       /* Test that a parsing failure does not return -EPROBE_DEFER */
-       np = of_find_node_by_path("/testcase-data/testcase-device2");
-       pdev = of_find_device_by_node(np);
-       unittest(pdev, "device 2 creation failed\n");
-       irq = platform_get_irq(pdev, 0);
-       unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+       if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+               irq = platform_get_irq(pdev, 0);
+               unittest(irq == -EPROBE_DEFER,
+                        "device deferred probe failed - %d\n", irq);
+
+               /* Test that a parsing failure does not return -EPROBE_DEFER */
+               np = of_find_node_by_path("/testcase-data/testcase-device2");
+               pdev = of_find_device_by_node(np);
+               unittest(pdev, "device 2 creation failed\n");
+               irq = platform_get_irq(pdev, 0);
+               unittest(irq < 0 && irq != -EPROBE_DEFER,
+                        "device parsing error failed - %d\n", irq);
+       }
 
        np = of_find_node_by_path("/testcase-data/platform-tests");
        unittest(np, "No testcase data in device tree\n");
index 295bf14..5073ab0 100644 (file)
@@ -1064,12 +1064,12 @@ int pci_save_state(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_save_state);
 
 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
-                                    u32 saved_val, int retry)
+                                    u32 saved_val, int retry, bool force)
 {
        u32 val;
 
        pci_read_config_dword(pdev, offset, &val);
-       if (val == saved_val)
+       if (!force && val == saved_val)
                return;
 
        for (;;) {
@@ -1088,25 +1088,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
 }
 
 static void pci_restore_config_space_range(struct pci_dev *pdev,
-                                          int start, int end, int retry)
+                                          int start, int end, int retry,
+                                          bool force)
 {
        int index;
 
        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
-                                        retry);
+                                        retry, force);
 }
 
 static void pci_restore_config_space(struct pci_dev *pdev)
 {
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-               pci_restore_config_space_range(pdev, 10, 15, 0);
+               pci_restore_config_space_range(pdev, 10, 15, 0, false);
                /* Restore BARs before the command register. */
-               pci_restore_config_space_range(pdev, 4, 9, 10);
-               pci_restore_config_space_range(pdev, 0, 3, 0);
+               pci_restore_config_space_range(pdev, 4, 9, 10, false);
+               pci_restore_config_space_range(pdev, 0, 3, 0, false);
+       } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+               pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+               /*
+                * Force rewriting of prefetch registers to avoid S3 resume
+                * issues on Intel PCI bridges that occur when these
+                * registers are not explicitly written.
+                */
+               pci_restore_config_space_range(pdev, 9, 11, 0, true);
+               pci_restore_config_space_range(pdev, 0, 8, 0, false);
        } else {
-               pci_restore_config_space_range(pdev, 0, 15, 0);
+               pci_restore_config_space_range(pdev, 0, 15, 0, false);
        }
 }
 
index d9f8912..f1e27fd 100644 (file)
 
 #define IPA_SPS_PROD_TIMEOUT_MSEC 100
 
+#define EP_EMPTY_MAX_RETRY 5
+#define IPA_BAM_REG_MAP_SIZE 4
+#define IPA_BAM_REG_N_OFST 0x1000
+
+
 #ifdef CONFIG_COMPAT
 #define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
                                        IPA_IOCTL_ADD_HDR, \
@@ -2238,6 +2243,8 @@ int ipa_apps_shutdown_cleanup(void)
 
        ipa2_apps_shutdown_apps_ep_reset();
 
+       iounmap_non_ap_bam_regs();
+
        IPA_ACTIVE_CLIENTS_DEC_SPECIAL("APPS_SHUTDOWN");
 
        return 0;
@@ -2293,6 +2300,205 @@ int ipa_q6_pre_shutdown_cleanup(void)
        return 0;
 }
 
+static void __iomem *ioremap_sw_desc_ofst_bam_register(int ep_idx)
+{
+       int ep_ofst = IPA_BAM_REG_N_OFST * ep_idx;
+
+       return ioremap(ipa_ctx->ipa_wrapper_base
+               + IPA_BAM_REG_BASE_OFST
+               + IPA_BAM_SW_DESC_OFST
+               + ep_ofst,
+               IPA_BAM_REG_MAP_SIZE);
+}
+
+static void __iomem *ioremap_peer_desc_ofst_bam_register(int ep_idx)
+{
+       int ep_ofst = IPA_BAM_REG_N_OFST * ep_idx;
+
+       return ioremap(ipa_ctx->ipa_wrapper_base
+               + IPA_BAM_REG_BASE_OFST
+               + IPA_BAM_PEER_DESC_OFST
+               + ep_ofst,
+               IPA_BAM_REG_MAP_SIZE);
+}
+
+/**
+* ioremap_non_ap_bam_regs() -
+       perform ioremap of non-apps eps
+       bam sw_ofsts and evnt_ring
+       register.
+       Present only Q6 ep's are done.
+*
+* Return codes:
+* 0: success
+* non-Zero: In case of memory failure
+*/
+int ioremap_non_ap_bam_regs(void)
+{
+       int client_idx;
+       int ep_idx;
+
+       if (!ipa_ctx) {
+               IPAERR("IPA driver init not done\n");
+               return -ENODEV;
+       }
+
+       for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+               if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
+                       IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) ||
+                       IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) ||
+                       IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) {
+
+                       ep_idx = ipa2_get_ep_mapping(client_idx);
+
+                       if (ep_idx == -1)
+                               continue;
+
+                       ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx] =
+                               ioremap_sw_desc_ofst_bam_register(ep_idx);
+                       ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx] =
+                               ioremap_peer_desc_ofst_bam_register(ep_idx);
+
+                       if (!ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx] ||
+                               !ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx]) {
+                               IPAERR("IOREMA Failure @ ep %d\n", ep_idx);
+                               return -ENOMEM;
+                       }
+               }
+               return 0;
+}
+
+/**
+* iounmap_non_ap_bam_regs() -
+       unmap the ioremapped addr of
+       non-apps ep bam sw_ofsts and
+       evnt_ring register.
+*/
+void iounmap_non_ap_bam_regs(void)
+{
+       int client_idx;
+       int ep_idx;
+
+       if (!ipa_ctx) {
+               IPAERR("IPA driver init not done\n");
+               return;
+       }
+
+       for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+               if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
+                       IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) ||
+                       IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) ||
+                       IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) {
+
+                       ep_idx = ipa2_get_ep_mapping(client_idx);
+
+                       if (ep_idx == -1)
+                               continue;
+
+                       if (ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx])
+                               iounmap
+                               (ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx]);
+                       if (ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx])
+                               iounmap
+                               (ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx]);
+               }
+}
+
+/**
+* wait_for_ep_empty() - Wait for sps bam empty
+*
+* @client: ipa client to check for empty
+*
+* Return codes:
+* 0: success upon ep empty
+* non-Zero: Failure if ep non-empty
+*/
+
+int wait_for_ep_empty(enum ipa_client_type client)
+{
+       struct ipa_ep_context *ep = NULL;
+       u32 is_ep_empty = 0;
+       int ret = 0;
+       union ipa_bam_sw_peer_desc read_sw_desc;
+       union ipa_bam_sw_peer_desc read_peer_desc;
+       u32 retry = EP_EMPTY_MAX_RETRY;
+       int ep_idx = ipa2_get_ep_mapping(client);
+
+       if (ep_idx == -1)
+               return -ENODEV;
+
+       ep = &ipa_ctx->ep[ep_idx];
+
+check_ap_ep_empty:
+       if (ep->valid) {
+               ret = sps_is_pipe_empty(ep->ep_hdl, &is_ep_empty);
+               if (ret && retry--) {
+                       usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+                               IPA_UC_WAII_MAX_SLEEP);
+                       goto check_ap_ep_empty;
+               } else {
+                       IPAERR("Ep %d is non-empty even after retries\n",
+                               ep_idx);
+                       ret = -1;
+               }
+       } else {
+               /* Might be Q6 ep, which is non-AP ep */
+
+check_non_ap_ep_empty:
+               IPADBG("request is for non-Apps ep %d\n", ep_idx);
+
+               if (IPA_CLIENT_IS_CONS(client)) {
+                       /*
+                        * Do not wait for empty in client CONS ep's
+                        * It is software responsibility
+                        * to set sus/holb on client cons ep
+                        * and ipa would be empty due to that.
+                       */
+                       ret = 0;
+                       goto success;
+               }
+
+               if (ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx] &&
+                       ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx]) {
+
+                       read_sw_desc.read_reg =
+                       ioread32
+                       (ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx]);
+                       read_peer_desc.read_reg =
+                       ioread32
+                       (ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx]);
+
+                       IPADBG("sw_desc reg 0x%x\n",
+                               read_sw_desc.read_reg);
+                       IPADBG("sw_dsc_ofst = 0x%x\n",
+                               read_sw_desc.sw_desc.sw_dsc_ofst);
+                       IPADBG("sw_desc reg 0x%x\n",
+                               read_peer_desc.read_reg);
+                       IPADBG("p_dsc_fifo_peer_ofst = 0x%x\n",
+                       read_peer_desc.peer_desc.p_dsc_fifo_peer_ofst);
+
+                       if (read_sw_desc.sw_desc.sw_dsc_ofst ==
+                               read_peer_desc.peer_desc.p_dsc_fifo_peer_ofst) {
+                               IPADBG("EP %d is empty\n", ep_idx);
+                               ret = 0;
+                       } else if (retry) {
+                               retry--;
+                               usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+                               IPA_UC_WAII_MAX_SLEEP);
+                               goto check_non_ap_ep_empty;
+                       } else {
+                               IPAERR
+                               ("Ep %d is non-empty even after retries\n",
+                               ep_idx);
+                               ret = -1;
+                       }
+               }
+       }
+
+success:
+       return ret;
+}
+
 /**
 * ipa_q6_post_shutdown_cleanup() - A cleanup for the Q6 pipes
 *                    in IPA HW after modem shutdown. This is performed
@@ -2331,7 +2537,22 @@ int ipa_q6_post_shutdown_cleanup(void)
                        IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) ||
                        IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) ||
                        IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) {
-                       res = ipa_uc_reset_pipe(client_idx);
+
+                       if (ipa_ctx->is_apps_shutdown_support &&
+                               (ipa2_get_ep_mapping(client_idx) != -1)) {
+                               /*
+                                * Check  for Q6 ep empty
+                                * before issue a reset
+                                */
+                               res = wait_for_ep_empty(client_idx);
+                               if (res)
+                                       IPAERR("ep %d not empty\n",
+                                       ipa2_get_ep_mapping(client_idx));
+                               else
+                                       res = ipa_uc_reset_pipe(client_idx);
+                       } else {
+                               res = ipa_uc_reset_pipe(client_idx);
+                       }
                        if (res)
                                BUG();
                }
@@ -4514,6 +4735,15 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
 
        ipa_register_panic_hdlr();
 
+       if (ipa_ctx->is_apps_shutdown_support) {
+               result = ioremap_non_ap_bam_regs();
+               if (result) {
+                       IPAERR(":IOREMAP Failed (%d)\n", result);
+                       goto fail_add_interrupt_handler;
+               } else {
+                       IPAERR(":IOREMAP success (%d)\n", result);
+               }
+       }
        pr_info("IPA driver initialization was successful.\n");
 
        return 0;
index 1d34564..1c9eeb5 100644 (file)
@@ -1105,6 +1105,8 @@ struct ipa_context {
        struct cdev cdev;
        unsigned long bam_handle;
        struct ipa_ep_context ep[IPA_MAX_NUM_PIPES];
+       void __iomem *ipa_non_ap_bam_s_desc_iova[IPA_MAX_NUM_PIPES];
+       void __iomem *ipa_non_ap_bam_p_desc_iova[IPA_MAX_NUM_PIPES];
        bool skip_ep_cfg_shadow[IPA_MAX_NUM_PIPES];
        bool resume_on_connect[IPA_CLIENT_MAX];
        struct ipa_flt_tbl flt_tbl[IPA_MAX_NUM_PIPES][IPA_IP_MAX];
@@ -1900,6 +1902,9 @@ int ipa_q6_pre_shutdown_cleanup(void);
 int ipa_apps_shutdown_cleanup(void);
 int register_ipa_platform_cb(int (*cb)(void));
 int ipa_q6_post_shutdown_cleanup(void);
+int wait_for_ep_empty(enum ipa_client_type client);
+int ioremap_non_ap_bam_regs(void);
+void iounmap_non_ap_bam_regs(void);
 int ipa_init_q6_smem(void);
 int ipa_q6_monitor_holb_mitigation(bool enable);
 
index 7cddbf8..a7cdf69 100644 (file)
@@ -35,6 +35,13 @@ enum nat_table_type {
 #define NAT_TABLE_ENTRY_SIZE_BYTE 32
 #define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4
 
+/*
+ * Max NAT table entries is limited 1000 entries.
+ * Limit the memory size required by user to prevent kernel memory starvation
+ */
+#define IPA_TABLE_MAX_ENTRIES 1000
+#define MAX_ALLOC_NAT_SIZE (IPA_TABLE_MAX_ENTRIES * NAT_TABLE_ENTRY_SIZE_BYTE)
+
 static int ipa_nat_vma_fault_remap(
         struct vm_area_struct *vma, struct vm_fault *vmf)
 {
@@ -270,6 +277,13 @@ int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
                goto bail;
        }
 
+       if (mem->size > MAX_ALLOC_NAT_SIZE) {
+               IPAERR("Trying allocate more size = %zu, Max allowed = %d\n",
+                               mem->size, MAX_ALLOC_NAT_SIZE);
+               result = -EPERM;
+               goto bail;
+       }
+
        if (mem->size <= 0 ||
                        nat_ctx->is_dev_init == true) {
                IPAERR_RL("Invalid Parameters or device is already init\n");
index 33e42ae..19aa1d1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016,2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -93,6 +93,9 @@
  */
 #define IPA_ENABLED_PIPES_OFST 0x000005DC
 #define IPA_YELLOW_MARKER_SYS_CFG_OFST 0x00000728
+#define IPA_BAM_SW_DESC_OFST 0x00013800
+#define IPA_BAM_PEER_DESC_OFST 0x00013818
+
 /*
  * End of IPA 2.6/2.6L Registers
  */
index 5bab6d0..4fea125 100644 (file)
@@ -1342,6 +1342,8 @@ int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only)
        struct ipa_rt_entry *rule_next;
        struct ipa_rt_tbl_set *rset;
        u32 apps_start_idx;
+       struct ipa_hdr_entry *hdr_entry;
+       struct ipa_hdr_proc_ctx_entry *hdr_proc_entry;
        int id;
        bool tbl_user = false;
 
@@ -1395,6 +1397,27 @@ int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only)
                        if (!user_only ||
                                rule->ipacm_installed) {
                                list_del(&rule->link);
+                               if (rule->hdr) {
+                                       hdr_entry = ipa_id_find(
+                                               rule->rule.hdr_hdl);
+                                       if (!hdr_entry ||
+                                       hdr_entry->cookie != IPA_HDR_COOKIE) {
+                                               IPAERR_RL(
+                                               "Header already deleted\n");
+                                               return -EINVAL;
+                                       }
+                               } else if (rule->proc_ctx) {
+                                       hdr_proc_entry =
+                                               ipa_id_find(
+                                               rule->rule.hdr_proc_ctx_hdl);
+                                       if (!hdr_proc_entry ||
+                                               hdr_proc_entry->cookie !=
+                                                       IPA_PROC_HDR_COOKIE) {
+                                       IPAERR_RL(
+                                               "Proc entry already deleted\n");
+                                               return -EINVAL;
+                                       }
+                               }
                                tbl->rule_cnt--;
                                if (rule->hdr)
                                        __ipa_release_hdr(rule->hdr->id);
index 17e4cae..0b52acd 100644 (file)
@@ -34,6 +34,13 @@ enum nat_table_type {
 #define NAT_TABLE_ENTRY_SIZE_BYTE 32
 #define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4
 
+/*
+ * Max NAT table entries is limited 1000 entries.
+ * Limit the memory size required by user to prevent kernel memory starvation
+ */
+#define IPA_TABLE_MAX_ENTRIES 1000
+#define MAX_ALLOC_NAT_SIZE (IPA_TABLE_MAX_ENTRIES * NAT_TABLE_ENTRY_SIZE_BYTE)
+
 static int ipa3_nat_vma_fault_remap(
         struct vm_area_struct *vma, struct vm_fault *vmf)
 {
@@ -272,6 +279,13 @@ int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
                goto bail;
        }
 
+       if (mem->size > MAX_ALLOC_NAT_SIZE) {
+               IPAERR("Trying allocate more size = %zu, Max allowed = %d\n",
+                               mem->size, MAX_ALLOC_NAT_SIZE);
+               result = -EPERM;
+               goto bail;
+       }
+
        if (mem->size <= 0 ||
                        nat_ctx->is_dev_init == true) {
                IPAERR_RL("Invalid Parameters or device is already init\n");
index 8f6024c..2568430 100644 (file)
@@ -1486,6 +1486,8 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only)
        struct ipa3_rt_entry *rule;
        struct ipa3_rt_entry *rule_next;
        struct ipa3_rt_tbl_set *rset;
+       struct ipa3_hdr_entry *hdr_entry;
+       struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
        u32 apps_start_idx;
        int id;
        bool tbl_user = false;
@@ -1539,6 +1541,27 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only)
                        if (!user_only ||
                                rule->ipacm_installed) {
                                list_del(&rule->link);
+                               if (rule->hdr) {
+                                       hdr_entry = ipa3_id_find(
+                                                       rule->rule.hdr_hdl);
+                                       if (!hdr_entry ||
+                                       hdr_entry->cookie != IPA_HDR_COOKIE) {
+                                               IPAERR_RL(
+                                               "Header already deleted\n");
+                                               return -EINVAL;
+                                       }
+                               } else if (rule->proc_ctx) {
+                                       hdr_proc_entry =
+                                               ipa3_id_find(
+                                               rule->rule.hdr_proc_ctx_hdl);
+                                       if (!hdr_proc_entry ||
+                                               hdr_proc_entry->cookie !=
+                                                       IPA_PROC_HDR_COOKIE) {
+                                               IPAERR_RL(
+                                               "Proc entry already deleted\n");
+                                               return -EINVAL;
+                                       }
+                               }
                                tbl->rule_cnt--;
                                if (rule->hdr)
                                        __ipa3_release_hdr(rule->hdr->id);
@@ -1546,7 +1569,9 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only)
                                        __ipa3_release_hdr_proc_ctx(
                                                rule->proc_ctx->id);
                                rule->cookie = 0;
-                               idr_remove(&tbl->rule_ids, rule->rule_id);
+                               if (!rule->rule_id_valid)
+                                       idr_remove(&tbl->rule_ids,
+                                               rule->rule_id);
                                id = rule->id;
                                kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
 
index 1e1e594..3df47c1 100644 (file)
@@ -463,6 +463,7 @@ static acpi_status alienware_hdmi_command(struct hdmi_args *in_args,
                if (obj && obj->type == ACPI_TYPE_INTEGER)
                        *out_data = (u32) obj->integer.value;
        }
+       kfree(output.pointer);
        return status;
 
 }
index 6a9bf70..ccb6196 100644 (file)
@@ -35,6 +35,7 @@ static void vexpress_reset_do(struct device *dev, const char *what)
 }
 
 static struct device *vexpress_power_off_device;
+static atomic_t vexpress_restart_nb_refcnt = ATOMIC_INIT(0);
 
 static void vexpress_power_off(void)
 {
@@ -99,10 +100,13 @@ static int _vexpress_register_restart_handler(struct device *dev)
        int err;
 
        vexpress_restart_device = dev;
-       err = register_restart_handler(&vexpress_restart_nb);
-       if (err) {
-               dev_err(dev, "cannot register restart handler (err=%d)\n", err);
-               return err;
+       if (atomic_inc_return(&vexpress_restart_nb_refcnt) == 1) {
+               err = register_restart_handler(&vexpress_restart_nb);
+               if (err) {
+                       dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+                       atomic_dec(&vexpress_restart_nb_refcnt);
+                       return err;
+               }
        }
        device_create_file(dev, &dev_attr_active);
 
index acdb5cc..34d3b7a 100644 (file)
@@ -523,7 +523,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
                default:
                        dev_kfree_skb_any(skb);
                        QETH_CARD_TEXT(card, 3, "inbunkno");
-                       QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+                       QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
                        continue;
                }
                work_done++;
index bbdb3b6..2cc9bc1 100644 (file)
@@ -1902,7 +1902,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
                default:
                        dev_kfree_skb_any(skb);
                        QETH_CARD_TEXT(card, 3, "inbunkno");
-                       QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+                       QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
                        continue;
                }
                work_done++;
index fb072cc..dada9ce 100644 (file)
@@ -2742,6 +2742,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                                              BNX2X_DOORBELL_PCI_BAR);
                reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
                ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+               if (!ep->qp.ctx_base)
+                       return -ENOMEM;
                goto arm_cq;
        }
 
index adfef9d..e26747a 100644 (file)
@@ -93,7 +93,7 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 static int fast_fail = 1;
 static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
 static unsigned int partition_number = -1;
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -261,7 +261,7 @@ static void gather_partition_info(void)
 
        ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
        if (ppartition_name)
-               strncpy(partition_name, ppartition_name,
+               strlcpy(partition_name, ppartition_name,
                                sizeof(partition_name));
        p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
        if (p_number_ptr)
index 5e565c8..7dff5d3 100644 (file)
@@ -15,8 +15,7 @@ obj-$(CONFIG_MSM_GLINK_SPI_XPRT)      +=      glink_spi_xprt.o
 obj-$(CONFIG_MSM_SMEM_LOGGING) +=      smem_log.o
 obj-$(CONFIG_MSM_SYSMON_GLINK_COMM)    += sysmon-glink.o sysmon-qmi.o
 obj-$(CONFIG_ARCH_MSM8996) +=  kryo-l2-accessors.o
-obj-$(CONFIG_MSM_SMP2P)        +=      smp2p.o smp2p_debug.o smp2p_sleepstate.o
-obj-$(CONFIG_MSM_SMP2P_TEST)   +=      smp2p_loopback.o smp2p_test.o smp2p_spinlock_test.o
+obj-$(CONFIG_MSM_SMP2P)        +=      smp2p.o smp2p_loopback.o smp2p_debug.o smp2p_sleepstate.o
 obj-$(CONFIG_MSM_QMI_INTERFACE)        +=      qmi_interface.o
 obj-$(CONFIG_MSM_RPM_SMD)      +=      rpm-smd.o
 obj-$(CONFIG_MSM_HVC) += hvc.o
index db4961f..ebe7dfc 100644 (file)
@@ -21,7 +21,7 @@
        .openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\
        }
 
-static const char hab_info_str[] = "Change: 16764735 Revision: #76";
+static const char hab_info_str[] = "Change: 17280941 Revision: #81";
 
 /*
  * The following has to match habmm definitions, order does not matter if
@@ -42,15 +42,13 @@ static struct hab_device hab_devices[] = {
        HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 10),
        HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 11),
        HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 12),
-       HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 13),
-       HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 14),
-       HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 15),
-       HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 16),
-       HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 17),
-       HAB_DEVICE_CNSTR(DEVICE_CLK1_NAME, MM_CLK_VM1, 18),
-       HAB_DEVICE_CNSTR(DEVICE_CLK2_NAME, MM_CLK_VM2, 19),
-       HAB_DEVICE_CNSTR(DEVICE_FDE1_NAME, MM_FDE_1, 20),
-       HAB_DEVICE_CNSTR(DEVICE_BUFFERQ1_NAME, MM_BUFFERQ_1, 21),
+       HAB_DEVICE_CNSTR(DEVICE_VID2_NAME, MM_VID_2, 13),
+       HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 14),
+       HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 15),
+       HAB_DEVICE_CNSTR(DEVICE_CLK1_NAME, MM_CLK_VM1, 16),
+       HAB_DEVICE_CNSTR(DEVICE_CLK2_NAME, MM_CLK_VM2, 17),
+       HAB_DEVICE_CNSTR(DEVICE_FDE1_NAME, MM_FDE_1, 18),
+       HAB_DEVICE_CNSTR(DEVICE_BUFFERQ1_NAME, MM_BUFFERQ_1, 19),
 };
 
 struct hab_driver hab_driver = {
@@ -218,7 +216,15 @@ struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
        read_lock(&ctx->ctx_lock);
        list_for_each_entry(vchan, &ctx->vchannels, node) {
                if (vcid == vchan->id) {
-                       kref_get(&vchan->refcount);
+                       if (vchan->otherend_closed || vchan->closed ||
+                               !kref_get_unless_zero(&vchan->refcount)) {
+                               pr_debug("failed to inc vcid %x remote %x session %d refcnt %d close_flg remote %d local %d\n",
+                                       vchan->id, vchan->otherend_id,
+                                       vchan->session_id,
+                                       get_refcnt(vchan->refcount),
+                                       vchan->otherend_closed, vchan->closed);
+                               vchan = NULL;
+                       }
                        read_unlock(&ctx->ctx_lock);
                        return vchan;
                }
@@ -1074,15 +1080,25 @@ static int hab_release(struct inode *inodep, struct file *filep)
        /* notify remote side on vchan closing */
        list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
                list_del(&vchan->node); /* vchan is not in this ctx anymore */
-               hab_vchan_stop_notify(vchan);
+
+               if (!vchan->closed) { /* locally hasn't closed yet */
+                       if (!kref_get_unless_zero(&vchan->refcount)) {
+                               pr_err("vchan %x %x refcnt %d mismanaged closed %d remote closed %d\n",
+                                       vchan->id,
+                                       vchan->otherend_id,
+                                       get_refcnt(vchan->refcount),
+                                       vchan->closed, vchan->otherend_closed);
+                               continue; /* vchan is already being freed */
+                       } else {
+                               hab_vchan_stop_notify(vchan);
+                               /* put for notify. shouldn't cause free */
+                               hab_vchan_put(vchan);
+                       }
+               } else
+                       continue;
+
                write_unlock(&ctx->ctx_lock);
-               if (!vchan->closed) {
-                       pr_warn("potential leak vc %pK %x remote %x session %d refcnt %d\n",
-                                       vchan, vchan->id, vchan->otherend_id,
-                                       vchan->session_id,
-                                       get_refcnt(vchan->refcount));
-                       hab_vchan_put(vchan); /* there is a lock inside */
-               }
+               hab_vchan_put(vchan); /* there is a lock inside */
                write_lock(&ctx->ctx_lock);
        }
 
@@ -1316,7 +1332,6 @@ static int __init hab_init(void)
        dev_t dev;
 
        place_marker("M - HAB INIT Start");
-
        result = alloc_chrdev_region(&hab_driver.major, 0, 1, "hab");
 
        if (result < 0) {
@@ -1371,11 +1386,8 @@ static int __init hab_init(void)
                } else
                        set_dma_ops(hab_driver.dev, &hab_dma_ops);
        }
-
        hab_stat_init(&hab_driver);
-
        place_marker("M - HAB INIT End");
-
        return result;
 
 err:
index c4e8eaa..cbc049e 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/reboot.h>
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
+#include <linux/delay.h>
 #include <soc/qcom/boot_stats.h>
 
 enum hab_payload_type {
@@ -81,11 +82,9 @@ enum hab_payload_type {
 #define DEVICE_DISP5_NAME "hab_disp5"
 #define DEVICE_GFX_NAME "hab_ogles"
 #define DEVICE_VID_NAME "hab_vid"
+#define DEVICE_VID2_NAME "hab_vid2"
 #define DEVICE_MISC_NAME "hab_misc"
 #define DEVICE_QCPE1_NAME "hab_qcpe_vm1"
-#define DEVICE_QCPE2_NAME "hab_qcpe_vm2"
-#define DEVICE_QCPE3_NAME "hab_qcpe_vm3"
-#define DEVICE_QCPE4_NAME "hab_qcpe_vm4"
 #define DEVICE_CLK1_NAME "hab_clock_vm1"
 #define DEVICE_CLK2_NAME "hab_clock_vm2"
 #define DEVICE_FDE1_NAME "hab_fde1"
@@ -346,6 +345,8 @@ struct hab_driver {
 };
 
 struct virtual_channel {
+       struct list_head node; /* for ctx */
+       struct list_head pnode; /* for pchan */
        /*
         * refcount is used to track the references from hab core to the virtual
         * channel such as references from physical channels,
@@ -354,8 +355,6 @@ struct virtual_channel {
        struct kref refcount;
        struct physical_channel *pchan;
        struct uhab_context *ctx;
-       struct list_head node; /* for ctx */
-       struct list_head pnode; /* for pchan */
        struct list_head rx_list;
        wait_queue_head_t rx_queue;
        spinlock_t rx_lock;
index e743d9b..a445aa1 100644 (file)
@@ -14,6 +14,7 @@
 #include "hab.h"
 #include "hab_ghs.h"
 
+#define GIPC_VM_SET_CNT    22
 static const char * const dt_gipc_path_name[] = {
        "testgipc1",
        "testgipc2",
@@ -39,12 +40,41 @@ static const char * const dt_gipc_path_name[] = {
        "testgipc22",
 };
 
+
+/* same vmid assignment for all the vms. it should matches dt_gipc_path_name */
+int mmid_order[GIPC_VM_SET_CNT] = {
+       MM_AUD_1,
+       MM_AUD_2,
+       MM_AUD_3,
+       MM_AUD_4,
+       MM_CAM_1,
+       MM_CAM_2,
+       MM_DISP_1,
+       MM_DISP_2,
+       MM_DISP_3,
+       MM_DISP_4,
+       MM_DISP_5,
+       MM_GFX,
+       MM_VID,
+       MM_MISC,
+       MM_QCPE_VM1,
+       MM_VID_2, /* newly recycled */
+       0,
+       0,
+       MM_CLK_VM1,
+       MM_CLK_VM2,
+       MM_FDE_1,
+       MM_BUFFERQ_1,
+};
+
 static struct ghs_vmm_plugin_info_s {
        const char * const *dt_name;
+       int *mmid_dt_mapping;
        int curr;
        int probe_cnt;
 } ghs_vmm_plugin_info = {
        dt_gipc_path_name,
+       mmid_order,
        0,
        ARRAY_SIZE(dt_gipc_path_name),
 };
@@ -59,6 +89,33 @@ static void ghs_irq_handler(void *cookie)
                tasklet_schedule(&dev->task);
 }
 
+static int get_dt_name_idx(int vmid_base, int mmid,
+                               struct ghs_vmm_plugin_info_s *plugin_info)
+{
+       int idx = -1;
+       int i;
+
+       if (vmid_base < 0 || vmid_base > plugin_info->probe_cnt /
+                                               GIPC_VM_SET_CNT) {
+               pr_err("vmid %d overflow expected max %d\n", vmid_base,
+                               plugin_info->probe_cnt / GIPC_VM_SET_CNT);
+               return idx;
+       }
+
+       for (i = 0; i < GIPC_VM_SET_CNT; i++) {
+               if (mmid == plugin_info->mmid_dt_mapping[i]) {
+                       idx = vmid_base * GIPC_VM_SET_CNT + i;
+                       if (idx > plugin_info->probe_cnt) {
+                               pr_err("dt name idx %d overflow max %d\n",
+                                               idx, plugin_info->probe_cnt);
+                               idx = -1;
+                       }
+                       break;
+               }
+       }
+       return idx;
+}
+
 /* static struct physical_channel *habhyp_commdev_alloc(int id) */
 int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
                struct hab_device *mmid_device)
@@ -67,6 +124,7 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
        struct physical_channel *pchan = NULL;
        struct physical_channel **ppchan = (struct physical_channel **)commdev;
        int ret = 0;
+       int dt_name_idx = 0;
 
        if (ghs_vmm_plugin_info.curr > ghs_vmm_plugin_info.probe_cnt) {
                pr_err("too many commdev alloc %d, supported is %d\n",
@@ -101,13 +159,25 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
                gvh_dn = of_find_node_by_path("/aliases");
                if (gvh_dn) {
                        const char *ep_path = NULL;
-                       struct device_node *ep_dn;
+                       struct device_node *ep_dn = NULL;
+
+                       dt_name_idx = get_dt_name_idx(vmid_remote,
+                                                       mmid_device->id,
+                                                       &ghs_vmm_plugin_info);
+                       if (dt_name_idx < 0) {
+                               pr_err("failed to find %s for vmid %d ret %d\n",
+                                               mmid_device->name,
+                                               mmid_device->id,
+                                               dt_name_idx);
+                               ret = -ENOENT;
+                               goto err;
+                       }
 
                        ret = of_property_read_string(gvh_dn,
-                       ghs_vmm_plugin_info.dt_name[ghs_vmm_plugin_info.curr],
-                       &ep_path);
+                               ghs_vmm_plugin_info.dt_name[dt_name_idx],
+                               &ep_path);
                        if (ret)
-                               pr_err("failed to read endpoint string ret %d\n",
+                               pr_err("failed to read endpoint str ret %d\n",
                                        ret);
                        of_node_put(gvh_dn);
 
@@ -117,22 +187,23 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
                                of_node_put(ep_dn);
                                if (IS_ERR(dev->endpoint)) {
                                        ret = PTR_ERR(dev->endpoint);
-                                       pr_err("KGIPC alloc failed id: %d, ret: %d\n",
-                                          ghs_vmm_plugin_info.curr, ret);
+                                       pr_err("alloc failed %d %s ret %d\n",
+                                               dt_name_idx, mmid_device->name,
+                                               ret);
                                        goto err;
                                } else {
-                                       pr_debug("gipc ep found for %d\n",
-                                               ghs_vmm_plugin_info.curr);
+                                       pr_debug("gipc ep found for %d %s\n",
+                                               dt_name_idx, mmid_device->name);
                                }
                        } else {
-                               pr_err("of_parse_phandle failed id: %d\n",
-                                          ghs_vmm_plugin_info.curr);
+                               pr_err("of_parse_phandle failed id %d %s\n",
+                                          dt_name_idx, mmid_device->name);
                                ret = -ENOENT;
                                goto err;
                        }
                } else {
-                       pr_err("of_find_compatible_node failed id: %d\n",
-                                  ghs_vmm_plugin_info.curr);
+                       pr_err("of_find_compatible_node failed id %d %s\n",
+                                  dt_name_idx, mmid_device->name);
                        ret = -ENOENT;
                        goto err;
                }
@@ -149,6 +220,7 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
        pchan->hyp_data = (void *)dev;
        pchan->is_be = is_be;
        strlcpy(dev->name, name, sizeof(dev->name));
+       strlcpy(pchan->name, name, sizeof(pchan->name));
        *ppchan = pchan;
        dev->read_data = kmalloc(GIPC_RECV_BUFF_SIZE_BYTES, GFP_KERNEL);
        if (!dev->read_data) {
index b5afd98..60156c6 100644 (file)
@@ -21,18 +21,14 @@ struct pages_list {
        struct page **pages;
        long npages;
        uint64_t index; /* for mmap first call */
-       int kernel;
        void *kva;
-       void *uva;
-       int refcntk;
-       int refcntu;
        uint32_t userflags;
        struct file *filp_owner;
        struct file *filp_mapper;
-       struct dma_buf *dmabuf;
        int32_t export_id;
        int32_t vcid;
        struct physical_channel *pchan;
+       struct kref refcount;
 };
 
 struct importer_context {
@@ -42,6 +38,118 @@ struct importer_context {
        rwlock_t implist_lock;
 };
 
+static struct pages_list *pages_list_create(
+       void *imp_ctx,
+       struct export_desc *exp,
+       uint32_t userflags)
+{
+       struct page **pages;
+       struct compressed_pfns *pfn_table =
+               (struct compressed_pfns *)exp->payload;
+       struct pages_list *pglist;
+       unsigned long pfn;
+       int i, j, k = 0, size;
+
+       if (!pfn_table)
+               return ERR_PTR(-EINVAL);
+
+       size = exp->payload_count * sizeof(struct page *);
+       pages = kmalloc(size, GFP_KERNEL);
+       if (!pages)
+               return ERR_PTR(-ENOMEM);
+
+       pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
+       if (!pglist) {
+               kfree(pages);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       pfn = pfn_table->first_pfn;
+       for (i = 0; i < pfn_table->nregions; i++) {
+               for (j = 0; j < pfn_table->region[i].size; j++) {
+                       pages[k] = pfn_to_page(pfn+j);
+                       k++;
+               }
+               pfn += pfn_table->region[i].size + pfn_table->region[i].space;
+       }
+
+       pglist->pages = pages;
+       pglist->npages = exp->payload_count;
+       pglist->userflags = userflags;
+       pglist->export_id = exp->export_id;
+       pglist->vcid = exp->vcid_remote;
+       pglist->pchan = exp->pchan;
+
+       kref_init(&pglist->refcount);
+
+       return pglist;
+}
+
+static void pages_list_destroy(struct kref *refcount)
+{
+       struct pages_list *pglist = container_of(refcount,
+                               struct pages_list, refcount);
+
+       if (pglist->kva)
+               vunmap(pglist->kva);
+
+       kfree(pglist->pages);
+
+       kfree(pglist);
+}
+
+static void pages_list_get(struct pages_list *pglist)
+{
+       kref_get(&pglist->refcount);
+}
+
+static int pages_list_put(struct pages_list *pglist)
+{
+       return kref_put(&pglist->refcount, pages_list_destroy);
+}
+
+static struct pages_list *pages_list_lookup(
+               struct importer_context *imp_ctx,
+               uint32_t export_id, struct physical_channel *pchan)
+{
+       struct pages_list *pglist, *tmp;
+
+       read_lock(&imp_ctx->implist_lock);
+       list_for_each_entry_safe(pglist, tmp, &imp_ctx->imp_list, list) {
+               if (pglist->export_id == export_id &&
+                       pglist->pchan == pchan) {
+                       pages_list_get(pglist);
+                       read_unlock(&imp_ctx->implist_lock);
+                       return pglist;
+               }
+       }
+       read_unlock(&imp_ctx->implist_lock);
+
+       return NULL;
+}
+
+static void pages_list_add(struct importer_context *imp_ctx,
+               struct pages_list *pglist)
+{
+       pages_list_get(pglist);
+
+       write_lock(&imp_ctx->implist_lock);
+       list_add_tail(&pglist->list,  &imp_ctx->imp_list);
+       imp_ctx->cnt++;
+       write_unlock(&imp_ctx->implist_lock);
+}
+
+static void pages_list_remove(struct importer_context *imp_ctx,
+               struct pages_list *pglist)
+{
+       write_lock(&imp_ctx->implist_lock);
+       list_del(&pglist->list);
+       imp_ctx->cnt--;
+       write_unlock(&imp_ctx->implist_lock);
+
+       pages_list_put(pglist);
+}
+
 void *habmm_hyp_allocate_grantable(int page_count,
                uint32_t *sizebytes)
 {
@@ -192,9 +300,12 @@ static int habmem_get_dma_pages_from_fd(int32_t fd,
                for (j = 0; j < (s->length >> PAGE_SHIFT); j++) {
                        pages[rc] = nth_page(page, j);
                        rc++;
-                       if (WARN_ON(rc >= page_count))
+                       if (rc >= page_count)
                                break;
                }
+
+               if (rc >= page_count)
+                       break;
        }
 
 err:
@@ -320,16 +431,8 @@ void habmem_imp_hyp_close(void *imp_ctx, int kernel)
        if (!priv)
                return;
 
-       list_for_each_entry_safe(pglist, pglist_tmp, &priv->imp_list, list) {
-               if (kernel && pglist->kva)
-                       vunmap(pglist->kva);
-
-               list_del(&pglist->list);
-               priv->cnt--;
-
-               kfree(pglist->pages);
-               kfree(pglist);
-       }
+       list_for_each_entry_safe(pglist, pglist_tmp, &priv->imp_list, list)
+               pages_list_remove(priv, pglist);
 
        kfree(priv);
 }
@@ -406,10 +509,19 @@ static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 static void hab_map_open(struct vm_area_struct *vma)
 {
+       struct pages_list *pglist =
+           (struct pages_list *)vma->vm_private_data;
+
+       pages_list_get(pglist);
 }
 
 static void hab_map_close(struct vm_area_struct *vma)
 {
+       struct pages_list *pglist =
+           (struct pages_list *)vma->vm_private_data;
+
+       pages_list_put(pglist);
+       vma->vm_private_data = NULL;
 }
 
 static const struct vm_operations_struct habmem_vm_ops = {
@@ -418,6 +530,51 @@ static const struct vm_operations_struct habmem_vm_ops = {
        .close = hab_map_close,
 };
 
+static int hab_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct pages_list *pglist = vma->vm_private_data;
+       pgoff_t page_offset;
+       int ret;
+
+       page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+               PAGE_SHIFT;
+
+       if (page_offset > pglist->npages)
+               return VM_FAULT_SIGBUS;
+
+       ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+                            pglist->pages[page_offset]);
+
+       switch (ret) {
+       case 0:
+               return VM_FAULT_NOPAGE;
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       case -EBUSY:
+               return VM_FAULT_RETRY;
+       case -EFAULT:
+       case -EINVAL:
+               return VM_FAULT_SIGBUS;
+       default:
+               WARN_ON(1);
+               return VM_FAULT_SIGBUS;
+       }
+}
+
+static void hab_buffer_open(struct vm_area_struct *vma)
+{
+}
+
+static void hab_buffer_close(struct vm_area_struct *vma)
+{
+}
+
+static const struct vm_operations_struct hab_buffer_vm_ops = {
+       .fault = hab_buffer_fault,
+       .open = hab_buffer_open,
+       .close = hab_buffer_close,
+};
+
 static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
        struct pages_list *pglist = dmabuf->priv;
@@ -431,7 +588,7 @@ static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-       vma->vm_ops = &habmem_vm_ops;
+       vma->vm_ops = &hab_buffer_vm_ops;
        vma->vm_private_data = pglist;
        vma->vm_flags |= VM_MIXEDMAP;
 
@@ -440,6 +597,9 @@ static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 
 static void hab_mem_dma_buf_release(struct dma_buf *dmabuf)
 {
+       struct pages_list *pglist = dmabuf->priv;
+
+       pages_list_put(pglist);
 }
 
 static void *hab_mem_dma_buf_kmap(struct dma_buf *dmabuf,
@@ -470,82 +630,50 @@ static int habmem_imp_hyp_map_fd(void *imp_ctx,
        uint32_t userflags,
        int32_t *pfd)
 {
-       struct page **pages;
-       struct compressed_pfns *pfn_table =
-                       (struct compressed_pfns *)exp->payload;
        struct pages_list *pglist;
        struct importer_context *priv = imp_ctx;
-       unsigned long pfn;
-       int i, j, k = 0;
-       pgprot_t prot = PAGE_KERNEL;
-       int32_t fd, size;
+       int32_t fd = -1;
        int ret;
+       struct dma_buf *dmabuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
-       if (!pfn_table || !priv)
+       if (!priv)
                return -EINVAL;
-       size = exp->payload_count * sizeof(struct page *);
-       pages = kmalloc(size, GFP_KERNEL);
-       if (!pages)
-               return -ENOMEM;
-
-       pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
-       if (!pglist) {
-               kfree(pages);
-               return -ENOMEM;
-       }
 
-       pfn = pfn_table->first_pfn;
-       for (i = 0; i < pfn_table->nregions; i++) {
-               for (j = 0; j < pfn_table->region[i].size; j++) {
-                       pages[k] = pfn_to_page(pfn+j);
-                       k++;
-               }
-               pfn += pfn_table->region[i].size + pfn_table->region[i].space;
-       }
+       pglist = pages_list_lookup(priv, exp->export_id, exp->pchan);
+       if (pglist)
+               goto buffer_ready;
 
-       pglist->pages = pages;
-       pglist->npages = exp->payload_count;
-       pglist->kernel = 0;
-       pglist->index = 0;
-       pglist->refcntk = pglist->refcntu = 0;
-       pglist->userflags = userflags;
-       pglist->export_id = exp->export_id;
-       pglist->vcid = exp->vcid_remote;
-       pglist->pchan = exp->pchan;
+       pglist = pages_list_create(imp_ctx, exp, userflags);
+       if (IS_ERR(pglist))
+               return PTR_ERR(pglist);
 
-       if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
-               prot = pgprot_writecombine(prot);
+       pages_list_add(priv, pglist);
 
+buffer_ready:
        exp_info.ops = &dma_buf_ops;
-       exp_info.size = exp->payload_count << PAGE_SHIFT;
+       exp_info.size = pglist->npages << PAGE_SHIFT;
        exp_info.flags = O_RDWR;
        exp_info.priv = pglist;
-       pglist->dmabuf = dma_buf_export(&exp_info);
-       if (IS_ERR(pglist->dmabuf)) {
-               ret = PTR_ERR(pglist->dmabuf);
-               kfree(pages);
-               kfree(pglist);
-               return ret;
+       dmabuf = dma_buf_export(&exp_info);
+       if (IS_ERR(dmabuf)) {
+               pr_err("export to dmabuf failed\n");
+               ret = PTR_ERR(dmabuf);
+               goto proc_end;
        }
+       pages_list_get(pglist);
 
-       fd = dma_buf_fd(pglist->dmabuf, O_CLOEXEC);
+       fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0) {
-               dma_buf_put(pglist->dmabuf);
-               kfree(pages);
-               kfree(pglist);
-               return -EINVAL;
+               pr_err("dma buf to fd failed\n");
+               dma_buf_put(dmabuf);
+               ret = -EINVAL;
+               goto proc_end;
        }
 
-       pglist->refcntk++;
-
-       write_lock(&priv->implist_lock);
-       list_add_tail(&pglist->list,  &priv->imp_list);
-       priv->cnt++;
-       write_unlock(&priv->implist_lock);
-
+proc_end:
        *pfd = fd;
-
+       pages_list_put(pglist);
        return 0;
 }
 
@@ -554,68 +682,45 @@ static int habmem_imp_hyp_map_kva(void *imp_ctx,
        uint32_t userflags,
        void **pkva)
 {
-       struct page **pages;
-       struct compressed_pfns *pfn_table =
-               (struct compressed_pfns *)exp->payload;
        struct pages_list *pglist;
        struct importer_context *priv = imp_ctx;
-       unsigned long pfn;
-       int i, j, k = 0, size;
        pgprot_t prot = PAGE_KERNEL;
 
-       if (!pfn_table || !priv)
+       if (!priv)
                return -EINVAL;
-       size = exp->payload_count * sizeof(struct page *);
-       pages = kmalloc(size, GFP_KERNEL);
-       if (!pages)
-               return -ENOMEM;
-       pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
-       if (!pglist) {
-               kfree(pages);
-               return -ENOMEM;
-       }
 
-       pfn = pfn_table->first_pfn;
-       for (i = 0; i < pfn_table->nregions; i++) {
-               for (j = 0; j < pfn_table->region[i].size; j++) {
-                       pages[k] = pfn_to_page(pfn+j);
-                       k++;
-               }
-               pfn += pfn_table->region[i].size + pfn_table->region[i].space;
-       }
+       pglist = pages_list_lookup(priv, exp->export_id, exp->pchan);
+       if (pglist)
+               goto buffer_ready;
 
-       pglist->pages = pages;
-       pglist->npages = exp->payload_count;
-       pglist->kernel = 1;
-       pglist->refcntk = pglist->refcntu = 0;
-       pglist->userflags = userflags;
-       pglist->export_id = exp->export_id;
-       pglist->vcid = exp->vcid_remote;
-       pglist->pchan = exp->pchan;
+       pglist = pages_list_create(imp_ctx, exp, userflags);
+       if (IS_ERR(pglist))
+               return PTR_ERR(pglist);
+
+       pages_list_add(priv, pglist);
+
+buffer_ready:
+       if (pglist->kva)
+               goto pro_end;
+
+       if (pglist->userflags != userflags) {
+               pr_info("exp %d: userflags: 0x%x -> 0x%x\n",
+                       exp->export_id, pglist->userflags, userflags);
+               pglist->userflags = userflags;
+       }
 
        if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
                prot = pgprot_writecombine(prot);
 
        pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
        if (pglist->kva == NULL) {
-               kfree(pages);
                pr_err("%ld pages vmap failed\n", pglist->npages);
-               kfree(pglist);
                return -ENOMEM;
        }
 
-       pr_debug("%ld pages vmap pass, return %p\n",
-                       pglist->npages, pglist->kva);
-
-       pglist->refcntk++;
-
-       write_lock(&priv->implist_lock);
-       list_add_tail(&pglist->list,  &priv->imp_list);
-       priv->cnt++;
-       write_unlock(&priv->implist_lock);
-
+pro_end:
        *pkva = pglist->kva;
-
+       pages_list_put(pglist);
        return 0;
 }
 
@@ -624,52 +729,31 @@ static int habmem_imp_hyp_map_uva(void *imp_ctx,
        uint32_t userflags,
        uint64_t *index)
 {
-       struct page **pages;
-       struct compressed_pfns *pfn_table =
-               (struct compressed_pfns *)exp->payload;
        struct pages_list *pglist;
        struct importer_context *priv = imp_ctx;
-       unsigned long pfn;
-       int i, j, k = 0, size;
 
-       if (!pfn_table || !priv)
+       if (!priv)
                return -EINVAL;
-       size = exp->payload_count * sizeof(struct page *);
-       pages = kmalloc(size, GFP_KERNEL);
-       if (!pages)
-               return -ENOMEM;
 
-       pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
-       if (!pglist) {
-               kfree(pages);
-               return -ENOMEM;
-       }
+       pglist = pages_list_lookup(priv, exp->export_id, exp->pchan);
+       if (pglist)
+               goto buffer_ready;
 
-       pfn = pfn_table->first_pfn;
-       for (i = 0; i < pfn_table->nregions; i++) {
-               for (j = 0; j < pfn_table->region[i].size; j++) {
-                       pages[k] = pfn_to_page(pfn+j);
-                       k++;
-               }
-               pfn += pfn_table->region[i].size + pfn_table->region[i].space;
-       }
+       pglist = pages_list_create(imp_ctx, exp, userflags);
+       if (IS_ERR(pglist))
+               return PTR_ERR(pglist);
 
-       pglist->pages = pages;
-       pglist->npages = exp->payload_count;
-       pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
-       pglist->refcntk = pglist->refcntu = 0;
-       pglist->userflags = userflags;
-       pglist->export_id = exp->export_id;
-       pglist->vcid = exp->vcid_remote;
-       pglist->pchan = exp->pchan;
+       pages_list_add(priv, pglist);
 
-       write_lock(&priv->implist_lock);
-       list_add_tail(&pglist->list,  &priv->imp_list);
-       priv->cnt++;
-       write_unlock(&priv->implist_lock);
+buffer_ready:
+       if (pglist->index)
+               goto proc_end;
 
-       *index = pglist->index << PAGE_SHIFT;
+       pglist->index = page_to_phys(pglist->pages[0]) >> PAGE_SHIFT;
 
+proc_end:
+       *index = pglist->index << PAGE_SHIFT;
+       pages_list_put(pglist);
        return 0;
 }
 
@@ -697,37 +781,17 @@ int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
 int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel)
 {
        struct importer_context *priv = imp_ctx;
-       struct pages_list *pglist, *tmp;
-       int found = 0;
-
-       write_lock(&priv->implist_lock);
-       list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
-               if (pglist->export_id == exp->export_id &&
-                       pglist->pchan == exp->pchan) {
-                       found = 1;
-                       list_del(&pglist->list);
-                       priv->cnt--;
-                       break;
-               }
-       }
-       write_unlock(&priv->implist_lock);
+       struct pages_list *pglist;
 
-       if (!found) {
+       pglist = pages_list_lookup(priv, exp->export_id, exp->pchan);
+       if (!pglist) {
                pr_err("failed to find export id %u\n", exp->export_id);
                return -EINVAL;
        }
 
-       pr_debug("detach pglist %p, kernel %d, list cnt %d\n",
-               pglist, pglist->kernel, priv->cnt);
+       pages_list_remove(priv, pglist);
 
-       if (pglist->kva)
-               vunmap(pglist->kva);
-
-       if (pglist->dmabuf)
-               dma_buf_put(pglist->dmabuf);
-
-       kfree(pglist->pages);
-       kfree(pglist);
+       pages_list_put(pglist);
 
        return 0;
 }
@@ -739,12 +803,14 @@ int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
        long length = vma->vm_end - vma->vm_start;
        struct pages_list *pglist;
        int bfound = 0;
+       int ret = 0;
 
        read_lock(&imp_ctx->implist_lock);
        list_for_each_entry(pglist, &imp_ctx->imp_list, list) {
                if ((pglist->index == vma->vm_pgoff) &&
                        ((length <= pglist->npages * PAGE_SIZE))) {
                        bfound = 1;
+                       pages_list_get(pglist);
                        break;
                }
        }
@@ -758,7 +824,8 @@ int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
        if (length > pglist->npages * PAGE_SIZE) {
                pr_err("Error vma length %ld not matching page list %ld\n",
                        length, pglist->npages * PAGE_SIZE);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto proc_end;
        }
 
        vma->vm_ops = &habmem_vm_ops;
@@ -769,6 +836,10 @@ int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
        return 0;
+
+proc_end:
+       pages_list_put(pglist);
+       return ret;
 }
 
 int habmm_imp_hyp_map_check(void *imp_ctx, struct export_desc *exp)
@@ -777,15 +848,11 @@ int habmm_imp_hyp_map_check(void *imp_ctx, struct export_desc *exp)
        struct pages_list *pglist;
        int found = 0;
 
-       read_lock(&priv->implist_lock);
-       list_for_each_entry(pglist, &priv->imp_list, list) {
-               if (pglist->export_id == exp->export_id &&
-                       pglist->pchan == exp->pchan) {
-                       found = 1;
-                       break;
-               }
+       pglist = pages_list_lookup(priv, exp->export_id, exp->pchan);
+       if (pglist) {
+               found = 1;
+               pages_list_put(pglist);
        }
-       read_unlock(&priv->implist_lock);
 
        return found;
 }
index d578617..efa4bb3 100644 (file)
@@ -124,8 +124,11 @@ void habmem_remove_export(struct export_desc *exp)
        struct uhab_context *ctx;
 
        if (!exp || !exp->ctx || !exp->pchan) {
-               pr_err("failed to find valid info in exp %pK ctx %pK pchan %pK\n",
+               if (exp)
+                       pr_err("invalid info in exp %pK ctx %pK pchan %pK\n",
                           exp, exp->ctx, exp->pchan);
+               else
+                       pr_err("invalid exp\n");
                return;
        }
 
@@ -373,6 +376,13 @@ int hab_mem_import(struct uhab_context *ctx,
        }
        spin_unlock_bh(&ctx->imp_lock);
 
+       if ((exp->payload_count << PAGE_SHIFT) != param->sizebytes) {
+               pr_err("input size %d don't match buffer size %d\n",
+                       param->sizebytes, exp->payload_count << PAGE_SHIFT);
+               ret = -EINVAL;
+               goto err_imp;
+       }
+
        if (!found) {
                pr_err("Fail to get export descriptor from export id %d\n",
                        param->exportid);
index 3765623..71010be 100644 (file)
@@ -64,9 +64,14 @@ hab_msg_dequeue(struct virtual_channel *vchan, struct hab_message **msg,
                }
        }
 
-       /* return all the received messages before the remote close */
-       if ((!ret || (ret == -ERESTARTSYS)) && !hab_rx_queue_empty(vchan)) {
-               spin_lock_bh(&vchan->rx_lock);
+       /*
+        * return all the received messages before the remote close,
+        * and need empty check again in case the list is empty now due to
+        * dequeue by other threads
+        */
+       spin_lock_bh(&vchan->rx_lock);
+
+       if ((!ret || (ret == -ERESTARTSYS)) && !list_empty(&vchan->rx_list)) {
                message = list_first_entry(&vchan->rx_list,
                                struct hab_message, node);
                if (message) {
@@ -76,18 +81,20 @@ hab_msg_dequeue(struct virtual_channel *vchan, struct hab_message **msg,
                                ret = 0;
                                *rsize = message->sizebytes;
                        } else {
-                               pr_err("rcv buffer too small %d < %zd\n",
-                                          *rsize, message->sizebytes);
+                               pr_err("vcid %x rcv buf too small %d < %zd\n",
+                                          vchan->id, *rsize,
+                                          message->sizebytes);
                                *rsize = message->sizebytes;
                                message = NULL;
                                ret = -EOVERFLOW; /* come back again */
                        }
                }
-               spin_unlock_bh(&vchan->rx_lock);
        } else
                /* no message received, retain the original status */
                *rsize = 0;
 
+       spin_unlock_bh(&vchan->rx_lock);
+
        *msg = message;
        return ret;
 }
@@ -281,7 +288,13 @@ int hab_msg_recv(struct physical_channel *pchan,
                        break;
                }
 
-               exp_desc->domid_local = pchan->dom_id;
+               if (pchan->vmid_local != exp_desc->domid_remote ||
+                       pchan->vmid_remote != exp_desc->domid_local)
+                       pr_err("corrupted vmid %d != %d %d != %d\n",
+                               pchan->vmid_local, exp_desc->domid_remote,
+                               pchan->vmid_remote, exp_desc->domid_local);
+               exp_desc->domid_remote = pchan->vmid_remote;
+               exp_desc->domid_local = pchan->vmid_local;
                exp_desc->pchan = pchan;
 
                hab_export_enqueue(vchan, exp_desc);
index f740a43..f09a1df 100644 (file)
@@ -152,10 +152,13 @@ int hab_open_listen(struct uhab_context *ctx,
                ret = wait_event_interruptible_timeout(dev->openq,
                        hab_open_request_find(ctx, dev, listen, recv_request),
                        ms_timeout);
-               if (!ret || (-ERESTARTSYS == ret)) {
+               if (!ret) {
+                       pr_debug("%s timeout in open listen\n", dev->name);
+                       ret = -EAGAIN; /* condition not met */
+               } else if (-ERESTARTSYS == ret) {
                        pr_warn("something failed in open listen ret %d\n",
                                        ret);
-                       ret = -EAGAIN; /* condition not met */
+                       ret = -EINTR; /* condition not met */
                } else if (ret > 0)
                        ret = 0; /* condition met */
        } else { /* fe case */
index e42b27f..8d818b8 100644 (file)
@@ -87,7 +87,6 @@ hab_vchan_free(struct kref *ref)
 
        /* the release vchan from ctx was done earlier in vchan close() */
        hab_ctx_put(ctx); /* now ctx is not needed from this vchan's view */
-       vchan->ctx = NULL;
 
        /* release vchan from pchan. no more msg for this vchan */
        write_lock_bh(&pchan->vchans_lock);
@@ -144,6 +143,13 @@ hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
                                get_refcnt(vchan->refcount),
                                payload_type, sizebytes);
                        vchan = NULL;
+               } else if (vchan->otherend_closed || vchan->closed) {
+                       pr_err("closed already remote %d local %d vcid %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
+                               vchan->otherend_closed, vchan->closed,
+                               vchan->id, vchan->otherend_id,
+                               vchan->session_id, get_refcnt(vchan->refcount),
+                               vchan_id, session_id, payload_type, sizebytes);
+                       vchan = NULL;
                } else if (!kref_get_unless_zero(&vchan->refcount)) {
                        /*
                         * this happens when refcnt is already zero
@@ -154,13 +160,6 @@ hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
                                vchan->session_id, get_refcnt(vchan->refcount),
                                vchan_id, session_id, payload_type, sizebytes);
                        vchan = NULL;
-               } else if (vchan->otherend_closed || vchan->closed) {
-                       pr_err("closed already remote %d local %d vcid %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
-                               vchan->otherend_closed, vchan->closed,
-                               vchan->id, vchan->otherend_id,
-                               vchan->session_id, get_refcnt(vchan->refcount),
-                               vchan_id, session_id, payload_type, sizebytes);
-                       vchan = NULL;
                }
        }
        spin_unlock_bh(&pchan->vid_lock);
@@ -173,7 +172,10 @@ void hab_vchan_stop(struct virtual_channel *vchan)
        if (vchan) {
                vchan->otherend_closed = 1;
                wake_up(&vchan->rx_queue);
-               wake_up_interruptible(&vchan->ctx->exp_wq);
+               if (vchan->ctx)
+                       wake_up_interruptible(&vchan->ctx->exp_wq);
+               else
+                       pr_err("NULL ctx for vchan %x\n", vchan->id);
        }
 }
 
@@ -200,6 +202,18 @@ static int hab_vchans_per_pchan_empty(struct physical_channel *pchan)
 
        read_lock(&pchan->vchans_lock);
        empty = list_empty(&pchan->vchannels);
+       if (!empty) {
+               struct virtual_channel *vchan;
+
+               list_for_each_entry(vchan, &pchan->vchannels, pnode) {
+                       pr_err("vchan %pK id %x remote id %x session %d ref %d closed %d remote close %d\n",
+                                  vchan, vchan->id, vchan->otherend_id,
+                                  vchan->session_id,
+                                  get_refcnt(vchan->refcount), vchan->closed,
+                                  vchan->otherend_closed);
+               }
+
+       }
        read_unlock(&pchan->vchans_lock);
 
        return empty;
@@ -220,6 +234,8 @@ static int hab_vchans_empty(int vmid)
                                if (!hab_vchans_per_pchan_empty(pchan)) {
                                        empty = 0;
                                        spin_unlock_bh(&hab_dev->pchan_lock);
+                                       pr_info("vmid %d %s's vchans are not closed\n",
+                                                       vmid, pchan->name);
                                        break;
                                }
                        }
@@ -239,7 +255,7 @@ void hab_vchans_empty_wait(int vmid)
        pr_info("waiting for GVM%d's sockets closure\n", vmid);
 
        while (!hab_vchans_empty(vmid))
-               schedule();
+               usleep_range(10000, 12000);
 
        pr_info("all of GVM%d's sockets are closed\n", vmid);
 }
index 52c12bb..281d127 100644 (file)
@@ -91,6 +91,7 @@ static uint32_t hab_handle_tx;
 static uint32_t hab_handle_rx;
 static char apr_tx_buf[APR_TX_BUF_SIZE];
 static char apr_rx_buf[APR_RX_BUF_SIZE];
+static spinlock_t hab_tx_lock;
 
 /* apr callback thread task */
 static struct task_struct *apr_vm_cb_thread_task;
@@ -114,8 +115,6 @@ struct apr_svc_table {
  *    apr handle and store in svc tbl.
  */
 
-static struct mutex m_lock_tbl_qdsp6;
-
 static struct apr_svc_table svc_tbl_qdsp6[] = {
        {
                .name = "AFE",
@@ -206,8 +205,6 @@ static struct apr_svc_table svc_tbl_qdsp6[] = {
        },
 };
 
-static struct mutex m_lock_tbl_voice;
-
 static struct apr_svc_table svc_tbl_voice[] = {
        {
                .name = "VSM",
@@ -573,10 +570,10 @@ static int apr_vm_get_svc(const char *svc_name, int domain_id, int *client_id,
        int i;
        int size;
        struct apr_svc_table *tbl;
-       struct mutex *lock;
        struct aprv2_vm_cmd_register_rsp_t apr_rsp;
        uint32_t apr_len;
        int ret = 0;
+       unsigned long flags;
        struct {
                uint32_t cmd_id;
                struct aprv2_vm_cmd_register_t reg_cmd;
@@ -585,14 +582,12 @@ static int apr_vm_get_svc(const char *svc_name, int domain_id, int *client_id,
        if (domain_id == APR_DOMAIN_ADSP) {
                tbl = svc_tbl_qdsp6;
                size = ARRAY_SIZE(svc_tbl_qdsp6);
-               lock = &m_lock_tbl_qdsp6;
        } else {
                tbl = svc_tbl_voice;
                size = ARRAY_SIZE(svc_tbl_voice);
-               lock = &m_lock_tbl_voice;
        }
 
-       mutex_lock(lock);
+       spin_lock_irqsave(&hab_tx_lock, flags);
        for (i = 0; i < size; i++) {
                if (!strcmp(svc_name, tbl[i].name)) {
                        *client_id = tbl[i].client_id;
@@ -616,7 +611,8 @@ static int apr_vm_get_svc(const char *svc_name, int domain_id, int *client_id,
                                if (ret) {
                                        pr_err("%s: habmm_socket_send failed %d\n",
                                                __func__, ret);
-                                       mutex_unlock(lock);
+                                       spin_unlock_irqrestore(&hab_tx_lock,
+                                                               flags);
                                        return ret;
                                }
                                /* wait for response */
@@ -628,14 +624,16 @@ static int apr_vm_get_svc(const char *svc_name, int domain_id, int *client_id,
                                if (ret) {
                                        pr_err("%s: apr_vm_nb_receive failed %d\n",
                                                __func__, ret);
-                                       mutex_unlock(lock);
+                                       spin_unlock_irqrestore(&hab_tx_lock,
+                                                               flags);
                                        return ret;
                                }
                                if (apr_rsp.status) {
                                        pr_err("%s: apr_vm_nb_receive status %d\n",
                                                __func__, apr_rsp.status);
                                        ret = apr_rsp.status;
-                                       mutex_unlock(lock);
+                                       spin_unlock_irqrestore(&hab_tx_lock,
+                                                               flags);
                                        return ret;
                                }
                                /* update svc table */
@@ -649,7 +647,7 @@ static int apr_vm_get_svc(const char *svc_name, int domain_id, int *client_id,
                        break;
                }
        }
-       mutex_unlock(lock);
+       spin_unlock_irqrestore(&hab_tx_lock, flags);
 
        pr_debug("%s: svc_name = %s client_id = %d domain_id = %d\n",
                 __func__, svc_name, *client_id, domain_id);
@@ -669,10 +667,10 @@ static int apr_vm_rel_svc(int domain_id, int svc_id, int handle)
        int i;
        int size;
        struct apr_svc_table *tbl;
-       struct mutex *lock;
        struct aprv2_vm_cmd_deregister_rsp_t apr_rsp;
        uint32_t apr_len;
        int ret = 0;
+       unsigned long flags;
        struct {
                uint32_t cmd_id;
                struct aprv2_vm_cmd_deregister_t dereg_cmd;
@@ -681,14 +679,12 @@ static int apr_vm_rel_svc(int domain_id, int svc_id, int handle)
        if (domain_id == APR_DOMAIN_ADSP) {
                tbl = svc_tbl_qdsp6;
                size = ARRAY_SIZE(svc_tbl_qdsp6);
-               lock = &m_lock_tbl_qdsp6;
        } else {
                tbl = svc_tbl_voice;
                size = ARRAY_SIZE(svc_tbl_voice);
-               lock = &m_lock_tbl_voice;
        }
 
-       mutex_lock(lock);
+       spin_lock_irqsave(&hab_tx_lock, flags);
        for (i = 0; i < size; i++) {
                if (tbl[i].id == svc_id && tbl[i].handle == handle) {
                        /* need to deregister a service */
@@ -728,7 +724,7 @@ static int apr_vm_rel_svc(int domain_id, int svc_id, int handle)
                        break;
                }
        }
-       mutex_unlock(lock);
+       spin_unlock_irqrestore(&hab_tx_lock, flags);
 
        if (i == size) {
                pr_err("%s: APR: Wrong svc id %d handle %d\n",
@@ -772,7 +768,7 @@ int apr_send_pkt(void *handle, uint32_t *buf)
                return -ENETRESET;
        }
 
-       spin_lock_irqsave(&svc->w_lock, flags);
+       spin_lock_irqsave(&hab_tx_lock, flags);
        if (!svc->id || !svc->vm_handle) {
                pr_err("APR: Still service is not yet opened\n");
                ret = -EINVAL;
@@ -839,7 +835,7 @@ int apr_send_pkt(void *handle, uint32_t *buf)
        ret = hdr->pkt_size;
 
 done:
-       spin_unlock_irqrestore(&svc->w_lock, flags);
+       spin_unlock_irqrestore(&hab_tx_lock, flags);
        return ret;
 }
 
@@ -1173,6 +1169,7 @@ static int __init apr_init(void)
                pr_err("%s: habmm_socket_open tx failed %d\n", __func__, ret);
                return ret;
        }
+       spin_lock_init(&hab_tx_lock);
 
        ret = habmm_socket_open(&hab_handle_rx,
                        MM_AUD_2,
@@ -1201,15 +1198,11 @@ static int __init apr_init(void)
        pr_info("%s: apr_vm_cb_thread started pid %d\n",
                        __func__, pid);
 
-       mutex_init(&m_lock_tbl_qdsp6);
-       mutex_init(&m_lock_tbl_voice);
-
        for (i = 0; i < APR_DEST_MAX; i++)
                for (j = 0; j < APR_CLIENT_MAX; j++) {
                        mutex_init(&client[i][j].m_lock);
                        for (k = 0; k < APR_SVC_MAX; k++) {
                                mutex_init(&client[i][j].svc[k].m_lock);
-                               spin_lock_init(&client[i][j].svc[k].w_lock);
                        }
                }
 
index 75b114e..95562d2 100644 (file)
@@ -53,8 +53,6 @@ struct anc_tdm_group_set_info {
 
 struct anc_dev_drv_info {
        uint32_t state;
-       uint32_t rpm;
-       uint32_t bypass_mode;
        uint32_t algo_module_id;
 };
 
@@ -311,52 +309,68 @@ static int anc_dev_port_stop(int32_t which_port)
 
 int msm_anc_dev_set_info(void *info_p, int32_t anc_cmd)
 {
-       int rc = 0;
+       int rc = -EINVAL;
 
        switch (anc_cmd) {
-       case ANC_CMD_RPM: {
-               struct audio_anc_rpm_info *rpm_info_p =
-                       (struct audio_anc_rpm_info *)info_p;
+       case ANC_CMD_ALGO_MODULE: {
+               struct audio_anc_algo_module_info *module_info_p =
+               (struct audio_anc_algo_module_info *)info_p;
+
+               rc = 0;
 
                if (this_anc_dev_info.state)
-               rc = anc_if_set_rpm(
+                       rc = anc_if_set_algo_module_id(
                        anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
-                       rpm_info_p->rpm);
+                       module_info_p->module_id);
                else
-                       this_anc_dev_info.rpm = 0;
+                       this_anc_dev_info.algo_module_id =
+                       module_info_p->module_id;
                break;
        }
-       case ANC_CMD_BYPASS_MODE: {
-               struct audio_anc_bypass_mode *bypass_mode_p =
-                       (struct audio_anc_bypass_mode *)info_p;
-
+       case ANC_CMD_ALGO_CALIBRATION: {
+               rc = -EINVAL;
                if (this_anc_dev_info.state)
-                       rc = anc_if_set_bypass_mode(
+                       rc = anc_if_set_algo_module_cali_data(
                        anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
-                       bypass_mode_p->mode);
+                       info_p);
                else
-                       this_anc_dev_info.bypass_mode = bypass_mode_p->mode;
+                       pr_err("%s: ANC is not running yet\n",
+                               __func__);
+               break;
+       }
+       default:
+               pr_err("%s: ANC cmd wrong\n",
+                       __func__);
                break;
        }
-       case ANC_CMD_ALGO_MODULE: {
-               struct audio_anc_algo_module_info *module_info_p =
-               (struct audio_anc_algo_module_info *)info_p;
 
+       return rc;
+}
+
+int msm_anc_dev_get_info(void *info_p, int32_t anc_cmd)
+{
+       int rc = -EINVAL;
+
+       switch (anc_cmd) {
+       case ANC_CMD_ALGO_CALIBRATION: {
                if (this_anc_dev_info.state)
-                       rc = anc_if_set_algo_module_id(
+                       rc = anc_if_get_algo_module_cali_data(
                        anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
-                       module_info_p->module_id);
+                       info_p);
                else
-                       this_anc_dev_info.algo_module_id =
-                       module_info_p->module_id;
+                       pr_err("%s: ANC is not running yet\n",
+                               __func__);
                break;
        }
+       default:
+               pr_err("%s: ANC cmd wrong\n",
+                       __func__);
+               break;
        }
 
        return rc;
 }
 
-
 int msm_anc_dev_start(void)
 {
        int rc = 0;
@@ -514,11 +528,6 @@ int msm_anc_dev_start(void)
                anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
                this_anc_dev_info.algo_module_id);
 
-       if (this_anc_dev_info.bypass_mode != 0)
-               rc = anc_if_set_bypass_mode(
-               anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
-               this_anc_dev_info.bypass_mode);
-
        group_id = get_group_id_from_port_id(
                        anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
 
@@ -612,8 +621,6 @@ int msm_anc_dev_stop(void)
 
        this_anc_dev_info.state = 0;
        this_anc_dev_info.algo_module_id = 0;
-       this_anc_dev_info.rpm = 0;
-       this_anc_dev_info.bypass_mode = 0;
 
        pr_debug("%s: ANC devices stop successfully!\n", __func__);
 
index 65c5858..50cc255 100644 (file)
@@ -44,17 +44,11 @@ static size_t get_user_anc_cmd_size(int32_t anc_cmd)
        case ANC_CMD_STOP:
                size = 0;
                break;
-       case ANC_CMD_RPM:
-               size = sizeof(struct audio_anc_rpm_info);
-               break;
-       case ANC_CMD_BYPASS_MODE:
-               size = sizeof(struct audio_anc_bypass_mode);
-               break;
        case ANC_CMD_ALGO_MODULE:
                size = sizeof(struct audio_anc_algo_module_info);
                break;
        case ANC_CMD_ALGO_CALIBRATION:
-               size = sizeof(struct audio_anc_algo_calibration_info);
+               size = sizeof(struct audio_anc_algo_calibration_header);
                break;
        default:
                pr_err("%s:Invalid anc cmd %d!",
@@ -77,8 +71,6 @@ static int call_set_anc(int32_t anc_cmd,
        case ANC_CMD_STOP:
                ret = msm_anc_dev_stop();
                break;
-       case ANC_CMD_RPM:
-       case ANC_CMD_BYPASS_MODE:
        case ANC_CMD_ALGO_MODULE:
        case ANC_CMD_ALGO_CALIBRATION:
                ret = msm_anc_dev_set_info(data, anc_cmd);
@@ -98,7 +90,8 @@ static int call_get_anc(int32_t anc_cmd,
        int                             ret = 0;
 
        switch (anc_cmd) {
-       case ANC_CMD_RPM:
+       case ANC_CMD_ALGO_CALIBRATION:
+               ret = msm_anc_dev_get_info(data, anc_cmd);
                break;
        default:
                break;
@@ -146,9 +139,9 @@ static long audio_anc_shared_ioctl(struct file *file, unsigned int cmd,
                pr_err("%s: Could not copy size value from user\n", __func__);
                ret = -EFAULT;
                goto done;
-       } else if (size < sizeof(struct audio_anc_packet)) {
+       } else if (size < sizeof(struct audio_anc_header)) {
                pr_err("%s: Invalid size sent to driver: %d, min size is %zd\n",
-                       __func__, size, sizeof(struct audio_anc_packet));
+                       __func__, size, sizeof(struct audio_anc_header));
                ret = -EINVAL;
                goto done;
        }
index 9294485..4e75bc8 100644 (file)
@@ -38,7 +38,7 @@ struct anc_if_ctl {
        atomic_t status;
        wait_queue_head_t wait[AFE_MAX_PORTS];
        struct task_struct *task;
-       struct anc_get_rpm_resp rpm_calib_data;
+       struct anc_get_algo_module_cali_data_resp cali_data_resp;
        uint32_t mmap_handle;
        struct mutex afe_cmd_lock;
 };
@@ -48,33 +48,23 @@ static struct anc_if_ctl this_anc_if;
 static int32_t anc_get_param_callback(uint32_t *payload,
                        uint32_t payload_size)
 {
-       u32 param_id;
-       struct anc_get_rpm_resp *resp =
-               (struct anc_get_rpm_resp *) payload;
-
-       if (!(&(resp->pdata))) {
-               pr_err("%s: Error: resp pdata is NULL\n", __func__);
+       if ((payload_size < (sizeof(uint32_t) +
+               sizeof(this_anc_if.cali_data_resp.pdata))) ||
+               (payload_size > sizeof(this_anc_if.cali_data_resp))) {
+               pr_err("%s: Error: received size %d, calib_data size %zu\n",
+                       __func__, payload_size,
+                       sizeof(this_anc_if.cali_data_resp));
                return -EINVAL;
        }
 
-       param_id = resp->pdata.param_id;
-       if (param_id == AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM) {
-               if (payload_size < sizeof(this_anc_if.rpm_calib_data)) {
-                       pr_err("%s: Error: received size %d, calib_data size %zu\n",
-                               __func__, payload_size,
-                               sizeof(this_anc_if.rpm_calib_data));
-                       return -EINVAL;
-               }
-
-               memcpy(&this_anc_if.rpm_calib_data, payload,
-                       sizeof(this_anc_if.rpm_calib_data));
-               if (!this_anc_if.rpm_calib_data.status) {
-                       atomic_set(&this_anc_if.state, 0);
-          } else {
-                       pr_debug("%s: calib resp status: %d", __func__,
-                               this_anc_if.rpm_calib_data.status);
-                       atomic_set(&this_anc_if.state, -1);
-               }
+       memcpy(&this_anc_if.cali_data_resp, payload,
+               payload_size);
+       if (!this_anc_if.cali_data_resp.status) {
+               atomic_set(&this_anc_if.state, 0);
+       } else {
+               pr_debug("%s: calib resp status: %d", __func__,
+                       this_anc_if.cali_data_resp.status);
+               atomic_set(&this_anc_if.state, -1);
        }
 
        return 0;
@@ -465,7 +455,7 @@ int anc_if_tdm_port_stop(u16 port_id)
        return anc_if_send_cmd_port_stop(port_id);
 }
 
-int anc_if_set_rpm(u16 port_id, u32 rpm)
+int anc_if_set_algo_module_id(u16 port_id, u32 module_id)
 {
        int ret = 0;
        int index;
@@ -479,7 +469,7 @@ int anc_if_set_rpm(u16 port_id, u32 rpm)
        index = q6audio_get_port_index(port_id);
 
        {
-               struct anc_set_rpm_command config;
+               struct anc_set_algo_module_id_command config;
 
                memset(&config, 0, sizeof(config));
                config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
@@ -496,16 +486,16 @@ int anc_if_set_rpm(u16 port_id, u32 rpm)
                config.param.payload_address_lsw = 0x00;
                config.param.payload_address_msw = 0x00;
                config.param.mem_map_handle = 0x00;
-               config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
-               config.pdata.param_id = AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM;
-               config.pdata.param_size = sizeof(config.set_rpm);
-               config.set_rpm.minor_version =
-               AUD_MSVC_API_VERSION_DEV_ANC_ALGO_RPM;
-               config.set_rpm.rpm = rpm;
+               config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+               config.pdata.param_id =
+               AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_MODULE_ID;
+               config.pdata.param_size = sizeof(config.set_algo_module_id);
+               config.set_algo_module_id.minor_version = 1;
+               config.set_algo_module_id.module_id = module_id;
 
                ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
                if (ret) {
-                       pr_err("%s: share resource for port 0x%x failed ret = %d\n",
+                       pr_err("%s: anc algo module ID for port 0x%x failed ret = %d\n",
                                        __func__, port_id, ret);
                }
        }
@@ -513,10 +503,10 @@ int anc_if_set_rpm(u16 port_id, u32 rpm)
        return ret;
 }
 
-int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode)
+int anc_if_set_anc_mic_spkr_layout(u16 port_id,
+struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p)
 {
        int ret = 0;
-
        int index;
 
        ret = anc_sdsp_interface_prepare();
@@ -528,7 +518,7 @@ int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode)
        index = q6audio_get_port_index(port_id);
 
        {
-               struct anc_set_bypass_mode_command config;
+               struct anc_set_mic_spkr_layout_info_command config;
 
                memset(&config, 0, sizeof(config));
                config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
@@ -545,17 +535,16 @@ int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode)
                config.param.payload_address_lsw = 0x00;
                config.param.payload_address_msw = 0x00;
                config.param.mem_map_handle = 0x00;
-               config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
+               config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
                config.pdata.param_id =
-               AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_BYPASS_MODE;
-               config.pdata.param_size = sizeof(config.set_bypass_mode);
-               config.set_bypass_mode.minor_version =
-               AUD_MSVC_API_VERSION_DEV_ANC_ALGO_BYPASS_MODE;
-               config.set_bypass_mode.bypass_mode = bypass_mode;
+               AUD_MSVC_PARAM_ID_PORT_ANC_MIC_SPKR_LAYOUT_INFO;
+               config.pdata.param_size = sizeof(config.set_mic_spkr_layout);
 
+               memcpy(&config.set_mic_spkr_layout, set_mic_spkr_layout_p,
+               sizeof(config.set_mic_spkr_layout));
                ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
                if (ret) {
-                       pr_err("%s: share resource for port 0x%x failed ret = %d\n",
+                       pr_err("%s: anc algo module ID for port 0x%x failed ret = %d\n",
                                        __func__, port_id, ret);
                }
        }
@@ -563,10 +552,10 @@ int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode)
        return ret;
 }
 
-int anc_if_set_algo_module_id(u16 port_id, u32 module_id)
+
+int anc_if_set_algo_module_cali_data(u16 port_id, void *data_p)
 {
        int ret = 0;
-
        int index;
 
        ret = anc_sdsp_interface_prepare();
@@ -578,45 +567,67 @@ int anc_if_set_algo_module_id(u16 port_id, u32 module_id)
        index = q6audio_get_port_index(port_id);
 
        {
-               struct anc_set_algo_module_id_command config;
+               struct anc_set_algo_module_cali_data_command *cali_data_cfg_p;
+               void *config_p = NULL;
+               int cmd_size = 0;
+               void *out_payload_p = NULL;
+               uint32_t *in_payload_p = (uint32_t *)data_p;
+
+               uint32_t module_id = *in_payload_p;
+               uint32_t param_id = *(in_payload_p + 1);
+               uint32_t payload_size = *(in_payload_p + 2);
+
+               cmd_size = sizeof(struct anc_set_algo_module_cali_data_command)
+               + payload_size;
+               config_p = kzalloc(cmd_size, GFP_KERNEL);
+               if (!config_p) {
+                       ret = -ENOMEM;
+                       return ret;
+               }
 
-               memset(&config, 0, sizeof(config));
-               config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+               memset(config_p, 0, cmd_size);
+               out_payload_p = config_p
+               + sizeof(struct anc_set_algo_module_cali_data_command);
+
+               cali_data_cfg_p =
+               (struct anc_set_algo_module_cali_data_command *)config_p;
+
+               cali_data_cfg_p->hdr.hdr_field =
+               APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
-               config.hdr.pkt_size = sizeof(config);
-               config.hdr.src_port = 0;
-               config.hdr.dest_port = 0;
-               config.hdr.token = index;
-               config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
-               config.param.port_id = q6audio_get_port_id(port_id);
-               config.param.payload_size = sizeof(config) -
+               cali_data_cfg_p->hdr.pkt_size = cmd_size;
+               cali_data_cfg_p->hdr.src_port = 0;
+               cali_data_cfg_p->hdr.dest_port = 0;
+               cali_data_cfg_p->hdr.token = index;
+               cali_data_cfg_p->hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+               cali_data_cfg_p->param.port_id = q6audio_get_port_id(port_id);
+               cali_data_cfg_p->param.payload_size = cmd_size -
                        sizeof(struct apr_hdr) -
-                       sizeof(config.param);
-               config.param.payload_address_lsw = 0x00;
-               config.param.payload_address_msw = 0x00;
-               config.param.mem_map_handle = 0x00;
-               config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
-               config.pdata.param_id =
-               AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_MODULE_ID;
-               config.pdata.param_size = sizeof(config.set_algo_module_id);
-               config.set_algo_module_id.minor_version = 1;
-               config.set_algo_module_id.module_id = module_id;
+                       sizeof(struct aud_msvc_port_cmd_set_param_v2);
+               cali_data_cfg_p->param.payload_address_lsw = 0x00;
+               cali_data_cfg_p->param.payload_address_msw = 0x00;
+               cali_data_cfg_p->param.mem_map_handle = 0x00;
+               cali_data_cfg_p->pdata.module_id = module_id;
+               cali_data_cfg_p->pdata.param_id = param_id;
+               cali_data_cfg_p->pdata.param_size = payload_size;
+
+               memcpy(out_payload_p, (in_payload_p + 3), payload_size);
+
+               ret = anc_if_apr_send_pkt(cali_data_cfg_p,
+                       &this_anc_if.wait[index]);
+               if (ret)
+                       pr_err("%s: anc algo module calibration data for port 0x%x failed ret = %d\n",
+                       __func__, port_id, ret);
 
-               ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
-               if (ret) {
-                       pr_err("%s: anc algo module ID for port 0x%x failed ret = %d\n",
-                                       __func__, port_id, ret);
-               }
+               kfree(config_p);
        }
 
        return ret;
 }
 
-int anc_if_set_anc_mic_spkr_layout(u16 port_id,
-struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p)
+int anc_if_get_algo_module_cali_data(u16 port_id, void *data_p)
 {
        int ret = 0;
-
        int index;
 
        ret = anc_sdsp_interface_prepare();
@@ -628,35 +639,68 @@ struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p)
        index = q6audio_get_port_index(port_id);
 
        {
-               struct anc_set_mic_spkr_layout_info_command config;
+               struct anc_get_algo_module_cali_data_command *cali_data_cfg_p;
+               void *config_p = NULL;
+               int cmd_size = 0;
+               void *out_payload_p = NULL;
+               uint32_t *in_payload_p = (uint32_t *)data_p;
+
+               uint32_t module_id = *in_payload_p;
+               uint32_t param_id = *(in_payload_p + 1);
+               uint32_t payload_size = *(in_payload_p + 2);
+
+               cmd_size = sizeof(struct anc_get_algo_module_cali_data_command)
+               + payload_size;
+               config_p = kzalloc(cmd_size, GFP_KERNEL);
+                       if (!config_p) {
+                       ret = -ENOMEM;
+                       return ret;
+               }
 
-               memset(&config, 0, sizeof(config));
-               config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+               memset(config_p, 0, cmd_size);
+               out_payload_p = config_p +
+               sizeof(struct anc_set_algo_module_cali_data_command);
+
+               cali_data_cfg_p =
+               (struct anc_get_algo_module_cali_data_command *)config_p;
+
+               cali_data_cfg_p->hdr.hdr_field =
+               APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
-               config.hdr.pkt_size = sizeof(config);
-               config.hdr.src_port = 0;
-               config.hdr.dest_port = 0;
-               config.hdr.token = index;
-               config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
-               config.param.port_id = q6audio_get_port_id(port_id);
-               config.param.payload_size = sizeof(config) -
+               cali_data_cfg_p->hdr.pkt_size = cmd_size;
+               cali_data_cfg_p->hdr.src_port = 0;
+               cali_data_cfg_p->hdr.dest_port = 0;
+               cali_data_cfg_p->hdr.token = index;
+               cali_data_cfg_p->hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2;
+               cali_data_cfg_p->param.port_id = q6audio_get_port_id(port_id);
+               cali_data_cfg_p->param.payload_size = cmd_size -
                        sizeof(struct apr_hdr) -
-                       sizeof(config.param);
-               config.param.payload_address_lsw = 0x00;
-               config.param.payload_address_msw = 0x00;
-               config.param.mem_map_handle = 0x00;
-               config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
-               config.pdata.param_id =
-               AUD_MSVC_PARAM_ID_PORT_ANC_MIC_SPKR_LAYOUT_INFO;
-               config.pdata.param_size = sizeof(config.set_mic_spkr_layout);
-
-               memcpy(&config.set_mic_spkr_layout, set_mic_spkr_layout_p,
-               sizeof(config.set_mic_spkr_layout));
-               ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
-               if (ret) {
-                       pr_err("%s: anc algo module ID for port 0x%x failed ret = %d\n",
+                       sizeof(struct aud_msvc_port_cmd_get_param_v2);
+               cali_data_cfg_p->param.payload_address_lsw = 0x00;
+               cali_data_cfg_p->param.payload_address_msw = 0x00;
+               cali_data_cfg_p->param.mem_map_handle = 0x00;
+               cali_data_cfg_p->param.module_id = module_id;
+               cali_data_cfg_p->param.param_id = param_id;
+               cali_data_cfg_p->pdata.param_size = 0;
+               cali_data_cfg_p->pdata.module_id = 0;
+               cali_data_cfg_p->pdata.param_id = 0;
+
+               ret = anc_if_apr_send_pkt(cali_data_cfg_p,
+               &this_anc_if.wait[index]);
+               if (ret)
+                       pr_err("%s: anc algo module calibration data for port 0x%x failed ret = %d\n",
                                        __func__, port_id, ret);
-               }
+
+               memcpy((in_payload_p + 3),
+               &this_anc_if.cali_data_resp.payload[0], payload_size);
+
+               *in_payload_p = this_anc_if.cali_data_resp.pdata.module_id;
+               *(in_payload_p + 1) =
+               this_anc_if.cali_data_resp.pdata.param_id;
+               *(in_payload_p + 2) =
+               this_anc_if.cali_data_resp.pdata.param_size;
+
+               kfree(config_p);
        }
 
        return ret;
@@ -700,7 +744,6 @@ int anc_if_cmd_memory_map(int port_id, phys_addr_t dma_addr_p,
        mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
        if (!mmap_region_cmd) {
                ret = -ENOMEM;
-               pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
                return ret;
        }
 
diff --git a/drivers/soc/qcom/smp2p_spinlock_test.c b/drivers/soc/qcom/smp2p_spinlock_test.c
deleted file mode 100644 (file)
index 1fe4411..0000000
+++ /dev/null
@@ -1,819 +0,0 @@
-/* drivers/soc/qcom/smp2p_spinlock_test.c
- *
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/debugfs.h>
-#include <linux/ctype.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <linux/remote_spinlock.h>
-#include <soc/qcom/smem.h>
-#include "smem_private.h"
-#include "smp2p_private.h"
-#include "smp2p_test_common.h"
-
-#define RS_END_THIEF_PID_BIT 20
-#define RS_END_THIEF_MASK 0x00f00000
-
-/* Spinlock commands used for testing Apps<->RPM spinlocks. */
-enum RPM_SPINLOCK_CMDS {
-       RPM_CMD_INVALID,
-       RPM_CMD_START,
-       RPM_CMD_LOCKED,
-       RPM_CMD_UNLOCKED,
-       RPM_CMD_END,
-};
-
-/* Shared structure for testing Apps<->RPM spinlocks. */
-struct rpm_spinlock_test {
-       uint32_t apps_cmd;
-       uint32_t apps_lock_count;
-       uint32_t rpm_cmd;
-       uint32_t rpm_lock_count;
-};
-
-static uint32_t ut_remote_spinlock_run_time = 1;
-
-/**
- * smp2p_ut_remote_spinlock_core - Verify remote spinlock.
- *
- * @s:           Pointer to output file
- * @remote_pid:  Remote processor to test
- * @use_trylock: Use trylock to prevent an Apps deadlock if the
- *               remote spinlock fails.
- */
-static void smp2p_ut_remote_spinlock_core(struct seq_file *s, int remote_pid,
-               bool use_trylock)
-{
-       int failed = 0;
-       unsigned lock_count = 0;
-       struct msm_smp2p_out *handle = NULL;
-       int ret;
-       uint32_t test_request;
-       uint32_t test_response;
-       struct mock_cb_data cb_out;
-       struct mock_cb_data cb_in;
-       unsigned long flags;
-       unsigned n;
-       bool have_lock;
-       bool timeout;
-       int failed_tmp;
-       int spinlock_owner;
-       remote_spinlock_t *smem_spinlock;
-       unsigned long end;
-
-       seq_printf(s, "Running %s for '%s' remote pid %d\n",
-                  __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
-       cb_out.initialized = false;
-       cb_in.initialized = false;
-       mock_cb_data_init(&cb_out);
-       mock_cb_data_init(&cb_in);
-       do {
-               smem_spinlock = smem_get_remote_spinlock();
-               UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
-               /* Open output entry */
-               ret = msm_smp2p_out_open(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-                       &cb_out.nb, &handle);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_out.cb_completion, HZ * 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_out.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_out.event_open, ==, 1);
-
-               /* Open inbound entry */
-               ret = msm_smp2p_in_register(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-                               &cb_in.nb);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_in.cb_completion, HZ * 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_in.event_open, ==, 1);
-
-               /* Send start */
-               mock_cb_data_reset(&cb_in);
-               mock_cb_data_reset(&cb_out);
-               test_request = 0x0;
-               SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_START);
-               SMP2P_SET_RMT_DATA(test_request, 0x0);
-               ret = msm_smp2p_out_write(handle, test_request);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_in.cb_completion, HZ * 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
-               ret = msm_smp2p_in_read(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-                               &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               test_response = SMP2P_GET_RMT_CMD(test_response);
-               if (test_response != SMP2P_LB_CMD_RSPIN_LOCKED &&
-                               test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED) {
-                       /* invalid response from remote - abort test */
-                       test_request = 0x0;
-                       SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-                       SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-                       SMP2P_SET_RMT_DATA(test_request, 0x0);
-                       ret = msm_smp2p_out_write(handle, test_request);
-                       UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_LOCKED, ==,
-                                       test_response);
-               }
-
-               /* Run spinlock test */
-               if (use_trylock)
-                       seq_puts(s, "\tUsing remote_spin_trylock\n");
-               else
-                       seq_puts(s, "\tUsing remote_spin_lock\n");
-
-               flags = 0;
-               have_lock = false;
-               timeout = false;
-               spinlock_owner = 0;
-               test_request = 0x0;
-               SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
-               end = jiffies + (ut_remote_spinlock_run_time * HZ);
-               if (ut_remote_spinlock_run_time < 300) {
-                               seq_printf(s, "\tRunning test for %u seconds; ",
-                                       ut_remote_spinlock_run_time);
-                               seq_puts(s,
-                                       "on physical hardware please run >= 300 seconds by doing 'echo 300 >  ut_remote_spinlock_time'\n");
-               }
-               while (time_is_after_jiffies(end)) {
-                       /* try to acquire spinlock */
-                       if (use_trylock) {
-                               unsigned long j_start = jiffies;
-                               while (!remote_spin_trylock_irqsave(
-                                               smem_spinlock, flags)) {
-                                       if (jiffies_to_msecs(jiffies - j_start)
-                                                       > 1000) {
-                                               seq_puts(s,
-                                                       "\tFail: Timeout trying to get the lock\n");
-                                               timeout = true;
-                                               break;
-                                       }
-                               }
-                               if (timeout)
-                                       break;
-                       } else {
-                               remote_spin_lock_irqsave(smem_spinlock, flags);
-                       }
-                       have_lock = true;
-                       ++lock_count;
-
-                       /* tell the remote side that we have the lock */
-                       SMP2P_SET_RMT_DATA(test_request, lock_count);
-                       SMP2P_SET_RMT_CMD(test_request,
-                                       SMP2P_LB_CMD_RSPIN_LOCKED);
-                       ret = msm_smp2p_out_write(handle, test_request);
-                       UT_ASSERT_INT(ret, ==, 0);
-
-                       /* verify the other side doesn't say it has the lock */
-                       for (n = 0; n < 1000; ++n) {
-                               spinlock_owner =
-                                       remote_spin_owner(smem_spinlock);
-                               if (spinlock_owner != SMEM_APPS) {
-                                       /* lock stolen by remote side */
-                                       seq_puts(s, "\tFail: Remote side: ");
-                                       seq_printf(s, "%d stole lock pid: %d\n",
-                                               remote_pid, spinlock_owner);
-                                       failed = true;
-                                       break;
-                               }
-                               spinlock_owner = 0;
-
-                               ret = msm_smp2p_in_read(remote_pid,
-                                       SMP2P_RLPB_ENTRY_NAME, &test_response);
-                               UT_ASSERT_INT(ret, ==, 0);
-                               test_response =
-                                       SMP2P_GET_RMT_CMD(test_response);
-                               UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_UNLOCKED, ==,
-                                       test_response);
-                       }
-                       if (failed)
-                               break;
-
-                       /* tell remote side we are unlocked and release lock */
-                       SMP2P_SET_RMT_CMD(test_request,
-                                       SMP2P_LB_CMD_RSPIN_UNLOCKED);
-                       (void)msm_smp2p_out_write(handle, test_request);
-                       have_lock = false;
-                       remote_spin_unlock_irqrestore(smem_spinlock, flags);
-               }
-               if (have_lock)
-                       remote_spin_unlock_irqrestore(smem_spinlock, flags);
-
-               /* End test */
-               mock_cb_data_reset(&cb_in);
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-               SMP2P_SET_RMT_DATA(test_request, lock_count |
-                               (spinlock_owner << RS_END_THIEF_PID_BIT));
-               (void)msm_smp2p_out_write(handle, test_request);
-
-               failed_tmp = failed;
-               failed = false;
-               do {
-                       UT_ASSERT_INT(
-                               (int)wait_for_completion_timeout(
-                                       &cb_in.cb_completion, HZ * 2),
-                               >, 0);
-                       reinit_completion(&cb_in.cb_completion);
-                       ret = msm_smp2p_in_read(remote_pid,
-                                       SMP2P_RLPB_ENTRY_NAME, &test_response);
-                       UT_ASSERT_INT(ret, ==, 0);
-               } while (!failed &&
-                       SMP2P_GET_RMT_CMD(test_response) !=
-                       SMP2P_LB_CMD_RSPIN_END);
-               if (failed)
-                       break;
-               failed = failed_tmp;
-
-               test_response = SMP2P_GET_RMT_DATA(test_response);
-               seq_puts(s, "\tLocked spinlock ");
-               seq_printf(s, "local %u times; remote %u times",
-                       lock_count,
-                       test_response & ((1 << RS_END_THIEF_PID_BIT) - 1)
-                       );
-               if (test_response & RS_END_THIEF_MASK) {
-                       seq_puts(s, "Remote side reporting lock stolen by ");
-                       seq_printf(s, "pid %d.\n",
-                               SMP2P_GET_BITS(test_response,
-                                       RS_END_THIEF_MASK,
-                                       RS_END_THIEF_PID_BIT));
-                       failed = 1;
-               }
-               seq_puts(s, "\n");
-
-               /* Cleanup */
-               ret = msm_smp2p_out_close(&handle);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_PTR(handle, ==, NULL);
-               ret = msm_smp2p_in_unregister(remote_pid,
-                               SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               if (!failed && !timeout)
-                       seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               if (handle) {
-                       /* send end command */
-                       test_request = 0;
-                       SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-                       SMP2P_SET_RMT_DATA(test_request, lock_count);
-                       (void)msm_smp2p_out_write(handle, test_request);
-                       (void)msm_smp2p_out_close(&handle);
-               }
-               (void)msm_smp2p_in_unregister(remote_pid,
-                               SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
-
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-}
-
-/**
- * smp2p_ut_remote_spinlock_pid - Verify remote spinlock for a processor.
- *
- * @s:           Pointer to output file
- * @pid:         Processor to test
- * @use_trylock: Use trylock to prevent an Apps deadlock if the
- *               remote spinlock fails.
- */
-static void smp2p_ut_remote_spinlock_pid(struct seq_file *s, int pid,
-               bool use_trylock)
-{
-       struct smp2p_interrupt_config *int_cfg;
-
-       int_cfg = smp2p_get_interrupt_config();
-       if (!int_cfg) {
-               seq_puts(s, "Remote processor config unavailable\n");
-               return;
-       }
-
-       if (pid >= SMP2P_NUM_PROCS || !int_cfg[pid].is_configured)
-               return;
-
-       msm_smp2p_deinit_rmt_lpb_proc(pid);
-       smp2p_ut_remote_spinlock_core(s, pid, use_trylock);
-       msm_smp2p_init_rmt_lpb_proc(pid);
-}
-
-/**
- * smp2p_ut_remote_spinlock - Verify remote spinlock for all processors.
- *
- * @s:   pointer to output file
- */
-static void smp2p_ut_remote_spinlock(struct seq_file *s)
-{
-       int pid;
-
-       for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
-               smp2p_ut_remote_spinlock_pid(s, pid, false);
-}
-
-/**
- * smp2p_ut_remote_spin_trylock - Verify remote trylock for all processors.
- *
- * @s:   Pointer to output file
- */
-static void smp2p_ut_remote_spin_trylock(struct seq_file *s)
-{
-       int pid;
-
-       for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
-               smp2p_ut_remote_spinlock_pid(s, pid, true);
-}
-
-/**
- * smp2p_ut_remote_spinlock - Verify remote spinlock for all processors.
- *
- * @s:   pointer to output file
- *
- * This test verifies inbound and outbound functionality for all
- * configured remote processor.
- */
-static void smp2p_ut_remote_spinlock_modem(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_pid(s, SMP2P_MODEM_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_adsp(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_pid(s, SMP2P_AUDIO_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_dsps(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_pid(s, SMP2P_SENSOR_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_wcnss(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_pid(s, SMP2P_WIRELESS_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_cdsp(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_pid(s, SMP2P_CDSP_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_tz(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_pid(s, SMP2P_TZ_PROC, false);
-}
-
-/**
- * smp2p_ut_remote_spinlock_rpm - Verify remote spinlock.
- *
- * @s:   pointer to output file
- * @remote_pid:  Remote processor to test
- */
-static void smp2p_ut_remote_spinlock_rpm(struct seq_file *s)
-{
-       int failed = 0;
-       unsigned long flags;
-       unsigned n;
-       unsigned test_num;
-       struct rpm_spinlock_test *data_ptr;
-       remote_spinlock_t *smem_spinlock;
-       bool have_lock;
-
-       seq_printf(s, "Running %s for Apps<->RPM Test\n",
-                  __func__);
-       do {
-               smem_spinlock = smem_get_remote_spinlock();
-               UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
-               data_ptr = smem_alloc(SMEM_ID_VENDOR0,
-                               sizeof(struct rpm_spinlock_test), 0,
-                               SMEM_ANY_HOST_FLAG);
-               UT_ASSERT_PTR(0, !=, data_ptr);
-
-               /* Send start */
-               writel_relaxed(0, &data_ptr->apps_lock_count);
-               writel_relaxed(RPM_CMD_START, &data_ptr->apps_cmd);
-
-               seq_puts(s, "\tWaiting for RPM to start test\n");
-               for (n = 0; n < 1000; ++n) {
-                       if (readl_relaxed(&data_ptr->rpm_cmd) !=
-                                       RPM_CMD_INVALID)
-                               break;
-                       usleep_range(1000, 1200);
-               }
-               if (readl_relaxed(&data_ptr->rpm_cmd) == RPM_CMD_INVALID) {
-                       /* timeout waiting for RPM */
-                       writel_relaxed(RPM_CMD_INVALID, &data_ptr->apps_cmd);
-                       UT_ASSERT_INT(RPM_CMD_LOCKED, !=, RPM_CMD_INVALID);
-               }
-
-               /* Run spinlock test */
-               flags = 0;
-               have_lock = false;
-               for (test_num = 0; !failed && test_num < 10000; ++test_num) {
-                       /* acquire spinlock */
-                       remote_spin_lock_irqsave(smem_spinlock, flags);
-                       have_lock = true;
-                       data_ptr->apps_lock_count++;
-                       writel_relaxed(data_ptr->apps_lock_count,
-                               &data_ptr->apps_lock_count);
-                       writel_relaxed(RPM_CMD_LOCKED, &data_ptr->apps_cmd);
-                       /*
-                        * Ensure that the remote side sees our lock has
-                        * been acquired before we start polling their status.
-                        */
-                       wmb();
-
-                       /* verify the other side doesn't say it has the lock */
-                       for (n = 0; n < 1000; ++n) {
-                               UT_ASSERT_HEX(RPM_CMD_UNLOCKED, ==,
-                                       readl_relaxed(&data_ptr->rpm_cmd));
-                       }
-                       if (failed)
-                               break;
-
-                       /* release spinlock */
-                       have_lock = false;
-                       writel_relaxed(RPM_CMD_UNLOCKED, &data_ptr->apps_cmd);
-                       /*
-                        * Ensure that our status-update write was committed
-                        * before we unlock the spinlock.
-                        */
-                       wmb();
-                       remote_spin_unlock_irqrestore(smem_spinlock, flags);
-               }
-               if (have_lock)
-                       remote_spin_unlock_irqrestore(smem_spinlock, flags);
-
-               /* End test */
-               writel_relaxed(RPM_CMD_INVALID, &data_ptr->apps_cmd);
-               seq_printf(s, "\tLocked spinlock local %u remote %u\n",
-                               readl_relaxed(&data_ptr->apps_lock_count),
-                               readl_relaxed(&data_ptr->rpm_lock_count));
-
-               if (!failed)
-                       seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-}
-
-struct rmt_spinlock_work_item {
-       struct work_struct work;
-       struct completion try_lock;
-       struct completion locked;
-       bool has_locked;
-};
-
-static void ut_remote_spinlock_ssr_worker(struct work_struct *work)
-{
-       remote_spinlock_t *smem_spinlock;
-       unsigned long flags;
-       struct rmt_spinlock_work_item *work_item =
-               container_of(work, struct rmt_spinlock_work_item, work);
-
-       work_item->has_locked = false;
-       complete(&work_item->try_lock);
-       smem_spinlock = smem_get_remote_spinlock();
-       if (!smem_spinlock) {
-               pr_err("%s Failed\n", __func__);
-               return;
-       }
-
-       remote_spin_lock_irqsave(smem_spinlock, flags);
-       remote_spin_unlock_irqrestore(smem_spinlock, flags);
-       work_item->has_locked = true;
-       complete(&work_item->locked);
-}
-
-/**
- * smp2p_ut_remote_spinlock_ssr - Verify remote spinlock.
- *
- * @s:   pointer to output file
- */
-static void smp2p_ut_remote_spinlock_ssr(struct seq_file *s)
-{
-       int failed = 0;
-       unsigned long flags;
-       remote_spinlock_t *smem_spinlock;
-       int spinlock_owner = 0;
-
-       struct workqueue_struct *ws = NULL;
-       struct rmt_spinlock_work_item work_item = { .has_locked = false };
-
-       seq_printf(s, " Running %s Test\n",
-                  __func__);
-       do {
-               smem_spinlock = smem_get_remote_spinlock();
-               UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
-               ws = create_singlethread_workqueue("ut_remote_spinlock_ssr");
-               UT_ASSERT_PTR(ws, !=, NULL);
-               INIT_WORK(&work_item.work, ut_remote_spinlock_ssr_worker);
-               init_completion(&work_item.try_lock);
-               init_completion(&work_item.locked);
-
-               remote_spin_lock_irqsave(smem_spinlock, flags);
-               /* Unlock local spin lock and hold HW spinlock */
-               spin_unlock_irqrestore(&((smem_spinlock)->local), flags);
-
-               queue_work(ws, &work_item.work);
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &work_item.try_lock, HZ * 2), >, 0);
-               UT_ASSERT_INT((int)work_item.has_locked, ==, 0);
-               spinlock_owner = remote_spin_owner(smem_spinlock);
-               UT_ASSERT_INT(spinlock_owner, ==, SMEM_APPS);
-               remote_spin_release_all(SMEM_APPS);
-
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &work_item.locked, HZ * 2), >, 0);
-
-               if (!failed)
-                       seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-}
-
-/**
- * smp2p_ut_remote_spinlock_track_core - Verify remote spinlock.
- *
- * @s:           Pointer to output file
- * @remote_pid:  Remote processor to test
- *
- * This test has the remote subsystem grab the lock, and then has the local
- * subsystem attempt to grab the lock using the trylock() API. It then verifies
- * that the ID in the hw_spinlocks array matches the owner of the lock.
- */
-static void smp2p_ut_remote_spinlock_track_core(struct seq_file *s,
-               int remote_pid)
-{
-       int failed = 0;
-       struct msm_smp2p_out *handle = NULL;
-       int ret;
-       uint32_t test_request;
-       uint32_t test_response;
-       struct mock_cb_data cb_out;
-       struct mock_cb_data cb_in;
-       unsigned long flags;
-       int stored_value;
-       remote_spinlock_t *smem_spinlock;
-
-       seq_printf(s, "Running %s for '%s' remote pid %d\n",
-                  __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
-       cb_out.initialized = false;
-       cb_in.initialized = false;
-       mock_cb_data_init(&cb_out);
-       mock_cb_data_init(&cb_in);
-       do {
-               smem_spinlock = smem_get_remote_spinlock();
-               UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
-               /* Open output entry */
-               ret = msm_smp2p_out_open(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-                       &cb_out.nb, &handle);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_out.cb_completion, HZ * 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_out.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_out.event_open, ==, 1);
-
-               /* Open inbound entry */
-               ret = msm_smp2p_in_register(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-                               &cb_in.nb);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_in.cb_completion, HZ * 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_in.event_open, ==, 1);
-
-               /* Send start */
-               mock_cb_data_reset(&cb_in);
-               mock_cb_data_reset(&cb_out);
-               test_request = 0x0;
-               SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_START);
-               SMP2P_SET_RMT_DATA(test_request, 0x0);
-               ret = msm_smp2p_out_write(handle, test_request);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_in.cb_completion, HZ * 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
-               ret = msm_smp2p_in_read(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-                               &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               test_response = SMP2P_GET_RMT_CMD(test_response);
-               if (test_response != SMP2P_LB_CMD_RSPIN_LOCKED &&
-                               test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED) {
-                       /* invalid response from remote - abort test */
-                       test_request = 0x0;
-                       SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-                       SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-                       SMP2P_SET_RMT_DATA(test_request, 0x0);
-                       ret = msm_smp2p_out_write(handle, test_request);
-                       UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_LOCKED, ==,
-                                       test_response);
-               }
-
-               /* Run spinlock test */
-               flags = 0;
-               test_request = 0x0;
-               SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
-
-               /* try to acquire spinlock */
-               remote_spin_trylock_irqsave(smem_spinlock, flags);
-               /*
-                * Need to check against the locking token (PID + 1)
-                * because the remote_spin_owner() API only returns the
-                * PID.
-                */
-               stored_value = remote_spin_get_hw_spinlocks_element(
-                               smem_spinlock);
-               UT_ASSERT_INT(stored_value, ==,
-                       remote_spin_owner(smem_spinlock) + 1);
-               UT_ASSERT_INT(stored_value, ==, remote_pid + 1);
-
-               /* End test */
-               test_request = 0x0;
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-               SMP2P_SET_RMT_DATA(test_request, 0x0);
-               (void)msm_smp2p_out_write(handle, test_request);
-
-               /* Cleanup */
-               ret = msm_smp2p_out_close(&handle);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_PTR(handle, ==, NULL);
-               ret = msm_smp2p_in_unregister(remote_pid,
-                               SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               if (!failed)
-                       seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               if (handle) {
-                       /* send end command */
-                       test_request = 0x0;
-                       SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-                       SMP2P_SET_RMT_DATA(test_request, 0x0);
-                       (void)msm_smp2p_out_write(handle, test_request);
-                       (void)msm_smp2p_out_close(&handle);
-               }
-               (void)msm_smp2p_in_unregister(remote_pid,
-                               SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
-
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-}
-
-/**
- * smp2p_ut_remote_spinlock_track - Verify PID tracking for modem.
- *
- * @s: Pointer to output file
- * @pid:               The processor to test
- */
-static void smp2p_ut_remote_spinlock_track(struct seq_file *s, int pid)
-{
-       struct smp2p_interrupt_config *int_cfg;
-
-       int_cfg = smp2p_get_interrupt_config();
-       if (!int_cfg) {
-               seq_puts(s, "Remote processor config unavailable\n");
-               return;
-       }
-
-       if (pid >= SMP2P_NUM_PROCS || !int_cfg[pid].is_configured)
-               return;
-
-       msm_smp2p_deinit_rmt_lpb_proc(pid);
-       smp2p_ut_remote_spinlock_track_core(s, pid);
-       msm_smp2p_init_rmt_lpb_proc(pid);
-}
-
-/**
- * smp2p_ut_remote_spinlock_track - Verify PID tracking for all processors.
- *
- * @s: Pointer to output file
- *
- * This test verifies PID tracking for all configured remote processors.
- */
-static void smp2p_ut_remote_spinlock_track_modem(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_track(s, SMP2P_MODEM_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_adsp(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_track(s, SMP2P_AUDIO_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_dsps(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_track(s, SMP2P_SENSOR_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_wcnss(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_track(s, SMP2P_WIRELESS_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_cdsp(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_track(s, SMP2P_CDSP_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_tz(struct seq_file *s)
-{
-       smp2p_ut_remote_spinlock_track(s, SMP2P_TZ_PROC);
-}
-
-static int __init smp2p_debugfs_init(void)
-{
-       /*
-        * Add Unit Test entries.
-        *
-        * The idea with unit tests is that you can run all of them
-        * from ADB shell by doing:
-        *  adb shell
-        *  cat ut*
-        *
-        * And if particular tests fail, you can then repeatedly run the
-        * failing tests as you debug and resolve the failing test.
-        */
-       smp2p_debug_create("ut_remote_spinlock",
-               smp2p_ut_remote_spinlock);
-       smp2p_debug_create("ut_remote_spin_trylock",
-               smp2p_ut_remote_spin_trylock);
-       smp2p_debug_create("ut_remote_spinlock_modem",
-               smp2p_ut_remote_spinlock_modem);
-       smp2p_debug_create("ut_remote_spinlock_adsp",
-               smp2p_ut_remote_spinlock_adsp);
-       smp2p_debug_create("ut_remote_spinlock_dsps",
-               smp2p_ut_remote_spinlock_dsps);
-       smp2p_debug_create("ut_remote_spinlock_wcnss",
-               smp2p_ut_remote_spinlock_wcnss);
-       smp2p_debug_create("ut_remote_spinlock_cdsp",
-               smp2p_ut_remote_spinlock_cdsp);
-       smp2p_debug_create("ut_remote_spinlock_tz",
-               smp2p_ut_remote_spinlock_tz);
-       smp2p_debug_create("ut_remote_spinlock_rpm",
-               smp2p_ut_remote_spinlock_rpm);
-       smp2p_debug_create_u32("ut_remote_spinlock_time",
-               &ut_remote_spinlock_run_time);
-       smp2p_debug_create("ut_remote_spinlock_ssr",
-               &smp2p_ut_remote_spinlock_ssr);
-       smp2p_debug_create("ut_remote_spinlock_track_modem",
-               &smp2p_ut_remote_spinlock_track_modem);
-       smp2p_debug_create("ut_remote_spinlock_track_adsp",
-               &smp2p_ut_remote_spinlock_track_adsp);
-       smp2p_debug_create("ut_remote_spinlock_track_dsps",
-               &smp2p_ut_remote_spinlock_track_dsps);
-       smp2p_debug_create("ut_remote_spinlock_track_wcnss",
-               &smp2p_ut_remote_spinlock_track_wcnss);
-       smp2p_debug_create("ut_remote_spinlock_track_cdsp",
-               &smp2p_ut_remote_spinlock_track_cdsp);
-       smp2p_debug_create("ut_remote_spinlock_track_tz",
-               &smp2p_ut_remote_spinlock_track_tz);
-       return 0;
-}
-module_init(smp2p_debugfs_init);
diff --git a/drivers/soc/qcom/smp2p_test.c b/drivers/soc/qcom/smp2p_test.c
deleted file mode 100644 (file)
index e81bada..0000000
+++ /dev/null
@@ -1,1327 +0,0 @@
-/* drivers/soc/qcom/smp2p_test.c
- *
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/debugfs.h>
-#include <linux/ctype.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <soc/qcom/subsystem_restart.h>
-#include "smp2p_private.h"
-#include "smp2p_test_common.h"
-
-/**
- * smp2p_ut_local_basic - Basic sanity test using local loopback.
- *
- * @s: pointer to output file
- *
- * This test simulates a simple write and read
- * when remote processor does not exist.
- */
-static void smp2p_ut_local_basic(struct seq_file *s)
-{
-       int failed = 0;
-       struct msm_smp2p_out *smp2p_obj;
-       struct msm_smp2p_remote_mock *rmp = NULL;
-       int ret;
-       uint32_t test_request;
-       uint32_t test_response = 0;
-       static struct mock_cb_data cb_data;
-
-       seq_printf(s, "Running %s\n", __func__);
-       mock_cb_data_init(&cb_data);
-       do {
-               /* initialize mock edge and start opening */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-
-               rmp = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(rmp, !=, NULL);
-
-               rmp->rx_interrupt_count = 0;
-               memset(&rmp->remote_item, 0,
-                       sizeof(struct smp2p_smem_item));
-
-               msm_smp2p_set_remote_mock_exists(false);
-
-               ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
-                       &cb_data.nb, &smp2p_obj);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-               UT_ASSERT_INT(cb_data.cb_count, ==, 0);
-               rmp->rx_interrupt_count = 0;
-
-               /* simulate response from remote side */
-               rmp->remote_item.header.magic = SMP2P_MAGIC;
-               SMP2P_SET_LOCAL_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                       SMP2P_REMOTE_MOCK_PROC);
-               SMP2P_SET_REMOTE_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                       SMP2P_APPS_PROC);
-               SMP2P_SET_VERSION(
-               rmp->remote_item.header.feature_version, 1);
-               SMP2P_SET_FEATURES(
-               rmp->remote_item.header.feature_version, 0);
-               SMP2P_SET_ENT_TOTAL(
-               rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
-               SMP2P_SET_ENT_VALID(
-               rmp->remote_item.header.valid_total_ent, 0);
-               rmp->remote_item.header.flags = 0x0;
-               msm_smp2p_set_remote_mock_exists(true);
-               rmp->tx_interrupt();
-
-               /* verify port was opened */
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_data.cb_completion, HZ / 2), >, 0);
-               UT_ASSERT_INT(cb_data.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_data.event_open, ==, 1);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
-               /* do write (test outbound entries) */
-               rmp->rx_interrupt_count = 0;
-               test_request = 0xC0DE;
-               ret = msm_smp2p_out_write(smp2p_obj, test_request);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-               /* do read (test inbound entries) */
-               ret = msm_smp2p_out_read(smp2p_obj, &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(test_request, ==, test_response);
-
-               ret = msm_smp2p_out_close(&smp2p_obj);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_PTR(smp2p_obj, ==, 0);
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-               (void)msm_smp2p_out_close(&smp2p_obj);
-       }
-}
-
-/**
- * smp2p_ut_local_late_open - Verify post-negotiation opening.
- *
- * @s: pointer to output file
- *
- * Verify entry creation for opening entries after negotiation is complete.
- */
-static void smp2p_ut_local_late_open(struct seq_file *s)
-{
-       int failed = 0;
-       struct msm_smp2p_out *smp2p_obj;
-       struct msm_smp2p_remote_mock *rmp = NULL;
-       int ret;
-       uint32_t test_request;
-       uint32_t test_response = 0;
-       static struct mock_cb_data cb_data;
-
-       seq_printf(s, "Running %s\n", __func__);
-       mock_cb_data_init(&cb_data);
-       do {
-               /* initialize mock edge */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-
-               rmp = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(rmp, !=, NULL);
-
-               rmp->rx_interrupt_count = 0;
-               memset(&rmp->remote_item, 0,
-                       sizeof(struct smp2p_smem_item));
-               rmp->remote_item.header.magic = SMP2P_MAGIC;
-               SMP2P_SET_LOCAL_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                               SMP2P_REMOTE_MOCK_PROC);
-               SMP2P_SET_REMOTE_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                               SMP2P_APPS_PROC);
-               SMP2P_SET_VERSION(
-                       rmp->remote_item.header.feature_version, 1);
-               SMP2P_SET_FEATURES(
-                       rmp->remote_item.header.feature_version, 0);
-               SMP2P_SET_ENT_TOTAL(
-                       rmp->remote_item.header.valid_total_ent,
-                       SMP2P_MAX_ENTRY);
-               SMP2P_SET_ENT_VALID(
-               rmp->remote_item.header.valid_total_ent, 0);
-               rmp->remote_item.header.flags = 0x0;
-
-               msm_smp2p_set_remote_mock_exists(true);
-
-               ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
-                       &cb_data.nb, &smp2p_obj);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               /* verify port was opened */
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_data.cb_completion, HZ / 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_data.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_data.event_open, ==, 1);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
-               /* do write (test outbound entries) */
-               rmp->rx_interrupt_count = 0;
-               test_request = 0xC0DE;
-               ret = msm_smp2p_out_write(smp2p_obj, test_request);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-               /* do read (test inbound entries) */
-               ret = msm_smp2p_out_read(smp2p_obj, &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(test_request, ==, test_response);
-
-               ret = msm_smp2p_out_close(&smp2p_obj);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_PTR(smp2p_obj, ==, 0);
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-               (void)msm_smp2p_out_close(&smp2p_obj);
-       }
-}
-
-/**
- * smp2p_ut_local_early_open - Verify pre-negotiation opening.
- *
- * @s: pointer to output file
- *
- * Verify entry creation for opening entries before negotiation is complete.
- */
-static void smp2p_ut_local_early_open(struct seq_file *s)
-{
-       int failed = 0;
-       struct msm_smp2p_out *smp2p_obj;
-       struct msm_smp2p_remote_mock *rmp = NULL;
-       struct smp2p_smem *outbound_item;
-       int negotiation_state;
-       int ret;
-       uint32_t test_request;
-       uint32_t test_response = 0;
-       static struct mock_cb_data cb_data;
-
-       seq_printf(s, "Running %s\n", __func__);
-       mock_cb_data_init(&cb_data);
-       do {
-               /* initialize mock edge, but don't enable, yet */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-
-               rmp = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(rmp, !=, NULL);
-
-               rmp->rx_interrupt_count = 0;
-               memset(&rmp->remote_item, 0,
-                       sizeof(struct smp2p_smem_item));
-               rmp->remote_item.header.magic = SMP2P_MAGIC;
-               SMP2P_SET_LOCAL_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                               SMP2P_REMOTE_MOCK_PROC);
-               SMP2P_SET_REMOTE_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                               SMP2P_APPS_PROC);
-               SMP2P_SET_VERSION(
-               rmp->remote_item.header.feature_version, 1);
-               SMP2P_SET_FEATURES(
-               rmp->remote_item.header.feature_version, 0);
-               SMP2P_SET_ENT_TOTAL(
-               rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
-               SMP2P_SET_ENT_VALID(
-               rmp->remote_item.header.valid_total_ent, 0);
-               rmp->remote_item.header.flags = 0x0;
-
-               msm_smp2p_set_remote_mock_exists(false);
-               UT_ASSERT_PTR(NULL, ==,
-                               smp2p_get_in_item(SMP2P_REMOTE_MOCK_PROC));
-
-               /* initiate open, but verify it doesn't complete */
-               ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
-                       &cb_data.nb, &smp2p_obj);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_data.cb_completion, HZ / 8),
-                       ==, 0);
-               UT_ASSERT_INT(cb_data.cb_count, ==, 0);
-               UT_ASSERT_INT(cb_data.event_open, ==, 0);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-               outbound_item = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
-                               &negotiation_state);
-               UT_ASSERT_PTR(outbound_item, !=, NULL);
-               UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENING);
-               UT_ASSERT_INT(0, ==,
-                       SMP2P_GET_ENT_VALID(outbound_item->valid_total_ent));
-
-               /* verify that read/write don't work yet */
-               rmp->rx_interrupt_count = 0;
-               test_request = 0x0;
-               ret = msm_smp2p_out_write(smp2p_obj, test_request);
-               UT_ASSERT_INT(ret, ==, -ENODEV);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
-
-               ret = msm_smp2p_out_read(smp2p_obj, &test_response);
-               UT_ASSERT_INT(ret, ==, -ENODEV);
-
-               /* allocate remote entry and verify open */
-               msm_smp2p_set_remote_mock_exists(true);
-               rmp->tx_interrupt();
-
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_data.cb_completion, HZ / 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_data.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_data.event_open, ==, 1);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
-               /* do write (test outbound entries) */
-               rmp->rx_interrupt_count = 0;
-               test_request = 0xC0DE;
-               ret = msm_smp2p_out_write(smp2p_obj, test_request);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-               /* do read (test inbound entries) */
-               ret = msm_smp2p_out_read(smp2p_obj, &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(test_request, ==, test_response);
-
-               ret = msm_smp2p_out_close(&smp2p_obj);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_PTR(smp2p_obj, ==, 0);
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-               (void)msm_smp2p_out_close(&smp2p_obj);
-       }
-}
-
-/**
- * smp2p_ut_mock_loopback - Exercise the remote loopback using remote mock.
- *
- * @s: pointer to output file
- *
- * This test exercises the remote loopback code using
- * remote mock object. The remote mock object simulates the remote
- * processor sending remote loopback commands to the local processor.
- */
-static void smp2p_ut_mock_loopback(struct seq_file *s)
-{
-       int failed = 0;
-       struct msm_smp2p_remote_mock *rmp = NULL;
-       int ret;
-       uint32_t test_request = 0;
-       uint32_t test_response = 0;
-       struct msm_smp2p_out  *local;
-
-       seq_printf(s, "Running %s\n", __func__);
-       do {
-               /* Initialize the mock edge */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-
-               rmp = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(rmp, !=, NULL);
-
-               memset(&rmp->remote_item, 0,
-                       sizeof(struct smp2p_smem_item));
-               rmp->remote_item.header.magic = SMP2P_MAGIC;
-               SMP2P_SET_LOCAL_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                               SMP2P_REMOTE_MOCK_PROC);
-               SMP2P_SET_REMOTE_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                               SMP2P_APPS_PROC);
-               SMP2P_SET_VERSION(
-               rmp->remote_item.header.feature_version, 1);
-               SMP2P_SET_FEATURES(
-               rmp->remote_item.header.feature_version, 0);
-               SMP2P_SET_ENT_TOTAL(
-               rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
-               SMP2P_SET_ENT_VALID(
-               rmp->remote_item.header.valid_total_ent, 1);
-               rmp->remote_item.header.flags = 0x0;
-               msm_smp2p_set_remote_mock_exists(true);
-
-               /* Create test entry and attach loopback server */
-               rmp->rx_interrupt_count = 0;
-               reinit_completion(&rmp->cb_completion);
-               strlcpy(rmp->remote_item.entries[0].name, "smp2p",
-                                                       SMP2P_MAX_ENTRY_NAME);
-               rmp->remote_item.entries[0].entry = 0;
-               rmp->tx_interrupt();
-
-               local = msm_smp2p_init_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &rmp->cb_completion, HZ / 2),
-                       >, 0);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
-               /* Send Echo Command */
-               rmp->rx_interrupt_count = 0;
-               reinit_completion(&rmp->cb_completion);
-               SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_ECHO);
-               SMP2P_SET_RMT_DATA(test_request, 10);
-               rmp->remote_item.entries[0].entry = test_request;
-               rmp->tx_interrupt();
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &rmp->cb_completion, HZ / 2),
-                       >, 0);
-
-               /* Verify Echo Response */
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-               ret = msm_smp2p_out_read(local,
-                                                       &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-               test_response = SMP2P_GET_RMT_DATA(test_response);
-               UT_ASSERT_INT(test_response, ==, 10);
-
-               /* Send PINGPONG command */
-               test_request = 0;
-               test_response = 0;
-               rmp->rx_interrupt_count = 0;
-               reinit_completion(&rmp->cb_completion);
-               SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_PINGPONG);
-               SMP2P_SET_RMT_DATA(test_request, 10);
-               rmp->remote_item.entries[0].entry = test_request;
-               rmp->tx_interrupt();
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &rmp->cb_completion, HZ / 2),
-                       >, 0);
-
-               /* Verify PINGPONG Response */
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-               ret = msm_smp2p_out_read(local, &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-               test_response = SMP2P_GET_RMT_DATA(test_response);
-               UT_ASSERT_INT(test_response, ==, 9);
-
-               /* Send CLEARALL command */
-               test_request = 0;
-               test_response = 0;
-               rmp->rx_interrupt_count = 0;
-               reinit_completion(&rmp->cb_completion);
-               SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_CLEARALL);
-               SMP2P_SET_RMT_DATA(test_request, 10);
-               rmp->remote_item.entries[0].entry = test_request;
-               rmp->tx_interrupt();
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &rmp->cb_completion, HZ / 2),
-                       >, 0);
-
-               /* Verify CLEARALL response */
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-               ret = msm_smp2p_out_read(local, &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-               test_response = SMP2P_GET_RMT_DATA(test_response);
-               UT_ASSERT_INT(test_response, ==, 0);
-
-               ret = msm_smp2p_deinit_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
-               UT_ASSERT_INT(ret, ==, 0);
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-               msm_smp2p_deinit_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
-       }
-}
-
-/**
- * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
- *
- * @s: pointer to output file
- * @remote_pid:  Remote processor to test
- *
- * This test verifies inbound/outbound functionality for the remote processor.
- */
-static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid)
-{
-       int failed = 0;
-       struct msm_smp2p_out *handle;
-       int ret;
-       uint32_t test_request;
-       uint32_t test_response = 0;
-       static struct mock_cb_data cb_out;
-       static struct mock_cb_data cb_in;
-
-       seq_printf(s, "Running %s for '%s' remote pid %d\n",
-                  __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-       mock_cb_data_init(&cb_out);
-       mock_cb_data_init(&cb_in);
-       do {
-               /* Open output entry */
-               ret = msm_smp2p_out_open(remote_pid, "smp2p",
-                       &cb_out.nb, &handle);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_out.cb_completion, HZ / 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_out.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_out.event_open, ==, 1);
-
-               /* Open inbound entry */
-               ret = msm_smp2p_in_register(remote_pid, "smp2p",
-                               &cb_in.nb);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_in.cb_completion, HZ / 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_in.event_open, ==, 1);
-
-               /* Write an echo request */
-               mock_cb_data_reset(&cb_out);
-               mock_cb_data_reset(&cb_in);
-               test_request = 0x0;
-               SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_ECHO);
-               SMP2P_SET_RMT_DATA(test_request, 0xAA55);
-               ret = msm_smp2p_out_write(handle, test_request);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               /* Verify inbound reply */
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_in.cb_completion, HZ / 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
-               UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
-                           cb_in.entry_data.current_value), ==, 0xAA55);
-
-               ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
-               UT_ASSERT_INT(SMP2P_LB_CMD_ECHO, ==,
-                               SMP2P_GET_RMT_CMD(test_response));
-               UT_ASSERT_INT(0xAA55, ==, SMP2P_GET_RMT_DATA(test_response));
-
-               /* Write a clear all request */
-               mock_cb_data_reset(&cb_in);
-               test_request = 0x0;
-               SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_CLEARALL);
-               SMP2P_SET_RMT_DATA(test_request, 0xAA55);
-               ret = msm_smp2p_out_write(handle, test_request);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               /* Verify inbound reply */
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_in.cb_completion, HZ / 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
-               UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
-                           cb_in.entry_data.current_value), ==, 0x0000);
-
-               ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
-               UT_ASSERT_INT(0x0000, ==, SMP2P_GET_RMT_DATA(test_response));
-
-               /* Write a decrement request */
-               mock_cb_data_reset(&cb_in);
-               test_request = 0x0;
-               SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_PINGPONG);
-               SMP2P_SET_RMT_DATA(test_request, 0xAA55);
-               ret = msm_smp2p_out_write(handle, test_request);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               /* Verify inbound reply */
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_in.cb_completion, HZ / 2),
-                       >, 0);
-               UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
-               UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
-                           cb_in.entry_data.current_value), ==, 0xAA54);
-
-               ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
-               UT_ASSERT_INT(SMP2P_LB_CMD_PINGPONG, ==,
-                               SMP2P_GET_RMT_CMD(test_response));
-               UT_ASSERT_INT(0xAA54, ==, SMP2P_GET_RMT_DATA(test_response));
-
-               /* Test the ignore flag */
-               mock_cb_data_reset(&cb_in);
-               test_request = 0x0;
-               SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-               SMP2P_SET_RMT_CMD(test_request, SMP2P_RLPB_IGNORE);
-               SMP2P_SET_RMT_DATA(test_request, 0xAA55);
-               ret = msm_smp2p_out_write(handle, test_request);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               UT_ASSERT_INT(
-                       (int)wait_for_completion_timeout(
-                                       &cb_in.cb_completion, HZ / 2),
-                       ==, 0);
-               UT_ASSERT_INT(cb_in.cb_count, ==, 0);
-               UT_ASSERT_INT(cb_in.event_entry_update, ==, 0);
-               ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(0xAA54, ==, SMP2P_GET_RMT_DATA(test_response));
-
-               /* Cleanup */
-               ret = msm_smp2p_out_close(&handle);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_PTR(handle, ==, 0);
-               ret = msm_smp2p_in_unregister(remote_pid, "smp2p", &cb_in.nb);
-               UT_ASSERT_INT(ret, ==, 0);
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               if (handle)
-                       (void)msm_smp2p_out_close(&handle);
-               (void)msm_smp2p_in_unregister(remote_pid, "smp2p", &cb_in.nb);
-
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-}
-
-/**
- * smp2p_ut_remote_inout - Verify inbound/outbound functionality for all.
- *
- * @s: pointer to output file
- *
- * This test verifies inbound and outbound functionality for all
- * configured remote processor.
- */
-static void smp2p_ut_remote_inout(struct seq_file *s)
-{
-       struct smp2p_interrupt_config *int_cfg;
-       int pid;
-
-       int_cfg = smp2p_get_interrupt_config();
-       if (!int_cfg) {
-               seq_puts(s, "Remote processor config unavailable\n");
-               return;
-       }
-
-       for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
-               if (!int_cfg[pid].is_configured)
-                       continue;
-
-               msm_smp2p_deinit_rmt_lpb_proc(pid);
-               smp2p_ut_remote_inout_core(s, pid);
-               msm_smp2p_init_rmt_lpb_proc(pid);
-       }
-}
-
-/**
- * smp2p_ut_remote_out_max_entries_core - Verify open functionality.
- *
- * @s: pointer to output file
- * @remote_pid:  Remote processor for which the test is executed.
- *
- * This test verifies open functionality by creating maximum outbound entries.
- */
-static void smp2p_ut_remote_out_max_entries_core(struct seq_file *s,
-       int remote_pid)
-{
-       int j = 0;
-       int failed = 0;
-       struct msm_smp2p_out *handle[SMP2P_MAX_ENTRY];
-       int ret;
-       static struct mock_cb_data cb_out[SMP2P_MAX_ENTRY];
-       char entry_name[SMP2P_MAX_ENTRY_NAME];
-       int num_created;
-
-       seq_printf(s, "Running %s for '%s' remote pid %d\n",
-                  __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
-       for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
-               handle[j] = NULL;
-               mock_cb_data_init(&cb_out[j]);
-       }
-
-       do {
-               num_created = 0;
-               for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
-                       /* Open as many output entries as possible */
-                       scnprintf((char *)entry_name, SMP2P_MAX_ENTRY_NAME,
-                               "smp2p%d", j);
-                       ret = msm_smp2p_out_open(remote_pid, entry_name,
-                               &cb_out[j].nb, &handle[j]);
-                       if (ret == -ENOMEM)
-                               /* hit max number */
-                               break;
-                       UT_ASSERT_INT(ret, ==, 0);
-                       ++num_created;
-               }
-               if (failed)
-                       break;
-
-               /* verify we created more than 1 entry */
-               UT_ASSERT_INT(num_created, <=, SMP2P_MAX_ENTRY);
-               UT_ASSERT_INT(num_created, >, 0);
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-
-       /* cleanup */
-       for (j = 0; j < SMP2P_MAX_ENTRY; j++)
-               ret = msm_smp2p_out_close(&handle[j]);
-}
-
-/**
- * smp2p_ut_remote_out_max_entries - Verify open for all configured processors.
- *
- * @s: pointer to output file
- *
- * This test verifies creating max number of entries for
- * all configured remote processor.
- */
-static void smp2p_ut_remote_out_max_entries(struct seq_file *s)
-{
-       struct smp2p_interrupt_config *int_cfg;
-       int pid;
-
-       int_cfg = smp2p_get_interrupt_config();
-       if (!int_cfg) {
-               seq_puts(s, "Remote processor config unavailable\n");
-               return;
-       }
-
-       for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
-               if (!int_cfg[pid].is_configured)
-                       continue;
-
-               smp2p_ut_remote_out_max_entries_core(s, pid);
-       }
-}
-
-/**
- * smp2p_ut_local_in_max_entries - Verify registering and unregistering.
- *
- * @s: pointer to output file
- *
- * This test verifies registering and unregistering for inbound entries using
- * the remote mock processor.
- */
-static void smp2p_ut_local_in_max_entries(struct seq_file *s)
-{
-       int j = 0;
-       int failed = 0;
-       struct msm_smp2p_remote_mock *rmp = NULL;
-       int ret;
-       static struct mock_cb_data cb_in[SMP2P_MAX_ENTRY];
-       static struct mock_cb_data cb_out;
-
-       seq_printf(s, "Running %s\n", __func__);
-
-       for (j = 0; j < SMP2P_MAX_ENTRY; j++)
-               mock_cb_data_init(&cb_in[j]);
-
-       mock_cb_data_init(&cb_out);
-
-       do {
-               /* Initialize mock edge */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-
-               rmp = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(rmp, !=, NULL);
-
-               rmp->rx_interrupt_count = 0;
-               memset(&rmp->remote_item, 0,
-                       sizeof(struct smp2p_smem_item));
-               rmp->remote_item.header.magic = SMP2P_MAGIC;
-               SMP2P_SET_LOCAL_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                               SMP2P_REMOTE_MOCK_PROC);
-               SMP2P_SET_REMOTE_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                               SMP2P_APPS_PROC);
-               SMP2P_SET_VERSION(
-               rmp->remote_item.header.feature_version, 1);
-               SMP2P_SET_FEATURES(
-               rmp->remote_item.header.feature_version, 0);
-               SMP2P_SET_ENT_TOTAL(
-               rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
-               SMP2P_SET_ENT_VALID(
-               rmp->remote_item.header.valid_total_ent, 0);
-               rmp->remote_item.header.flags = 0x0;
-               msm_smp2p_set_remote_mock_exists(true);
-
-               /* Create Max Entries in the remote mock object */
-               for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
-                       scnprintf(rmp->remote_item.entries[j].name,
-                               SMP2P_MAX_ENTRY_NAME, "smp2p%d", j);
-                       rmp->remote_item.entries[j].entry = 0;
-                       rmp->tx_interrupt();
-               }
-
-               /* Register for in entries */
-               for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
-                       ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
-                               rmp->remote_item.entries[j].name,
-                               &(cb_in[j].nb));
-                       UT_ASSERT_INT(ret, ==, 0);
-                       UT_ASSERT_INT(
-                               (int)wait_for_completion_timeout(
-                                       &(cb_in[j].cb_completion), HZ / 2),
-                               >, 0);
-                       UT_ASSERT_INT(cb_in[j].cb_count, ==, 1);
-                       UT_ASSERT_INT(cb_in[j].event_entry_update, ==, 0);
-               }
-               UT_ASSERT_INT(j, ==, SMP2P_MAX_ENTRY);
-
-               /* Unregister */
-               for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
-                       ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-                               rmp->remote_item.entries[j].name,
-                               &(cb_in[j].nb));
-                   UT_ASSERT_INT(ret, ==, 0);
-               }
-               UT_ASSERT_INT(j, ==, SMP2P_MAX_ENTRY);
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-
-               for (j = 0; j < SMP2P_MAX_ENTRY; j++)
-                       ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-                               rmp->remote_item.entries[j].name,
-                               &(cb_in[j].nb));
-       }
-}
-
-/**
- * smp2p_ut_local_in_multiple - Verify Multiple Inbound Registration.
- *
- * @s: pointer to output file
- *
- * This test verifies multiple clients registering for same inbound entries
- * using the remote mock processor.
- */
-static void smp2p_ut_local_in_multiple(struct seq_file *s)
-{
-       int failed = 0;
-       struct msm_smp2p_remote_mock *rmp = NULL;
-       int ret;
-       static struct mock_cb_data cb_in_1;
-       static struct mock_cb_data cb_in_2;
-       static struct mock_cb_data cb_out;
-
-       seq_printf(s, "Running %s\n", __func__);
-
-       mock_cb_data_init(&cb_in_1);
-       mock_cb_data_init(&cb_in_2);
-       mock_cb_data_init(&cb_out);
-
-       do {
-               /* Initialize mock edge */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-
-               rmp = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(rmp, !=, NULL);
-
-               rmp->rx_interrupt_count = 0;
-               memset(&rmp->remote_item, 0,
-                       sizeof(struct smp2p_smem_item));
-               rmp->remote_item.header.magic = SMP2P_MAGIC;
-               SMP2P_SET_LOCAL_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                               SMP2P_REMOTE_MOCK_PROC);
-               SMP2P_SET_REMOTE_PID(
-               rmp->remote_item.header.rem_loc_proc_id,
-                                               SMP2P_APPS_PROC);
-               SMP2P_SET_VERSION(
-               rmp->remote_item.header.feature_version, 1);
-               SMP2P_SET_FEATURES(
-               rmp->remote_item.header.feature_version, 0);
-               SMP2P_SET_ENT_TOTAL(
-               rmp->remote_item.header.valid_total_ent, 1);
-               SMP2P_SET_ENT_VALID(
-               rmp->remote_item.header.valid_total_ent, 0);
-               rmp->remote_item.header.flags = 0x0;
-               msm_smp2p_set_remote_mock_exists(true);
-
-               /* Create an Entry in the remote mock object */
-               scnprintf(rmp->remote_item.entries[0].name,
-                               SMP2P_MAX_ENTRY_NAME, "smp2p%d", 1);
-               rmp->remote_item.entries[0].entry = 0;
-               rmp->tx_interrupt();
-
-               /* Register multiple clients for the inbound entry */
-               ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
-                               rmp->remote_item.entries[0].name,
-                               &cb_in_1.nb);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(
-                               (int)wait_for_completion_timeout(
-                               &(cb_in_1.cb_completion), HZ / 2),
-                               >, 0);
-               UT_ASSERT_INT(cb_in_1.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_in_1.event_entry_update, ==, 0);
-
-               ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
-                               rmp->remote_item.entries[0].name,
-                               &cb_in_2.nb);
-               UT_ASSERT_INT(ret, ==, 0);
-               UT_ASSERT_INT(
-                               (int)wait_for_completion_timeout(
-                               &(cb_in_2.cb_completion), HZ / 2),
-                               >, 0);
-               UT_ASSERT_INT(cb_in_2.cb_count, ==, 1);
-               UT_ASSERT_INT(cb_in_2.event_entry_update, ==, 0);
-
-
-               /* Unregister the clients */
-               ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-                               rmp->remote_item.entries[0].name,
-                               &(cb_in_1.nb));
-               UT_ASSERT_INT(ret, ==, 0);
-
-               ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-                               rmp->remote_item.entries[0].name,
-                               &(cb_in_2.nb));
-               UT_ASSERT_INT(ret, ==, 0);
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-
-               ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-                               rmp->remote_item.entries[0].name,
-                               &(cb_in_1.nb));
-
-               ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-                               rmp->remote_item.entries[0].name,
-                               &(cb_in_2.nb));
-       }
-}
-
-/**
- * smp2p_ut_local_ssr_ack - Verify SSR Done/ACK Feature
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_local_ssr_ack(struct seq_file *s)
-{
-       int failed = 0;
-       struct msm_smp2p_remote_mock *rmp = NULL;
-       int ret;
-
-       seq_printf(s, "Running %s\n", __func__);
-       do {
-               struct smp2p_smem *rhdr;
-               struct smp2p_smem *lhdr;
-               int negotiation_state;
-
-               /* initialize v1 without SMP2P_FEATURE_SSR_ACK enabled */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-               rmp = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(rmp, !=, NULL);
-               rhdr = &rmp->remote_item.header;
-
-               rmp->rx_interrupt_count = 0;
-               memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
-               rhdr->magic = SMP2P_MAGIC;
-               SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
-                               SMP2P_REMOTE_MOCK_PROC);
-               SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
-               SMP2P_SET_VERSION(rhdr->feature_version, 1);
-               SMP2P_SET_FEATURES(rhdr->feature_version, 0);
-               SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
-               SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
-               rhdr->flags = 0x0;
-               msm_smp2p_set_remote_mock_exists(true);
-               rmp->tx_interrupt();
-
-               /* verify edge is open */
-               lhdr = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
-                                       &negotiation_state);
-               UT_ASSERT_PTR(NULL, !=, lhdr);
-               UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENED);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-               /* verify no response to ack feature */
-               rmp->rx_interrupt_count = 0;
-               SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
-               rmp->tx_interrupt();
-               UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
-               UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
-
-               /* initialize v1 with SMP2P_FEATURE_SSR_ACK enabled */
-               ret = smp2p_reset_mock_edge();
-               UT_ASSERT_INT(ret, ==, 0);
-               rmp = msm_smp2p_get_remote_mock();
-               UT_ASSERT_PTR(rmp, !=, NULL);
-               rhdr = &rmp->remote_item.header;
-
-               rmp->rx_interrupt_count = 0;
-               memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
-               rhdr->magic = SMP2P_MAGIC;
-               SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
-                               SMP2P_REMOTE_MOCK_PROC);
-               SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
-               SMP2P_SET_VERSION(rhdr->feature_version, 1);
-               SMP2P_SET_FEATURES(rhdr->feature_version,
-                               SMP2P_FEATURE_SSR_ACK);
-               SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
-               SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
-               rmp->rx_interrupt_count = 0;
-               rhdr->flags = 0x0;
-               msm_smp2p_set_remote_mock_exists(true);
-               rmp->tx_interrupt();
-
-               /* verify edge is open */
-               lhdr = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
-                                       &negotiation_state);
-               UT_ASSERT_PTR(NULL, !=, lhdr);
-               UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENED);
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-               /* verify response to ack feature */
-               rmp->rx_interrupt_count = 0;
-               SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
-               rmp->tx_interrupt();
-               UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
-               UT_ASSERT_INT(1, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-               rmp->rx_interrupt_count = 0;
-               SMP2P_SET_RESTART_DONE(rhdr->flags, 0);
-               rmp->tx_interrupt();
-               UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
-               UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
-               UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-}
-
-/**
- * get_ssr_name_for_proc - Retrieve an SSR name from the provided list
- *
- * @names:     List of possible processor names
- * @name_len:  The length of @names
- * @index:     Index into @names
- *
- * Return: Pointer to the next processor name, NULL in error conditions
- */
-static char *get_ssr_name_for_proc(char *names[], size_t name_len, int index)
-{
-       if (index >= name_len) {
-               pr_err("%s: SSR failed; check subsys name table\n",
-                               __func__);
-               return NULL;
-       }
-
-       return names[index];
-}
-
-/**
- * smp2p_ut_local_ssr_ack - Verify SSR Done/ACK Feature
- *
- * @s: pointer to output file
- * @rpid: Remote processor ID
- * @int_cfg: Interrupt config
- */
-static void smp2p_ut_remotesubsys_ssr_ack(struct seq_file *s, uint32_t rpid,
-               struct smp2p_interrupt_config *int_cfg)
-{
-       int failed = 0;
-
-       seq_printf(s, "Running %s\n", __func__);
-       do {
-               struct smp2p_smem *rhdr;
-               struct smp2p_smem *lhdr;
-               int negotiation_state;
-               int name_index;
-               int ret;
-               uint32_t ssr_done_start;
-               bool ssr_ack_enabled = false;
-               bool ssr_success = false;
-               char *name = NULL;
-
-               static char *mpss_names[] = {"modem", "mpss"};
-               static char *lpass_names[] = {"adsp", "lpass"};
-               static char *sensor_names[] = {"slpi", "dsps"};
-               static char *wcnss_names[] = {"wcnss"};
-
-               lhdr = smp2p_get_out_item(rpid, &negotiation_state);
-               UT_ASSERT_PTR(NULL, !=, lhdr);
-               UT_ASSERT_INT(SMP2P_EDGE_STATE_OPENED, ==, negotiation_state);
-
-               rhdr = smp2p_get_in_item(rpid);
-               UT_ASSERT_PTR(NULL, !=, rhdr);
-
-               /* get initial state of SSR flags */
-               if (SMP2P_GET_FEATURES(rhdr->feature_version)
-                               & SMP2P_FEATURE_SSR_ACK)
-                       ssr_ack_enabled = true;
-               else
-                       ssr_ack_enabled = false;
-
-               ssr_done_start = SMP2P_GET_RESTART_DONE(rhdr->flags);
-               UT_ASSERT_INT(ssr_done_start, ==,
-                               SMP2P_GET_RESTART_ACK(lhdr->flags));
-
-               /* trigger restart */
-               name_index = 0;
-               while (!ssr_success) {
-
-                       switch (rpid) {
-                       case SMP2P_MODEM_PROC:
-                               name = get_ssr_name_for_proc(mpss_names,
-                                               ARRAY_SIZE(mpss_names),
-                                               name_index);
-                               break;
-                       case SMP2P_AUDIO_PROC:
-                               name = get_ssr_name_for_proc(lpass_names,
-                                               ARRAY_SIZE(lpass_names),
-                                               name_index);
-                               break;
-                       case SMP2P_SENSOR_PROC:
-                               name = get_ssr_name_for_proc(sensor_names,
-                                               ARRAY_SIZE(sensor_names),
-                                               name_index);
-                               break;
-                       case SMP2P_WIRELESS_PROC:
-                               name = get_ssr_name_for_proc(wcnss_names,
-                                               ARRAY_SIZE(wcnss_names),
-                                               name_index);
-                               break;
-                       default:
-                               pr_err("%s: Invalid proc ID %d given for ssr\n",
-                                               __func__, rpid);
-                       }
-
-                       if (!name) {
-                               seq_puts(s, "\tSSR failed; check subsys name table\n");
-                               failed = true;
-                               break;
-                       }
-
-                       seq_printf(s, "Restarting '%s'\n", name);
-                       ret = subsystem_restart(name);
-                       if (ret == -ENODEV) {
-                               seq_puts(s, "\tSSR call failed\n");
-                               ++name_index;
-                               continue;
-                       }
-                       ssr_success = true;
-               }
-               if (failed)
-                       break;
-
-               msleep(10*1000);
-
-               /* verify ack signaling */
-               if (ssr_ack_enabled) {
-                       ssr_done_start ^= 1;
-                       UT_ASSERT_INT(ssr_done_start, ==,
-                                       SMP2P_GET_RESTART_ACK(lhdr->flags));
-                       UT_ASSERT_INT(ssr_done_start, ==,
-                                       SMP2P_GET_RESTART_DONE(rhdr->flags));
-                       UT_ASSERT_INT(0, ==,
-                                       SMP2P_GET_RESTART_DONE(lhdr->flags));
-                       seq_puts(s, "\tSSR ACK Enabled and Toggled\n");
-               } else {
-                       UT_ASSERT_INT(0, ==,
-                                       SMP2P_GET_RESTART_DONE(lhdr->flags));
-                       UT_ASSERT_INT(0, ==,
-                                       SMP2P_GET_RESTART_ACK(lhdr->flags));
-
-                       UT_ASSERT_INT(0, ==,
-                                       SMP2P_GET_RESTART_DONE(rhdr->flags));
-                       UT_ASSERT_INT(0, ==,
-                                       SMP2P_GET_RESTART_ACK(rhdr->flags));
-                       seq_puts(s, "\tSSR ACK Disabled\n");
-               }
-
-               seq_puts(s, "\tOK\n");
-       } while (0);
-
-       if (failed) {
-               pr_err("%s: Failed\n", __func__);
-               seq_puts(s, "\tFailed\n");
-       }
-}
-
-/**
- * smp2p_ut_remote_ssr_ack - Verify SSR Done/ACK Feature
- *
- * @s: pointer to output file
- *
- * Triggers SSR for each subsystem.
- */
-static void smp2p_ut_remote_ssr_ack(struct seq_file *s)
-{
-       struct smp2p_interrupt_config *int_cfg;
-       int pid;
-
-       int_cfg = smp2p_get_interrupt_config();
-       if (!int_cfg) {
-               seq_puts(s,
-                       "Remote processor config unavailable\n");
-               return;
-       }
-
-       for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
-               if (!int_cfg[pid].is_configured)
-                       continue;
-
-               msm_smp2p_deinit_rmt_lpb_proc(pid);
-               smp2p_ut_remotesubsys_ssr_ack(s, pid, &int_cfg[pid]);
-               msm_smp2p_init_rmt_lpb_proc(pid);
-       }
-}
-
-static struct dentry *dent;
-static DEFINE_MUTEX(show_lock);
-
-static int debugfs_show(struct seq_file *s, void *data)
-{
-       void (*show)(struct seq_file *) = s->private;
-
-       mutex_lock(&show_lock);
-       show(s);
-       mutex_unlock(&show_lock);
-
-       return 0;
-}
-
-static int debug_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, debugfs_show, inode->i_private);
-}
-
-static const struct file_operations debug_ops = {
-       .open = debug_open,
-       .release = single_release,
-       .read = seq_read,
-       .llseek = seq_lseek,
-};
-
-void smp2p_debug_create(const char *name,
-                        void (*show)(struct seq_file *))
-{
-       struct dentry *file;
-
-       file = debugfs_create_file(name, 0444, dent, show, &debug_ops);
-       if (!file)
-               pr_err("%s: unable to create file '%s'\n", __func__, name);
-}
-
-void smp2p_debug_create_u32(const char *name, uint32_t *value)
-{
-       struct dentry *file;
-
-       file = debugfs_create_u32(name, S_IRUGO | S_IWUSR, dent, value);
-       if (!file)
-               pr_err("%s: unable to create file '%s'\n", __func__, name);
-}
-
-static int __init smp2p_debugfs_init(void)
-{
-       dent = debugfs_create_dir("smp2p_test", 0);
-       if (IS_ERR(dent))
-               return PTR_ERR(dent);
-
-       /*
-        * Add Unit Test entries.
-        *
-        * The idea with unit tests is that you can run all of them
-        * from ADB shell by doing:
-        *  adb shell
-        *  cat ut*
-        *
-        * And if particular tests fail, you can then repeatedly run the
-        * failing tests as you debug and resolve the failing test.
-        */
-       smp2p_debug_create("ut_local_basic",
-                       smp2p_ut_local_basic);
-       smp2p_debug_create("ut_local_late_open",
-                       smp2p_ut_local_late_open);
-       smp2p_debug_create("ut_local_early_open",
-                       smp2p_ut_local_early_open);
-       smp2p_debug_create("ut_mock_loopback",
-                       smp2p_ut_mock_loopback);
-       smp2p_debug_create("ut_remote_inout",
-                       smp2p_ut_remote_inout);
-       smp2p_debug_create("ut_local_in_max_entries",
-               smp2p_ut_local_in_max_entries);
-       smp2p_debug_create("ut_remote_out_max_entries",
-                       smp2p_ut_remote_out_max_entries);
-       smp2p_debug_create("ut_local_in_multiple",
-                       smp2p_ut_local_in_multiple);
-       smp2p_debug_create("ut_local_ssr_ack",
-                       smp2p_ut_local_ssr_ack);
-       smp2p_debug_create("ut_remote_ssr_ack",
-                       smp2p_ut_remote_ssr_ack);
-
-       return 0;
-}
-module_init(smp2p_debugfs_init);
diff --git a/drivers/soc/qcom/smp2p_test_common.h b/drivers/soc/qcom/smp2p_test_common.h
deleted file mode 100644 (file)
index 3be519b..0000000
+++ /dev/null
@@ -1,214 +0,0 @@
-/* drivers/soc/qcom/smp2p_test_common.h
- *
- * Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#ifndef _SMP2P_TEST_COMMON_H_
-#define _SMP2P_TEST_COMMON_H_
-
-#include <linux/debugfs.h>
-
-/**
- * Unit test assertion for logging test cases.
- *
- * @a lval
- * @b rval
- * @cmp comparison operator
- *
- * Assertion fails if (@a cmp @b) is not true which then
- * logs the function and line number where the error occurred
- * along with the values of @a and @b.
- *
- * Assumes that the following local variables exist:
- * @s - sequential output file pointer
- * @failed - set to true if test fails
- */
-#define UT_ASSERT_INT(a, cmp, b) \
-       { \
-       int a_tmp = (a); \
-       int b_tmp = (b); \
-       if (!((a_tmp)cmp(b_tmp))) { \
-               seq_printf(s, "%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
-                               __func__, __LINE__, \
-                               a_tmp, b_tmp); \
-               failed = 1; \
-               break; \
-       } \
-       }
-
-#define UT_ASSERT_PTR(a, cmp, b) \
-       { \
-       void *a_tmp = (a); \
-       void *b_tmp = (b); \
-       if (!((a_tmp)cmp(b_tmp))) { \
-               seq_printf(s, "%s:%d Fail: " #a "(%pK) " #cmp \
-                               " " #b "(%pK)\n", \
-                               __func__, __LINE__, \
-                               a_tmp, b_tmp); \
-               failed = 1; \
-               break; \
-       } \
-       }
-
-#define UT_ASSERT_UINT(a, cmp, b) \
-       { \
-       unsigned a_tmp = (a); \
-       unsigned b_tmp = (b); \
-       if (!((a_tmp)cmp(b_tmp))) { \
-               seq_printf(s, "%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
-                               __func__, __LINE__, \
-                               a_tmp, b_tmp); \
-               failed = 1; \
-               break; \
-       } \
-       }
-
-#define UT_ASSERT_HEX(a, cmp, b) \
-       { \
-       unsigned a_tmp = (a); \
-       unsigned b_tmp = (b); \
-       if (!((a_tmp)cmp(b_tmp))) { \
-               seq_printf(s, "%s:%d Fail: " #a "(%x) " #cmp " " #b "(%x)\n", \
-                               __func__, __LINE__, \
-                               a_tmp, b_tmp); \
-               failed = 1; \
-               break; \
-       } \
-       }
-
-/**
- * In-range unit test assertion for test cases.
- *
- * @a lval
- * @minv Minimum value
- * @maxv Maximum value
- *
- * Assertion fails if @a is not on the exclusive range minv, maxv
- * ((@a < @minv) or (@a > @maxv)).  In the failure case, the macro
- * logs the function and line number where the error occurred along
- * with the values of @a and @minv, @maxv.
- *
- * Assumes that the following local variables exist:
- * @s - sequential output file pointer
- * @failed - set to true if test fails
- */
-#define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
-       { \
-       int a_tmp = (a); \
-       int minv_tmp = (minv); \
-       int maxv_tmp = (maxv); \
-       if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
-               seq_printf(s, "%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
-                                #a "(%d) > " #maxv "(%d)\n", \
-                               __func__, __LINE__, \
-                               a_tmp, minv_tmp, a_tmp, maxv_tmp); \
-               failed = 1; \
-               break; \
-       } \
-       }
-
-/* Structure to track state changes for the notifier callback. */
-struct mock_cb_data {
-       bool initialized;
-       spinlock_t lock;
-       struct notifier_block nb;
-
-       /* events */
-       struct completion cb_completion;
-       int cb_count;
-       int event_open;
-       int event_entry_update;
-       struct msm_smp2p_update_notif entry_data;
-};
-
-void smp2p_debug_create(const char *name, void (*show)(struct seq_file *));
-void smp2p_debug_create_u32(const char *name, uint32_t *value);
-static inline int smp2p_test_notify(struct notifier_block *self,
-       unsigned long event, void *data);
-
-/**
- * Reset mock callback data to default values.
- *
- * @cb:  Mock callback data
- */
-static inline void mock_cb_data_reset(struct mock_cb_data *cb)
-{
-       reinit_completion(&cb->cb_completion);
-       cb->cb_count = 0;
-       cb->event_open = 0;
-       cb->event_entry_update = 0;
-       memset(&cb->entry_data, 0,
-               sizeof(struct msm_smp2p_update_notif));
-}
-
-
-/**
- * Initialize mock callback data.
- *
- * @cb:  Mock callback data
- */
-static inline void mock_cb_data_init(struct mock_cb_data *cb)
-{
-       if (!cb->initialized) {
-               init_completion(&cb->cb_completion);
-               spin_lock_init(&cb->lock);
-               cb->initialized = true;
-               cb->nb.notifier_call = smp2p_test_notify;
-               memset(&cb->entry_data, 0,
-                       sizeof(struct msm_smp2p_update_notif));
-       }
-       mock_cb_data_reset(cb);
-}
-
-/**
- * Notifier function passed into SMP2P for testing.
- *
- * @self:       Pointer to calling notifier block
- * @event:         Event
- * @data:       Event-specific data
- * @returns:    0
- */
-static inline int smp2p_test_notify(struct notifier_block *self,
-               unsigned long event, void *data)
-{
-       struct mock_cb_data *cb_data_ptr;
-       unsigned long flags;
-
-       cb_data_ptr = container_of(self, struct mock_cb_data, nb);
-
-       spin_lock_irqsave(&cb_data_ptr->lock, flags);
-
-       switch (event) {
-       case SMP2P_OPEN:
-               ++cb_data_ptr->event_open;
-               if (data) {
-                       cb_data_ptr->entry_data =
-                       *(struct msm_smp2p_update_notif *)(data);
-               }
-               break;
-       case SMP2P_ENTRY_UPDATE:
-               ++cb_data_ptr->event_entry_update;
-               if (data) {
-                       cb_data_ptr->entry_data =
-                       *(struct msm_smp2p_update_notif *)(data);
-               }
-               break;
-       default:
-               pr_err("%s Unknown event\n", __func__);
-               break;
-       }
-
-       ++cb_data_ptr->cb_count;
-       complete(&cb_data_ptr->cb_completion);
-       spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
-       return 0;
-}
-#endif /* _SMP2P_TEST_COMMON_H_ */
index f8ce53c..5c9acf7 100644 (file)
@@ -43,7 +43,7 @@ struct subsystem_descriptor {
        enum subsystem_type type;
        struct notifier_block nb;
        void *handle;
-       unsigned int ssr_irq;
+       int ssr_irq;
        struct list_head subsystem_list;
        struct work_struct work;
 };
@@ -91,7 +91,7 @@ static int subsys_notif_virt_probe(struct platform_device *pdev)
        struct device_node *child = NULL;
        const char *ss_type;
        struct resource *res;
-       struct subsystem_descriptor *subsystem;
+       struct subsystem_descriptor *subsystem = NULL;
        int ret = 0;
 
        if (!pdev) {
@@ -193,7 +193,8 @@ static int subsys_notif_virt_probe(struct platform_device *pdev)
                }
        }
 
-       INIT_WORK(&subsystem->work, subsystem_notif_wq_func);
+       if (subsystem)
+               INIT_WORK(&subsystem->work, subsystem_notif_wq_func);
        return 0;
 err:
        destroy_workqueue(ssr_wq);
index 8188433..9882d93 100644 (file)
@@ -587,11 +587,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 
        ret = wait_event_interruptible_timeout(rspi->wait,
                                               rspi->dma_callbacked, HZ);
-       if (ret > 0 && rspi->dma_callbacked)
+       if (ret > 0 && rspi->dma_callbacked) {
                ret = 0;
-       else if (!ret) {
-               dev_err(&rspi->master->dev, "DMA timeout\n");
-               ret = -ETIMEDOUT;
+       } else {
+               if (!ret) {
+                       dev_err(&rspi->master->dev, "DMA timeout\n");
+                       ret = -ETIMEDOUT;
+               }
                if (tx)
                        dmaengine_terminate_all(rspi->master->dma_tx);
                if (rx)
@@ -1303,12 +1305,36 @@ static const struct platform_device_id spi_driver_ids[] = {
 
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+       return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+       return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS     &rspi_pm_ops
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver rspi_driver = {
        .probe =        rspi_probe,
        .remove =       rspi_remove,
        .id_table =     spi_driver_ids,
        .driver         = {
                .name = "renesas_spi",
+               .pm = DEV_PM_OPS,
                .of_match_table = of_match_ptr(rspi_of_match),
        },
 };
index 3de39bd..03b5668 100644 (file)
@@ -374,7 +374,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
 
 static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 {
-       sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+       sh_msiof_write(p, STR,
+                      sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
 }
 
 static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1275,12 +1276,37 @@ static const struct platform_device_id spi_driver_ids[] = {
 };
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+       return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+       return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+                        sh_msiof_spi_resume);
+#define DEV_PM_OPS     &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver sh_msiof_spi_drv = {
        .probe          = sh_msiof_spi_probe,
        .remove         = sh_msiof_spi_remove,
        .id_table       = spi_driver_ids,
        .driver         = {
                .name           = "spi_sh_msiof",
+               .pm             = DEV_PM_OPS,
                .of_match_table = of_match_ptr(sh_msiof_match),
        },
 };
index 85c91f5..af2880d 100644 (file)
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
                goto exit_free_master;
        }
 
+       /* disabled clock may cause interrupt storm upon request */
+       tspi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(tspi->clk)) {
+               ret = PTR_ERR(tspi->clk);
+               dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+               goto exit_free_master;
+       }
+       ret = clk_prepare(tspi->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+               goto exit_free_master;
+       }
+       ret = clk_enable(tspi->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+               goto exit_free_master;
+       }
+
        spi_irq = platform_get_irq(pdev, 0);
        tspi->irq = spi_irq;
        ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
                                        tspi->irq);
-               goto exit_free_master;
-       }
-
-       tspi->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(tspi->clk)) {
-               dev_err(&pdev->dev, "can not get clock\n");
-               ret = PTR_ERR(tspi->clk);
-               goto exit_free_irq;
+               goto exit_clk_disable;
        }
 
        tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@ exit_rx_dma_free:
        tegra_slink_deinit_dma_param(tspi, true);
 exit_free_irq:
        free_irq(spi_irq, tspi);
+exit_clk_disable:
+       clk_disable(tspi->clk);
 exit_free_master:
        spi_master_put(master);
        return ret;
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)
 
        free_irq(tspi->irq, tspi);
 
+       clk_disable(tspi->clk);
+
        if (tspi->tx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, false);
 
index cba6b4e..208e07f 100644 (file)
@@ -370,6 +370,12 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
                goto out;
        }
 
+       /* requested mapping size larger than object size */
+       if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        /* requested protection bits must match our allowed protection mask */
        if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
                     calc_vm_prot_bits(PROT_MASK))) {
index 65e3425..05fc6a7 100644 (file)
@@ -3,7 +3,7 @@
  * drivers/staging/android/ion/ion.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -495,8 +495,8 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client,
        return ERR_PTR(-EINVAL);
 }
 
-static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
-                                               int id)
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+                                              int id)
 {
        struct ion_handle *handle;
 
@@ -507,20 +507,7 @@ static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
        return ERR_PTR(-EINVAL);
 }
 
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
-                                               int id)
-{
-       struct ion_handle *handle;
-
-       mutex_lock(&client->lock);
-       handle = ion_handle_get_by_id_nolock(client, id);
-       mutex_unlock(&client->lock);
-
-       return handle;
-}
-
-static bool ion_handle_validate(struct ion_client *client,
-                               struct ion_handle *handle)
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
 {
        WARN_ON(!mutex_is_locked(&client->lock));
        return idr_find(&client->idr, handle->id) == handle;
@@ -674,7 +661,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 }
 EXPORT_SYMBOL(ion_alloc);
 
-static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
 {
        bool valid_handle;
 
@@ -717,15 +704,17 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
 }
 EXPORT_SYMBOL(ion_free);
 
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
-            ion_phys_addr_t *addr, size_t *len)
+static int __ion_phys(struct ion_client *client, struct ion_handle *handle,
+                     ion_phys_addr_t *addr, size_t *len, bool lock_client)
 {
        struct ion_buffer *buffer;
        int ret;
 
-       mutex_lock(&client->lock);
+       if (lock_client)
+               mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
-               mutex_unlock(&client->lock);
+               if (lock_client)
+                       mutex_unlock(&client->lock);
                return -EINVAL;
        }
 
@@ -734,15 +723,29 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
        if (!buffer->heap->ops->phys) {
                pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
                        __func__, buffer->heap->name, buffer->heap->type);
-               mutex_unlock(&client->lock);
+               if (lock_client)
+                       mutex_unlock(&client->lock);
                return -ENODEV;
        }
-       mutex_unlock(&client->lock);
+       if (lock_client)
+               mutex_unlock(&client->lock);
        ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
        return ret;
 }
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+            ion_phys_addr_t *addr, size_t *len)
+{
+       return __ion_phys(client, handle, addr, len, true);
+}
 EXPORT_SYMBOL(ion_phys);
 
+int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
+                   ion_phys_addr_t *addr, size_t *len)
+{
+       return __ion_phys(client, handle, addr, len, false);
+}
+
 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
        void *vaddr;
@@ -1511,7 +1514,8 @@ static int ion_share_dma_buf_fd_nolock(struct ion_client *client,
        return __ion_share_dma_buf_fd(client, handle, false);
 }
 
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+static struct ion_handle *__ion_import_dma_buf(struct ion_client *client,
+                                              int fd, bool lock_client)
 {
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
@@ -1531,25 +1535,32 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
        }
        buffer = dmabuf->priv;
 
-       mutex_lock(&client->lock);
+       if (lock_client)
+               mutex_lock(&client->lock);
        /* if a handle exists for this buffer just take a reference to it */
        handle = ion_handle_lookup(client, buffer);
        if (!IS_ERR(handle)) {
                handle = ion_handle_get_check_overflow(handle);
-               mutex_unlock(&client->lock);
+               if (lock_client)
+                       mutex_unlock(&client->lock);
                goto end;
        }
 
        handle = ion_handle_create(client, buffer);
        if (IS_ERR(handle)) {
-               mutex_unlock(&client->lock);
+               if (lock_client)
+                       mutex_unlock(&client->lock);
                goto end;
        }
 
        ret = ion_handle_add(client, handle);
-       mutex_unlock(&client->lock);
+       if (lock_client)
+               mutex_unlock(&client->lock);
        if (ret) {
-               ion_handle_put(handle);
+               if (lock_client)
+                       ion_handle_put(handle);
+               else
+                       ion_handle_put_nolock(handle);
                handle = ERR_PTR(ret);
        }
 
@@ -1557,8 +1568,18 @@ end:
        dma_buf_put(dmabuf);
        return handle;
 }
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+       return __ion_import_dma_buf(client, fd, true);
+}
 EXPORT_SYMBOL(ion_import_dma_buf);
 
+struct ion_handle *ion_import_dma_buf_nolock(struct ion_client *client, int fd)
+{
+       return __ion_import_dma_buf(client, fd, false);
+}
+
 static int ion_sync_for_device(struct ion_client *client, int fd)
 {
        struct dma_buf *dmabuf;
@@ -2201,3 +2222,18 @@ void __init ion_reserve(struct ion_platform_data *data)
                        data->heaps[i].size);
        }
 }
+
+void lock_client(struct ion_client *client)
+{
+       mutex_lock(&client->lock);
+}
+
+void unlock_client(struct ion_client *client)
+{
+       mutex_unlock(&client->lock);
+}
+
+struct ion_buffer *get_buffer(struct ion_handle *handle)
+{
+       return handle->buffer;
+}
index f2f2ca1..77bd0f8 100644 (file)
@@ -2,7 +2,7 @@
  * drivers/staging/android/ion/ion_priv.h
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -505,9 +505,34 @@ int ion_walk_heaps(struct ion_client *client, int heap_id,
                        enum ion_heap_type type, void *data,
                        int (*f)(struct ion_heap *heap, void *data));
 
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
-                                       int id);
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+                                              int id);
 
 int ion_handle_put(struct ion_handle *handle);
 
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle);
+
+void lock_client(struct ion_client *client);
+
+void unlock_client(struct ion_client *client);
+
+struct ion_buffer *get_buffer(struct ion_handle *handle);
+
+/**
+ * This function is same as ion_free() except it won't use client->lock.
+ */
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * This function is same as ion_phys() except it won't use client->lock.
+ */
+int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
+                   ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * This function is same as ion_import_dma_buf() except it won't use
+ * client->lock.
+ */
+struct ion_handle *ion_import_dma_buf_nolock(struct ion_client *client, int fd);
+
 #endif /* _ION_PRIV_H */
index 403dade..0d851ff 100644 (file)
@@ -478,7 +478,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
 err_free_sg2:
        /* We failed to zero buffers. Bypass pool */
-       buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+       buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
 
        if (vmid > 0)
                ion_system_secure_heap_unassign_sg(table, vmid);
index 116a6fe..d3069fe 100644 (file)
@@ -153,7 +153,13 @@ EXPORT_SYMBOL(msm_ion_client_create);
 int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
                        void *vaddr, unsigned long len, unsigned int cmd)
 {
-       return ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
+       int ret;
+
+       lock_client(client);
+       ret = ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
+       unlock_client(client);
+
+       return ret;
 }
 EXPORT_SYMBOL(msm_ion_do_cache_op);
 
@@ -162,7 +168,13 @@ int msm_ion_do_cache_offset_op(
                void *vaddr, unsigned int offset, unsigned long len,
                unsigned int cmd)
 {
-       return ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
+       int ret;
+
+       lock_client(client);
+       ret = ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
+       unlock_client(client);
+
+       return ret;
 }
 EXPORT_SYMBOL(msm_ion_do_cache_offset_op);
 
@@ -179,7 +191,7 @@ static int ion_no_pages_cache_ops(struct ion_client *client,
        ion_phys_addr_t buff_phys_start = 0;
        size_t buf_length = 0;
 
-       ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
+       ret = ion_phys_nolock(client, handle, &buff_phys_start, &buf_length);
        if (ret)
                return -EINVAL;
 
@@ -293,9 +305,10 @@ static int ion_pages_cache_ops(struct ion_client *client,
        int i;
        unsigned int len = 0;
        void (*op)(const void *, const void *);
+       struct ion_buffer *buffer;
 
-
-       table = ion_sg_table(client, handle);
+       buffer = get_buffer(handle);
+       table = buffer->sg_table;
        if (IS_ERR_OR_NULL(table))
                return PTR_ERR(table);
 
@@ -344,10 +357,18 @@ int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
        unsigned long flags;
        struct sg_table *table;
        struct page *page;
+       struct ion_buffer *buffer;
 
-       ret = ion_handle_get_flags(client, handle, &flags);
-       if (ret)
+       if (!ion_handle_validate(client, handle)) {
+               pr_err("%s: invalid handle passed to %s.\n",
+                      __func__, __func__);
                return -EINVAL;
+       }
+
+       buffer = get_buffer(handle);
+       mutex_lock(&buffer->lock);
+       flags = buffer->flags;
+       mutex_unlock(&buffer->lock);
 
        if (!ION_IS_CACHED(flags))
                return 0;
@@ -355,7 +376,7 @@ int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
        if (get_secure_vmid(flags) > 0)
                return 0;
 
-       table = ion_sg_table(client, handle);
+       table = buffer->sg_table;
 
        if (IS_ERR_OR_NULL(table))
                return PTR_ERR(table);
@@ -737,19 +758,23 @@ long msm_ion_custom_ioctl(struct ion_client *client,
                int ret;
                struct mm_struct *mm = current->active_mm;
 
+               lock_client(client);
                if (data.flush_data.handle > 0) {
-                       handle = ion_handle_get_by_id(client,
-                                               (int)data.flush_data.handle);
+                       handle = ion_handle_get_by_id_nolock(
+                                       client, (int)data.flush_data.handle);
                        if (IS_ERR(handle)) {
                                pr_info("%s: Could not find handle: %d\n",
                                        __func__, (int)data.flush_data.handle);
+                               unlock_client(client);
                                return PTR_ERR(handle);
                        }
                } else {
-                       handle = ion_import_dma_buf(client, data.flush_data.fd);
+                       handle = ion_import_dma_buf_nolock(client,
+                                                          data.flush_data.fd);
                        if (IS_ERR(handle)) {
                                pr_info("%s: Could not import handle: %pK\n",
                                        __func__, handle);
+                               unlock_client(client);
                                return -EINVAL;
                        }
                }
@@ -772,8 +797,9 @@ long msm_ion_custom_ioctl(struct ion_client *client,
                }
                up_read(&mm->mmap_sem);
 
-               ion_free(client, handle);
+               ion_free_nolock(client, handle);
 
+               unlock_client(client);
                if (ret < 0)
                        return ret;
                break;
index d6c4982..9a876ce 100644 (file)
@@ -5031,7 +5031,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
                        goto SD_Execute_Write_Cmd_Failed;
                }
 
-               rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+               retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
                if (retval != STATUS_SUCCESS) {
                        rtsx_trace(chip);
                        goto SD_Execute_Write_Cmd_Failed;
index 47e249d..3184e02 100644 (file)
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
-       int j = DIV_ROUND_UP(len, 2), rc;
-
-       rc = hex2bin(dst, src, j);
-       if (rc < 0)
-               pr_debug("CHAP string contains non hex digit symbols\n");
-
-       dst[j] = '\0';
-       return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
-       int i;
-
-       for (i = 0; i < src_len; i++) {
-               sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
-       }
-}
-
 static void chap_gen_challenge(
        struct iscsi_conn *conn,
        int caller,
@@ -59,7 +38,7 @@ static void chap_gen_challenge(
        memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
 
        get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
-       chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+       bin2hex(challenge_asciihex, chap->challenge,
                                CHAP_CHALLENGE_LENGTH);
        /*
         * Set CHAP_C, and copy the generated challenge into c_str.
@@ -241,9 +220,16 @@ static int chap_server_compute_md5(
                pr_err("Could not find CHAP_R.\n");
                goto out;
        }
+       if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+               pr_err("Malformed CHAP_R\n");
+               goto out;
+       }
+       if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+               pr_err("Malformed CHAP_R\n");
+               goto out;
+       }
 
        pr_debug("[server] Got CHAP_R=%s\n", chap_r);
-       chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
 
        tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
@@ -292,7 +278,7 @@ static int chap_server_compute_md5(
        }
        crypto_free_hash(tfm);
 
-       chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+       bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
        pr_debug("[server] MD5 Server Digest: %s\n", response);
 
        if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -348,9 +334,7 @@ static int chap_server_compute_md5(
                pr_err("Could not find CHAP_C.\n");
                goto out;
        }
-       pr_debug("[server] Got CHAP_C=%s\n", challenge);
-       challenge_len = chap_string_to_hex(challenge_binhex, challenge,
-                               strlen(challenge));
+       challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
        if (!challenge_len) {
                pr_err("Unable to convert incoming challenge\n");
                goto out;
@@ -359,6 +343,11 @@ static int chap_server_compute_md5(
                pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
                goto out;
        }
+       if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+               pr_err("Malformed CHAP_C\n");
+               goto out;
+       }
+       pr_debug("[server] Got CHAP_C=%s\n", challenge);
        /*
         * During mutual authentication, the CHAP_C generated by the
         * initiator must not match the original CHAP_C generated by
@@ -433,7 +422,7 @@ static int chap_server_compute_md5(
        /*
         * Convert response from binary hex to ascii hext.
         */
-       chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+       bin2hex(response, digest, MD5_SIGNATURE_SIZE);
        *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
                        response);
        *nr_out_len += 1;
index 63e1dcc..761b065 100644 (file)
@@ -637,8 +637,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
                none = strstr(buf1, NONE);
                if (none)
                        goto out;
-               strncat(buf1, ",", strlen(","));
-               strncat(buf1, NONE, strlen(NONE));
+               strlcat(buf1, "," NONE, sizeof(buf1));
                if (iscsi_update_param_value(param, buf1) < 0)
                        return -EINVAL;
        }
index be4eedc..236c4eb 100644 (file)
@@ -284,10 +284,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,
 
        mutex_lock(&tz->lock);
 
-       if (mode == THERMAL_DEVICE_ENABLED)
+       if (mode == THERMAL_DEVICE_ENABLED) {
                tz->polling_delay = data->polling_delay;
-       else
+               tz->passive_delay = data->passive_delay;
+       } else {
                tz->polling_delay = 0;
+               tz->passive_delay = 0;
+       }
 
        mutex_unlock(&tz->lock);
 
index 4d180c9..1a14948 100644 (file)
@@ -629,8 +629,10 @@ static int serial_config(struct pcmcia_device * link)
            (link->has_func_id) &&
            (link->socket->pcmcia_pfc == 0) &&
            ((link->func_id == CISTPL_FUNCID_MULTI) ||
-            (link->func_id == CISTPL_FUNCID_SERIAL)))
-               pcmcia_loop_config(link, serial_check_for_multi, info);
+            (link->func_id == CISTPL_FUNCID_SERIAL))) {
+               if (pcmcia_loop_config(link, serial_check_for_multi, info))
+                       goto failed;
+       }
 
        /*
         * Apply any multi-port quirk.
index d3e3d42..0040c29 100644 (file)
@@ -1068,8 +1068,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
        /* Get the address of the host memory buffer.
         */
        bdp = pinfo->rx_cur;
-       while (bdp->cbd_sc & BD_SC_EMPTY)
-               ;
+       if (bdp->cbd_sc & BD_SC_EMPTY)
+               return NO_POLL_CHAR;
 
        /* If the buffer address is in the CPM DPRAM, don't
         * convert it.
@@ -1104,7 +1104,11 @@ static int cpm_get_poll_char(struct uart_port *port)
                poll_chars = 0;
        }
        if (poll_chars <= 0) {
-               poll_chars = poll_wait_key(poll_buf, pinfo);
+               int ret = poll_wait_key(poll_buf, pinfo);
+
+               if (ret == NO_POLL_CHAR)
+                       return ret;
+               poll_chars = ret;
                pollp = poll_buf;
        }
        poll_chars--;
index 07ede98..f5f46c1 100644 (file)
@@ -1997,6 +1997,14 @@ static int serial_imx_probe(struct platform_device *pdev)
                                       dev_name(&pdev->dev), sport);
                if (ret)
                        return ret;
+
+               ret = devm_request_irq(&pdev->dev, rtsirq, imx_rtsint, 0,
+                                      dev_name(&pdev->dev), sport);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+                               ret);
+                       return ret;
+               }
        } else {
                ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
                                       dev_name(&pdev->dev), sport);
index 97d5a74..a86bc7a 100644 (file)
@@ -31,6 +31,8 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
+#include <linux/nospec.h>
+
 #include <linux/kbd_kern.h>
 #include <linux/vt_kern.h>
 #include <linux/kbd_diacr.h>
@@ -703,6 +705,8 @@ int vt_ioctl(struct tty_struct *tty,
                if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
                        ret = -ENXIO;
                else {
+                       vsa.console = array_index_nospec(vsa.console,
+                                                        MAX_NR_CONSOLES + 1);
                        vsa.console--;
                        console_lock();
                        ret = vc_allocate(vsa.console);
index 4380e4f..61ea879 100644 (file)
@@ -453,7 +453,7 @@ static int clear_wdm_read_flag(struct wdm_device *desc)
 
        set_bit(WDM_RESPONDING, &desc->flags);
        spin_unlock_irq(&desc->iuspin);
-       rv = usb_submit_urb(desc->response, GFP_ATOMIC);
+       rv = usb_submit_urb(desc->response, GFP_KERNEL);
        spin_lock_irq(&desc->iuspin);
        if (rv) {
                dev_err(&desc->intf->dev,
index ad2e6d2..5e0af15 100644 (file)
@@ -1289,10 +1289,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        struct async *as = NULL;
        struct usb_ctrlrequest *dr = NULL;
        unsigned int u, totlen, isofrmlen;
-       int i, ret, is_in, num_sgs = 0, ifnum = -1;
+       int i, ret, num_sgs = 0, ifnum = -1;
        int number_of_packets = 0;
        unsigned int stream_id = 0;
        void *buf;
+       bool is_in;
+       bool allow_short = false;
+       bool allow_zero = false;
        unsigned long mask =    USBDEVFS_URB_SHORT_NOT_OK |
                                USBDEVFS_URB_BULK_CONTINUATION |
                                USBDEVFS_URB_NO_FSBR |
@@ -1326,6 +1329,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        u = 0;
        switch (uurb->type) {
        case USBDEVFS_URB_TYPE_CONTROL:
+               if (is_in)
+                       allow_short = true;
                if (!usb_endpoint_xfer_control(&ep->desc))
                        return -EINVAL;
                /* min 8 byte setup packet */
@@ -1366,6 +1371,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                break;
 
        case USBDEVFS_URB_TYPE_BULK:
+               if (!is_in)
+                       allow_zero = true;
+               else
+                       allow_short = true;
                switch (usb_endpoint_type(&ep->desc)) {
                case USB_ENDPOINT_XFER_CONTROL:
                case USB_ENDPOINT_XFER_ISOC:
@@ -1386,6 +1395,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                if (!usb_endpoint_xfer_int(&ep->desc))
                        return -EINVAL;
  interrupt_urb:
+               if (!is_in)
+                       allow_zero = true;
+               else
+                       allow_short = true;
                break;
 
        case USBDEVFS_URB_TYPE_ISO:
@@ -1512,16 +1525,21 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
        if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
                u |= URB_ISO_ASAP;
-       if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+       if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
                u |= URB_SHORT_NOT_OK;
        if (uurb->flags & USBDEVFS_URB_NO_FSBR)
                u |= URB_NO_FSBR;
-       if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+       if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
                u |= URB_ZERO_PACKET;
        if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
                u |= URB_NO_INTERRUPT;
        as->urb->transfer_flags = u;
 
+       if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+               dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+       if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+               dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
        as->urb->transfer_buffer_length = uurb->buffer_length;
        as->urb->setup_packet = (unsigned char *)dr;
        dr = NULL;
index 5532246..7dae981 100644 (file)
@@ -509,7 +509,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
        struct device *dev;
        struct usb_device *udev;
        int retval = 0;
-       int lpm_disable_error = -ENODEV;
 
        if (!iface)
                return -ENODEV;
@@ -530,16 +529,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
 
        iface->condition = USB_INTERFACE_BOUND;
 
-       /* See the comment about disabling LPM in usb_probe_interface(). */
-       if (driver->disable_hub_initiated_lpm) {
-               lpm_disable_error = usb_unlocked_disable_lpm(udev);
-               if (lpm_disable_error) {
-                       dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
-                                       __func__, driver->name);
-                       return -ENOMEM;
-               }
-       }
-
        /* Claimed interfaces are initially inactive (suspended) and
         * runtime-PM-enabled, but only if the driver has autosuspend
         * support.  Otherwise they are marked active, to prevent the
@@ -558,9 +547,20 @@ int usb_driver_claim_interface(struct usb_driver *driver,
        if (device_is_registered(dev))
                retval = device_bind_driver(dev);
 
-       /* Attempt to re-enable USB3 LPM, if the disable was successful. */
-       if (!lpm_disable_error)
-               usb_unlocked_enable_lpm(udev);
+       if (retval) {
+               dev->driver = NULL;
+               usb_set_intfdata(iface, NULL);
+               iface->needs_remote_wakeup = 0;
+               iface->condition = USB_INTERFACE_UNBOUND;
+
+               /*
+                * Unbound interfaces are always runtime-PM-disabled
+                * and runtime-PM-suspended
+                */
+               if (driver->supports_autosuspend)
+                       pm_runtime_disable(dev);
+               pm_runtime_set_suspended(dev);
+       }
 
        return retval;
 }
index 062677f..74087b2 100644 (file)
@@ -95,6 +95,8 @@ struct usb_host_interface *usb_find_alt_setting(
        struct usb_interface_cache *intf_cache = NULL;
        int i;
 
+       if (!config)
+               return NULL;
        for (i = 0; i < config->desc.bNumInterfaces; i++) {
                if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
                                == iface_num) {
index 1e23738..5f54779 100644 (file)
@@ -4059,10 +4059,13 @@ static int dwc3_msm_pm_suspend(struct device *dev)
        }
 
        ret = dwc3_msm_suspend(mdwc, false);
-       if (!ret)
-               atomic_set(&mdwc->pm_suspended, 1);
+       if (ret)
+               return ret;
 
-       return ret;
+       flush_work(&mdwc->bus_vote_w);
+       atomic_set(&mdwc->pm_suspended, 1);
+
+       return 0;
 }
 
 static int dwc3_msm_pm_freeze(struct device *dev)
@@ -4091,10 +4094,13 @@ static int dwc3_msm_pm_freeze(struct device *dev)
        mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
 
        ret = dwc3_msm_suspend(mdwc, true);
-       if (!ret)
-               atomic_set(&mdwc->pm_suspended, 1);
+       if (ret)
+               return ret;
 
-       return ret;
+       flush_work(&mdwc->bus_vote_w);
+       atomic_set(&mdwc->pm_suspended, 1);
+
+       return 0;
 }
 
 static int dwc3_msm_pm_resume(struct device *dev)
index d8cc5fd..2d8d5e2 100644 (file)
@@ -823,6 +823,11 @@ static void send_file_work(struct work_struct *data)
        offset = dev->xfer_file_offset;
        count = dev->xfer_file_length;
 
+       if (count < 0) {
+               dev->xfer_result = -EINVAL;
+               return;
+       }
+
        DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
 
        if (dev->xfer_send_header) {
@@ -939,6 +944,11 @@ static void receive_file_work(struct work_struct *data)
        offset = dev->xfer_file_offset;
        count = dev->xfer_file_length;
 
+       if (count < 0) {
+               dev->xfer_result = -EINVAL;
+               return;
+       }
+
        DBG(cdev, "receive_file_work(%lld)\n", count);
        if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
                DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
index 251ebdc..6882c96 100644 (file)
@@ -583,7 +583,7 @@ static void gs_rx_push(struct work_struct *w)
                }
 
                /* push data to (open) tty */
-               if (req->actual) {
+               if (req->actual && tty) {
                        char            *packet = req->buf;
                        unsigned        size = req->actual;
                        unsigned        n;
index 6ba122c..95df2b3 100644 (file)
@@ -1066,12 +1066,15 @@ static struct usb_gadget_ops fotg210_gadget_ops = {
 static int fotg210_udc_remove(struct platform_device *pdev)
 {
        struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+       int i;
 
        usb_del_gadget_udc(&fotg210->gadget);
        iounmap(fotg210->reg);
        free_irq(platform_get_irq(pdev, 0), fotg210);
 
        fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+       for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+               kfree(fotg210->ep[i]);
        kfree(fotg210);
 
        return 0;
@@ -1102,7 +1105,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        /* initialize udc */
        fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
        if (fotg210 == NULL)
-               goto err_alloc;
+               goto err;
 
        for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
                _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1114,7 +1117,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        fotg210->reg = ioremap(res->start, resource_size(res));
        if (fotg210->reg == NULL) {
                pr_err("ioremap error.\n");
-               goto err_map;
+               goto err_alloc;
        }
 
        spin_lock_init(&fotg210->lock);
@@ -1162,7 +1165,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
                                GFP_KERNEL);
        if (fotg210->ep0_req == NULL)
-               goto err_req;
+               goto err_map;
 
        fotg210_init(fotg210);
 
@@ -1190,12 +1193,14 @@ err_req:
        fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
 
 err_map:
-       if (fotg210->reg)
-               iounmap(fotg210->reg);
+       iounmap(fotg210->reg);
 
 err_alloc:
+       for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+               kfree(fotg210->ep[i]);
        kfree(fotg210);
 
+err:
        return ret;
 }
 
index 250a444..744157e 100644 (file)
@@ -1197,17 +1197,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                                temp = readl(port_array[wIndex]);
                                break;
                        }
-
-                       /* Software should not attempt to set
-                        * port link state above '3' (U3) and the port
-                        * must be enabled.
-                        */
-                       if ((temp & PORT_PE) == 0 ||
-                               (link_state > USB_SS_PORT_LS_U3)) {
-                               xhci_warn(xhci, "Cannot set link state.\n");
+                       /* Port must be enabled */
+                       if (!(temp & PORT_PE)) {
+                               retval = -ENODEV;
+                               break;
+                       }
+                       /* Can't set port link state above '3' (U3) */
+                       if (link_state > USB_SS_PORT_LS_U3) {
+                               xhci_warn(xhci, "Cannot set port %d link state %d\n",
+                                        wIndex, link_state);
                                goto error;
                        }
-
                        if (link_state == USB_SS_PORT_LS_U3) {
                                slot_id = xhci_find_slot_id_by_port(hcd, xhci,
                                                wIndex + 1);
index cbf3be6..d6e2199 100644 (file)
@@ -174,6 +174,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
            (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
                xhci->quirks |= XHCI_MISSING_CAS;
index 4670e2f..b8f3e41 100644 (file)
@@ -279,13 +279,26 @@ config USB_QTI_KS_BRIDGE
          To compile this driver as a module, choose M here: the module
          will be called ks_bridge. If unsure, choose N.
 
+config USB_QCOM_IPC_BRIDGE
+       tristate "USB QTI IPC bridge driver"
+       depends on USB
+       depends on USB_QCOM_DIAG_BRIDGE
+       help
+         Say Y here if you have a QTI modem device connected via USB that
+         will be bridged in kernel space. This driver works as a transport
+         layer for IPC router module that enables communication between
+         APPS processor and MODEM processor. This config depends on
+         USB_QCOM_DIAG_BRIDGE because the core USB support for the transports
+         of both diag and IPC messages is in the same driver. Select this
+         config manually if you want to compile HSIC transport IPC router.
+
 config USB_QCOM_DIAG_BRIDGE
        tristate "USB QTI diagnostic bridge driver"
        depends on USB
        help
          Say Y here if you have a QTI modem device connected via USB that
          will be bridged in kernel space. This driver communicates with the
-         diagnostic and QMI interfaces and allows for bridging with the diag
-         forwarding driver for diag interface and IPC router for QMI interface.
+         diagnostic interface and allows for bridging with the diag forwarding
+         driver.
          To compile this driver as a module, choose M here: the
          module will be called diag_bridge.  If unsure, choose N.
index e8e8702..5594a4a 100644 (file)
@@ -431,6 +431,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
        spin_unlock_irqrestore(&dev->lock, flags);
        mutex_unlock(&dev->io_mutex);
 
+       if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+               return -EIO;
+
        return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
index 813035f..7d25267 100644 (file)
@@ -408,12 +408,20 @@ static int kobil_tiocmget(struct tty_struct *tty)
                          transfer_buffer_length,
                          KOBIL_TIMEOUT);
 
-       dev_dbg(&port->dev, "%s - Send get_status_line_state URB returns: %i. Statusline: %02x\n",
-               __func__, result, transfer_buffer[0]);
+       dev_dbg(&port->dev, "Send get_status_line_state URB returns: %i\n",
+                       result);
+       if (result < 1) {
+               if (result >= 0)
+                       result = -EIO;
+               goto out_free;
+       }
+
+       dev_dbg(&port->dev, "Statusline: %02x\n", transfer_buffer[0]);
 
        result = 0;
        if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
                result = TIOCM_DSR;
+out_free:
        kfree(transfer_buffer);
        return result;
 }
index 2674da4..6d6acf2 100644 (file)
@@ -87,7 +87,8 @@ DEVICE(moto_modem, MOTO_IDS);
 
 /* Motorola Tetra driver */
 #define MOTOROLA_TETRA_IDS()                   \
-       { USB_DEVICE(0x0cad, 0x9011) }  /* Motorola Solutions TETRA PEI */
+       { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+       { USB_DEVICE(0x0cad, 0x9012) }  /* MTP6550 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
 /* Novatel Wireless GPS driver */
index b66faaf..4019c11 100644 (file)
@@ -230,7 +230,7 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
 
        result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
                                    0, secd, sizeof(*secd));
-       if (result < sizeof(*secd)) {
+       if (result < (int)sizeof(*secd)) {
                dev_err(dev, "Can't read security descriptor or "
                        "not enough data: %d\n", result);
                goto out;
index 1212b4b..e9ff710 100644 (file)
@@ -875,6 +875,7 @@ error_get_version:
 error_rc_add:
        usb_put_intf(iface);
        usb_put_dev(hwarc->usb_dev);
+       kfree(hwarc);
 error_alloc:
        uwb_rc_put(uwb_rc);
 error_rc_alloc:
index 63c4842..46e0e8b 100644 (file)
@@ -332,6 +332,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
 extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
 extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
 
+extern const u8 aty_postdividers[8];
+
 
     /*
      *  Hardware cursor support
@@ -358,7 +360,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
 
 extern void aty_reset_engine(const struct atyfb_par *par);
 extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8   aty_ld_pll_ct(int offset, const struct atyfb_par *par);
 
 void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
 void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
index 7f658fa..9755a0e 100644 (file)
@@ -3093,17 +3093,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
                /*
                 * PLL Reference Divider M:
                 */
-               M = pll_regs[2];
+               M = pll_regs[PLL_REF_DIV];
 
                /*
                 * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
                 */
-               N = pll_regs[7 + (clock_cntl & 3)];
+               N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
 
                /*
                 * PLL Post Divider P (Dependent on CLOCK_CNTL):
                 */
-               P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+               P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+                                    ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
 
                /*
                 * PLL Divider Q:
index 51f29d6..af54256 100644 (file)
@@ -114,7 +114,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
  */
 
 #define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
 
 static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
 {
@@ -221,7 +221,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
                pll->vclk_post_div += (q <  64*8);
                pll->vclk_post_div += (q <  32*8);
        }
-       pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+       pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
        //    pll->vclk_post_div <<= 6;
        pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
        pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@@ -512,7 +512,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                u8 mclk_fb_div, pll_ext_cntl;
                pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
                pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
-               pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+               pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
                mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
                if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
                        mclk_fb_div <<= 1;
@@ -534,7 +534,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                xpost_div += (q <  64*8);
                xpost_div += (q <  32*8);
        }
-       pll->ct.xclk_post_div_real = postdividers[xpost_div];
+       pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
        pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
 
 #ifdef CONFIG_PPC
@@ -583,7 +583,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                        mpost_div += (q <  64*8);
                        mpost_div += (q <  32*8);
                }
-               sclk_post_div_real = postdividers[mpost_div];
+               sclk_post_div_real = aty_postdividers[mpost_div];
                pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
                pll->ct.spll_cntl2 = mpost_div << 4;
 #ifdef DEBUG
index 0d41f41..69b9149 100644 (file)
@@ -2043,10 +2043,9 @@ static void __mdss_dsi_calc_dfps_delay(struct mdss_panel_data *pdata)
 }
 
 static int __mdss_dsi_dfps_calc_clks(struct mdss_panel_data *pdata,
-               int new_fps)
+               u64 new_clk_rate)
 {
        int rc = 0;
-       u64 clk_rate;
        struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
        struct mdss_panel_info *pinfo;
        u32 phy_rev;
@@ -2066,14 +2065,9 @@ static int __mdss_dsi_dfps_calc_clks(struct mdss_panel_data *pdata,
        pinfo = &pdata->panel_info;
        phy_rev = ctrl_pdata->shared_data->phy_rev;
 
-       rc = mdss_dsi_clk_div_config
-               (&ctrl_pdata->panel_data.panel_info, new_fps);
-       if (rc) {
-               pr_err("%s: unable to initialize the clk dividers\n",
-                               __func__);
-               return rc;
-       }
-
+       pinfo->clk_rate = new_clk_rate;
+       pinfo->mipi.dsi_pclk_rate = mdss_dsi_get_pclk_rate(pinfo,
+               new_clk_rate);
        __mdss_dsi_dyn_refresh_config(ctrl_pdata);
 
        if (phy_rev == DSI_PHY_REV_20)
@@ -2086,9 +2080,8 @@ static int __mdss_dsi_dfps_calc_clks(struct mdss_panel_data *pdata,
        ctrl_pdata->byte_clk_rate_bkp = ctrl_pdata->byte_clk_rate;
 
        ctrl_pdata->pclk_rate = pinfo->mipi.dsi_pclk_rate;
-       clk_rate = pinfo->clk_rate;
-       do_div(clk_rate, 8U);
-       ctrl_pdata->byte_clk_rate = (u32) clk_rate;
+       do_div(new_clk_rate, 8U);
+       ctrl_pdata->byte_clk_rate = (u32) new_clk_rate;
 
        pr_debug("byte_rate=%i\n", ctrl_pdata->byte_clk_rate);
        pr_debug("pclk_rate=%i\n", ctrl_pdata->pclk_rate);
@@ -2096,8 +2089,7 @@ static int __mdss_dsi_dfps_calc_clks(struct mdss_panel_data *pdata,
        return rc;
 }
 
-static int __mdss_dsi_dfps_update_clks(struct mdss_panel_data *pdata,
-               int new_fps)
+static int __mdss_dsi_dfps_update_clks(struct mdss_panel_data *pdata)
 {
        struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
        struct mdss_dsi_ctrl_pdata *sctrl_pdata = NULL;
@@ -2248,12 +2240,6 @@ static int __mdss_dsi_dfps_update_clks(struct mdss_panel_data *pdata,
        clk_disable_unprepare(ctrl_pdata->pll_byte_clk);
        clk_disable_unprepare(ctrl_pdata->pll_pixel_clk);
 
-       /* update new fps that at this point is already updated in hw */
-       pinfo->current_fps = new_fps;
-       if (sctrl_pdata) {
-               spinfo->current_fps = new_fps;
-       }
-
        return rc;
 
 dfps_timeout:
@@ -2330,13 +2316,65 @@ static void mdss_dsi_avr_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
        MDSS_XLOG(ctrl_pdata->ndx, enabled, data);
 }
 
-static int mdss_dsi_dfps_config(struct mdss_panel_data *pdata, int new_fps)
+static int __mdss_dsi_dynamic_clock_switch(struct mdss_panel_data *pdata,
+       u64 new_clk_rate)
 {
        int rc = 0;
        struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
        struct mdss_panel_info *pinfo;
        u32 phy_rev;
-       u32 frame_rate_bkp;
+       u64 clk_rate_bkp;
+
+       pr_debug("%s+:\n", __func__);
+
+       ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+                       panel_data);
+
+       phy_rev = ctrl_pdata->shared_data->phy_rev;
+       pinfo = &pdata->panel_info;
+
+       /* get the fps configured in HW */
+       clk_rate_bkp = pinfo->clk_rate;
+
+       __mdss_dsi_mask_dfps_errors(ctrl_pdata, true);
+
+       if (phy_rev == DSI_PHY_REV_20) {
+               rc = mdss_dsi_phy_calc_timing_param(pinfo, phy_rev,
+                               new_clk_rate);
+               if (rc) {
+                       pr_err("PHY calculations failed-%lld\n", new_clk_rate);
+                       goto end_update;
+               }
+       }
+
+       rc = __mdss_dsi_dfps_calc_clks(pdata, new_clk_rate);
+       if (rc) {
+               pr_err("error calculating clocks for %lld\n", new_clk_rate);
+               goto error_clks;
+       }
+
+       rc = __mdss_dsi_dfps_update_clks(pdata);
+       if (rc) {
+               pr_err("Dynamic refresh failed-%lld\n", new_clk_rate);
+               goto error_dfps;
+       }
+       return rc;
+error_dfps:
+       if (__mdss_dsi_dfps_calc_clks(pdata, clk_rate_bkp))
+               pr_err("error reverting clock calculations for %lld\n",
+                               clk_rate_bkp);
+error_clks:
+       if (mdss_dsi_phy_calc_timing_param(pinfo, phy_rev, clk_rate_bkp))
+               pr_err("Unable to revert phy timing-%lld\n", clk_rate_bkp);
+end_update:
+       return rc;
+}
+
+static int mdss_dsi_dfps_config(struct mdss_panel_data *pdata, int new_fps)
+{
+       int rc = 0;
+       struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+       struct mdss_panel_info *pinfo;
 
        pr_debug("%s+:\n", __func__);
 
@@ -2353,12 +2391,8 @@ static int mdss_dsi_dfps_config(struct mdss_panel_data *pdata, int new_fps)
                return -EINVAL;
        }
 
-       phy_rev = ctrl_pdata->shared_data->phy_rev;
        pinfo = &pdata->panel_info;
 
-       /* get the fps configured in HW */
-       frame_rate_bkp = pinfo->current_fps;
-
        if (new_fps == pinfo->current_fps) {
                /*
                 * This is unlikely as mdss driver checks for previously
@@ -2374,39 +2408,45 @@ static int mdss_dsi_dfps_config(struct mdss_panel_data *pdata, int new_fps)
                __mdss_dsi_update_video_mode_total(pdata, new_fps);
        } else if (pinfo->dfps_update == DFPS_IMMEDIATE_CLK_UPDATE_MODE) {
                /* Clock update method */
+               u64 new_clk_rate = mdss_dsi_calc_bitclk
+                       (&ctrl_pdata->panel_data.panel_info, new_fps);
+               if (!new_clk_rate) {
+                       pr_err("%s: unable to get the new bit clock rate\n",
+                                       __func__);
+                       rc = -EINVAL;
+                       goto end_update;
+               }
 
-               __mdss_dsi_mask_dfps_errors(ctrl_pdata, true);
+               rc = __mdss_dsi_dynamic_clock_switch(pdata, new_clk_rate);
+               if (!rc) {
+                       struct mdss_dsi_ctrl_pdata *mctrl_pdata = NULL;
+                       struct mdss_panel_info *mpinfo = NULL;
 
-               if (phy_rev == DSI_PHY_REV_20) {
-                       rc = mdss_dsi_phy_calc_timing_param(pinfo, phy_rev,
-                                       new_fps);
-                       if (rc) {
-                               pr_err("PHY calculations failed-%d\n", new_fps);
+                       if (mdss_dsi_is_hw_config_split
+                               (ctrl_pdata->shared_data) &&
+                               mdss_dsi_is_ctrl_clk_master(ctrl_pdata))
                                goto end_update;
-                       }
-               }
 
-               rc = __mdss_dsi_dfps_calc_clks(pdata, new_fps);
-               if (rc) {
-                       pr_err("error calculating clocks for %d\n", new_fps);
-                       goto error_clks;
-               }
+                       if (mdss_dsi_is_hw_config_split
+                               (ctrl_pdata->shared_data) &&
+                               mdss_dsi_is_ctrl_clk_slave(ctrl_pdata)) {
+                               mctrl_pdata = mdss_dsi_get_ctrl_clk_master();
+                               if (IS_ERR_OR_NULL(mctrl_pdata)) {
+                                       pr_err("Invalid mctrl_pdata\n");
+                                       goto end_update;
+                               }
 
-               rc = __mdss_dsi_dfps_update_clks(pdata, new_fps);
-               if (rc) {
-                       pr_err("Dynamic refresh failed-%d\n", new_fps);
-                       goto error_dfps;
+                               mpinfo = &mctrl_pdata->panel_data.panel_info;
+                       }
+                       /*
+                        * update new fps that at this point is already
+                        * updated in hw
+                        */
+                       pinfo->current_fps = new_fps;
+                       if (mctrl_pdata && mpinfo)
+                               mpinfo->current_fps = new_fps;
                }
        }
-
-       return rc;
-error_dfps:
-       if (__mdss_dsi_dfps_calc_clks(pdata, frame_rate_bkp))
-               pr_err("error reverting clock calculations for %d\n",
-                               frame_rate_bkp);
-error_clks:
-       if (mdss_dsi_phy_calc_timing_param(pinfo, phy_rev, frame_rate_bkp))
-               pr_err("Unable to revert phy timing-%d\n", frame_rate_bkp);
 end_update:
        return rc;
 }
@@ -2678,6 +2718,163 @@ static void mdss_dsi_timing_db_ctrl(struct mdss_dsi_ctrl_pdata *ctrl,
                  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_OFF);
 }
 
+static struct mdss_dsi_ctrl_pdata *mdss_dsi_get_drvdata(struct device *dev)
+{
+       struct msm_fb_data_type *mfd;
+       struct mdss_panel_data *pdata;
+       struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+       struct fb_info *fbi = dev_get_drvdata(dev);
+
+       if (fbi) {
+               mfd = (struct msm_fb_data_type *)fbi->par;
+               pdata = dev_get_platdata(&mfd->pdev->dev);
+
+               ctrl_pdata = container_of(pdata,
+                       struct mdss_dsi_ctrl_pdata, panel_data);
+       }
+
+       return ctrl_pdata;
+}
+
+static ssize_t supp_bitclk_list_sysfs_rda(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       ssize_t ret = 0;
+       int i = 0;
+       struct mdss_dsi_ctrl_pdata *ctrl_pdata = mdss_dsi_get_drvdata(dev);
+       struct mdss_panel_info *pinfo = NULL;
+
+       if (!ctrl_pdata) {
+               pr_err("%s: invalid input\n", __func__);
+               return -EINVAL;
+       }
+
+       pinfo = &ctrl_pdata->panel_data.panel_info;
+       if (!pinfo) {
+               pr_err("no panel connected\n");
+               return -ENODEV;
+       }
+
+       if (!pinfo->dynamic_bitclk) {
+               pr_err_once("%s: Dynamic bitclk not enabled for this panel\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       buf[0] = 0;
+       for (i = 0; i < pinfo->supp_bitclk_len; i++) {
+               if (ret > 0)
+                       ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+                               ",%d", pinfo->supp_bitclks[i]);
+               else
+                       ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+                               "%d", pinfo->supp_bitclks[i]);
+       }
+
+       ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+       return ret;
+}
+
+static ssize_t dynamic_bitclk_sysfs_wta(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+       int rc = 0, i = 0;
+       struct mdss_dsi_ctrl_pdata *ctrl_pdata = mdss_dsi_get_drvdata(dev);
+       struct mdss_panel_info *pinfo = NULL;
+       int clk_rate = 0;
+
+       if (!ctrl_pdata) {
+               pr_err("%s: invalid input\n", __func__);
+               return -EINVAL;
+       }
+
+       pinfo = &ctrl_pdata->panel_data.panel_info;
+       if (!pinfo) {
+               pr_err("no panel connected\n");
+               return -ENODEV;
+       }
+
+       if (!pinfo->dynamic_bitclk) {
+               pr_err_once("%s: Dynamic bitclk not enabled for this panel\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       if (mdss_panel_is_power_off(pinfo->panel_power_state)) {
+               pr_err_once("%s: Panel powered off!\n", __func__);
+               return -EINVAL;
+       }
+
+       rc = kstrtoint(buf, 10, &clk_rate);
+       if (rc) {
+               pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+               return rc;
+       }
+
+       for (i = 0; i < pinfo->supp_bitclk_len; i++) {
+               if (pinfo->supp_bitclks[i] == clk_rate)
+                       break;
+       }
+       if (i == pinfo->supp_bitclk_len) {
+               pr_err("Requested bitclk: %d not supported\n", clk_rate);
+               return -EINVAL;
+       }
+
+       rc = __mdss_dsi_dynamic_clock_switch(&ctrl_pdata->panel_data,
+               clk_rate);
+       if (!rc && mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) {
+               struct mdss_dsi_ctrl_pdata *octrl =
+                       mdss_dsi_get_other_ctrl(ctrl_pdata);
+               rc = __mdss_dsi_dynamic_clock_switch(&octrl->panel_data,
+                       clk_rate);
+               if (rc)
+                       pr_err("failed to switch DSI bitclk for sctrl\n");
+       } else if (rc) {
+               pr_err("failed to switch DSI bitclk\n");
+       }
+
+       return count;
+} /* dynamic_bitclk_sysfs_wta */
+
+static ssize_t dynamic_bitclk_sysfs_rda(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       ssize_t ret;
+       struct mdss_dsi_ctrl_pdata *ctrl_pdata = mdss_dsi_get_drvdata(dev);
+       struct mdss_panel_info *pinfo = NULL;
+
+       if (!ctrl_pdata) {
+               pr_err("%s: invalid input\n", __func__);
+               return -EINVAL;
+       }
+
+       pinfo = &ctrl_pdata->panel_data.panel_info;
+       if (!pinfo) {
+               pr_err("no panel connected\n");
+               return -ENODEV;
+       }
+
+       ret = snprintf(buf, PAGE_SIZE, "%llu\n", pinfo->clk_rate);
+       pr_debug("%s: '%llu'\n", __func__, pinfo->clk_rate);
+
+       return ret;
+} /* dynamic_bitclk_sysfs_rda */
+
+static DEVICE_ATTR(dynamic_bitclk, S_IRUGO | S_IWUSR | S_IWGRP,
+       dynamic_bitclk_sysfs_rda, dynamic_bitclk_sysfs_wta);
+static DEVICE_ATTR(supported_bitclk, S_IRUGO, supp_bitclk_list_sysfs_rda, NULL);
+
+static struct attribute *dynamic_bitclk_fs_attrs[] = {
+       &dev_attr_dynamic_bitclk.attr,
+       &dev_attr_supported_bitclk.attr,
+       NULL,
+};
+
+static struct attribute_group mdss_dsi_fs_attrs_group = {
+       .attrs = dynamic_bitclk_fs_attrs,
+};
+
 static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
                                  int event, void *arg)
 {
@@ -2844,6 +3041,14 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
                ctrl_pdata->kobj = &fbi->dev->kobj;
                ctrl_pdata->fb_node = fbi->node;
 
+               if (!mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) ||
+                       (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+                       mdss_dsi_is_ctrl_clk_master(ctrl_pdata))) {
+                       if (sysfs_create_group(&fbi->dev->kobj,
+                               &mdss_dsi_fs_attrs_group))
+                               pr_err("failed to create DSI sysfs group\n");
+               }
+
                if (IS_ENABLED(CONFIG_MSM_DBA) &&
                        pdata->panel_info.is_dba_panel) {
                                queue_delayed_work(ctrl_pdata->workq,
@@ -3392,7 +3597,7 @@ static int mdss_dsi_ctrl_probe(struct platform_device *pdev)
        pinfo = &(ctrl_pdata->panel_data.panel_info);
        if (!(mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
                mdss_dsi_is_ctrl_clk_slave(ctrl_pdata)) &&
-               pinfo->dynamic_fps) {
+               (pinfo->dynamic_fps || pinfo->dynamic_bitclk)) {
                rc = mdss_dsi_shadow_clk_init(pdev, ctrl_pdata);
 
                if (rc) {
@@ -4359,11 +4564,19 @@ int dsi_panel_device_register(struct platform_device *ctrl_pdev,
                ((mipi->mode == DSI_VIDEO_MODE)
                        ? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
 
-       rc = mdss_dsi_clk_div_config(pinfo, mipi->frame_rate);
-       if (rc) {
-               pr_err("%s: unable to initialize the clk dividers\n", __func__);
-               return rc;
+       pinfo->clk_rate = mdss_dsi_calc_bitclk(pinfo, mipi->frame_rate);
+       if (!pinfo->clk_rate) {
+               pr_err("%s: unable to calculate the DSI bit clock\n", __func__);
+               return -EINVAL;
        }
+
+       pinfo->mipi.dsi_pclk_rate = mdss_dsi_get_pclk_rate(pinfo,
+               pinfo->clk_rate);
+       if (!pinfo->mipi.dsi_pclk_rate) {
+               pr_err("%s: unable to calculate the DSI pclk\n", __func__);
+               return -EINVAL;
+       }
+
        ctrl_pdata->pclk_rate = mipi->dsi_pclk_rate;
        clk_rate = pinfo->clk_rate;
        do_div(clk_rate, 8U);
index 9847016..9005139 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -633,8 +633,8 @@ void disable_esd_thread(void);
 void mdss_dsi_irq_handler_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 
 void mdss_dsi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata);
-int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
-                           int frame_rate);
+u64 mdss_dsi_calc_bitclk(struct mdss_panel_info *panel_info, int frame_rate);
+u32 mdss_dsi_get_pclk_rate(struct mdss_panel_info *panel_info, u64 clk_rate);
 int mdss_dsi_clk_refresh(struct mdss_panel_data *pdata, bool update_phy);
 int mdss_dsi_link_clk_init(struct platform_device *pdev,
                      struct mdss_dsi_ctrl_pdata *ctrl_pdata);
index dbd58f9..bf695ae 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1803,17 +1803,13 @@ static bool mdss_dsi_cmp_panel_reg_v2(struct mdss_dsi_ctrl_pdata *ctrl)
        for (i = 0; i < ctrl->status_cmds.cmd_cnt; i++)
                len += lenp[i];
 
-       for (i = 0; i < len; i++) {
-               pr_debug("[%i] return:0x%x status:0x%x\n",
-                       i, (unsigned int)ctrl->return_buf[i],
-                       (unsigned int)ctrl->status_value[j + i]);
-               MDSS_XLOG(ctrl->ndx, ctrl->return_buf[i],
-                       ctrl->status_value[j + i]);
-               j += len;
-       }
-
        for (j = 0; j < ctrl->groups; ++j) {
                for (i = 0; i < len; ++i) {
+                       pr_debug("[%i] return:0x%x status:0x%x\n",
+                               i, ctrl->return_buf[i],
+                               (unsigned int)ctrl->status_value[group + i]);
+                       MDSS_XLOG(ctrl->ndx, ctrl->return_buf[i],
+                                       ctrl->status_value[group + i]);
                        if (ctrl->return_buf[i] !=
                                ctrl->status_value[group + i])
                                break;
@@ -2324,14 +2320,15 @@ static void mdss_dsi_parse_dfps_config(struct device_node *pan_node,
                        struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
        const char *data;
-       bool dynamic_fps;
+       bool dynamic_fps, dynamic_bitclk;
        struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+       int rc = 0;
 
        dynamic_fps = of_property_read_bool(pan_node,
                        "qcom,mdss-dsi-pan-enable-dynamic-fps");
 
        if (!dynamic_fps)
-               return;
+               goto dynamic_bitclk;
 
        pinfo->dynamic_fps = true;
        data = of_get_property(pan_node, "qcom,mdss-dsi-pan-fps-update", NULL);
@@ -2361,6 +2358,31 @@ static void mdss_dsi_parse_dfps_config(struct device_node *pan_node,
        pinfo->new_fps = pinfo->mipi.frame_rate;
        pinfo->current_fps = pinfo->mipi.frame_rate;
 
+dynamic_bitclk:
+       dynamic_bitclk = of_property_read_bool(pan_node,
+                       "qcom,mdss-dsi-pan-enable-dynamic-bitclk");
+       if (!dynamic_bitclk)
+               return;
+
+       of_find_property(pan_node, "qcom,mdss-dsi-dynamic-bitclk_freq",
+               &pinfo->supp_bitclk_len);
+       pinfo->supp_bitclk_len = pinfo->supp_bitclk_len/sizeof(u32);
+       if (pinfo->supp_bitclk_len < 1)
+               return;
+
+       pinfo->supp_bitclks = kzalloc((sizeof(u32) * pinfo->supp_bitclk_len),
+               GFP_KERNEL);
+       if (!pinfo->supp_bitclks)
+               return;
+
+       rc = of_property_read_u32_array(pan_node,
+               "qcom,mdss-dsi-dynamic-bitclk_freq", pinfo->supp_bitclks,
+               pinfo->supp_bitclk_len);
+       if (rc) {
+               pr_err("Error from dynamic bitclk freq u64 array read\n");
+               return;
+       }
+       pinfo->dynamic_bitclk = true;
        return;
 }
 
index 2d24986..e8e903e 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1034,15 +1034,10 @@ static void mdss_dsi_phy_update_timing_param_v3(struct mdss_panel_info *pinfo,
 }
 
 int mdss_dsi_phy_calc_timing_param(struct mdss_panel_info *pinfo, u32 phy_rev,
-               u32 frate_hz)
+               u64 clk_rate)
 {
        struct dsi_phy_t_clk_param t_clk;
        struct dsi_phy_timing t_param;
-       int hsync_period;
-       int vsync_period;
-       unsigned long inter_num;
-       uint32_t lane_config = 0;
-       unsigned long x, y;
        int rc = 0;
 
        if (!pinfo) {
@@ -1050,30 +1045,12 @@ int mdss_dsi_phy_calc_timing_param(struct mdss_panel_info *pinfo, u32 phy_rev,
                return -EINVAL;
        }
 
-       hsync_period = mdss_panel_get_htotal(pinfo, true);
-       vsync_period = mdss_panel_get_vtotal(pinfo);
-
-       inter_num = pinfo->bpp * frate_hz;
-
-       if (pinfo->mipi.data_lane0)
-               lane_config++;
-       if (pinfo->mipi.data_lane1)
-               lane_config++;
-       if (pinfo->mipi.data_lane2)
-               lane_config++;
-       if (pinfo->mipi.data_lane3)
-               lane_config++;
-
-       x = mult_frac(vsync_period * hsync_period, inter_num, lane_config);
-       y = rounddown(x, 1);
-       t_clk.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
+       t_clk.bitclk_mbps = rounddown((uint32_t) div_u64(clk_rate, 1000000), 1);
        t_clk.escclk_numer = ESC_CLK_MHZ;
        t_clk.escclk_denom = ESCCLK_MMSS_CC_PREDIV;
        t_clk.tlpx_numer_ns = TLPX_NUMER;
        t_clk.treot_ns = TR_EOT;
-       pr_debug("hperiod=%d, vperiod=%d, inter_num=%lu, lane_cfg=%d\n",
-                       hsync_period, vsync_period, inter_num, lane_config);
-       pr_debug("x=%lu, y=%lu, bitrate=%d\n", x, y, t_clk.bitclk_mbps);
+       pr_debug("bitrate=%d\n", t_clk.bitclk_mbps);
 
        rc = mdss_dsi_phy_initialize_defaults(&t_clk, &t_param, phy_rev);
        if (rc) {
index 03df17d..b0f7d68 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -42,7 +42,7 @@ enum phy_mode {
  * @frate_hz - Frame rate for which phy timing parameters are to be calculated.
  */
 int mdss_dsi_phy_calc_timing_param(struct mdss_panel_info *pinfo, u32 phy_rev,
-               u32 frate_hz);
+               u64 clk_rate);
 
 /*
  * mdss_dsi_phy_v3_init() - initialization sequence for DSI PHY rev v3
index 6c4db0f..1b408e2 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Core MDSS framebuffer driver.
  *
- * Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2007 Google Incorporated
  *
  * This software is licensed under the terms of the GNU General Public
@@ -610,7 +610,7 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
                        "red_chromaticity_x=%d\nred_chromaticity_y=%d\n"
                        "green_chromaticity_x=%d\ngreen_chromaticity_y=%d\n"
                        "blue_chromaticity_x=%d\nblue_chromaticity_y=%d\n"
-                       "panel_orientation=%d\n",
+                       "panel_orientation=%d\ndyn_bitclk_en=%d\n",
                        pinfo->partial_update_enabled,
                        pinfo->roi_alignment.xstart_pix_align,
                        pinfo->roi_alignment.width_pix_align,
@@ -636,7 +636,7 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
                        pinfo->hdr_properties.display_primaries[5],
                        pinfo->hdr_properties.display_primaries[6],
                        pinfo->hdr_properties.display_primaries[7],
-                       pinfo->panel_orientation);
+                       pinfo->panel_orientation, pinfo->dynamic_bitclk);
 
        return ret;
 }
index acac672..de3ff0c 100644 (file)
@@ -805,6 +805,9 @@ struct mdss_panel_info {
        int pwm_lpg_chan;
        int pwm_period;
        bool dynamic_fps;
+       bool dynamic_bitclk;
+       u32 *supp_bitclks;
+       u32 supp_bitclk_len;
        bool ulps_feature_enabled;
        bool ulps_suspend_enabled;
        bool panel_ack_disabled;
index bb3b4b3..922c444 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1489,13 +1489,19 @@ int mdss_dsi_clk_refresh(struct mdss_panel_data *pdata, bool update_phy)
                                __func__, pinfo->mipi.frame_rate);
        }
 
-       rc = mdss_dsi_clk_div_config(&pdata->panel_info,
-                       pdata->panel_info.mipi.frame_rate);
-       if (rc) {
-               pr_err("%s: unable to initialize the clk dividers\n",
-                                                               __func__);
-               return rc;
+       pinfo->clk_rate = mdss_dsi_calc_bitclk(pinfo, pinfo->mipi.frame_rate);
+       if (!pinfo->clk_rate) {
+               pr_err("%s: unable to calculate the DSI bit clock\n", __func__);
+               return -EINVAL;
        }
+
+       pinfo->mipi.dsi_pclk_rate = mdss_dsi_get_pclk_rate(pinfo,
+               pinfo->clk_rate);
+       if (!pinfo->mipi.dsi_pclk_rate) {
+               pr_err("%s: unable to calculate the DSI pclk\n", __func__);
+               return -EINVAL;
+       }
+
        ctrl_pdata->refresh_clk_rate = false;
        ctrl_pdata->pclk_rate = pdata->panel_info.mipi.dsi_pclk_rate;
        ctrl_pdata->byte_clk_rate = pdata->panel_info.clk_rate / 8;
@@ -1524,7 +1530,7 @@ int mdss_dsi_clk_refresh(struct mdss_panel_data *pdata, bool update_phy)
                /* phy panel timing calaculation */
                rc = mdss_dsi_phy_calc_timing_param(pinfo,
                                ctrl_pdata->shared_data->phy_rev,
-                               pinfo->mipi.frame_rate);
+                               pdata->panel_info.clk_rate);
                if (rc) {
                        pr_err("Error in calculating phy timings\n");
                        return rc;
@@ -1811,16 +1817,9 @@ bool is_diff_frame_rate(struct mdss_panel_info *panel_info,
                return (frame_rate != panel_info->mipi.frame_rate);
 }
 
-int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
-                           int frame_rate)
+static u8 mdss_dsi_get_lane_cnt(struct mdss_panel_info *panel_info)
 {
-       struct mdss_panel_data *pdata  = container_of(panel_info,
-                       struct mdss_panel_data, panel_info);
-       struct  mdss_dsi_ctrl_pdata *ctrl_pdata = container_of(pdata,
-                       struct mdss_dsi_ctrl_pdata, panel_data);
-       u64 h_period, v_period, clk_rate;
-       u32 dsi_pclk_rate;
-       u8 lanes = 0, bpp;
+       u8 lanes = 0;
 
        if (!panel_info)
                return -EINVAL;
@@ -1834,7 +1833,17 @@ int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
        if (panel_info->mipi.data_lane0)
                lanes += 1;
 
-       switch (panel_info->mipi.dst_format) {
+       if (!lanes)
+               lanes = 1;
+
+       return lanes;
+}
+
+static u8 mdss_dsi_get_bpp(char dst_format)
+{
+       u8 bpp = 0;
+
+       switch (dst_format) {
        case DSI_CMD_DST_FORMAT_RGB888:
        case DSI_VIDEO_DST_FORMAT_RGB888:
        case DSI_VIDEO_DST_FORMAT_RGB666_LOOSE:
@@ -1848,6 +1857,21 @@ int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
                bpp = 3;        /* Default format set to RGB888 */
                break;
        }
+       return bpp;
+}
+
+u64 mdss_dsi_calc_bitclk(struct mdss_panel_info *panel_info, int frame_rate)
+{
+       struct mdss_panel_data *pdata  = container_of(panel_info,
+               struct mdss_panel_data, panel_info);
+       struct  mdss_dsi_ctrl_pdata *ctrl_pdata = container_of(pdata,
+               struct mdss_dsi_ctrl_pdata, panel_data);
+       u64 h_period, v_period, clk_rate = 0;
+       u8 lanes = 0, bpp;
+
+       lanes = mdss_dsi_get_lane_cnt(panel_info);
+
+       bpp = mdss_dsi_get_bpp(panel_info->mipi.dst_format);
 
        h_period = mdss_panel_get_htotal(panel_info, true);
        if (panel_info->split_link_enabled)
@@ -1855,35 +1879,40 @@ int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
        v_period = mdss_panel_get_vtotal(panel_info);
 
        if (ctrl_pdata->refresh_clk_rate || is_diff_frame_rate(panel_info,
-                       frame_rate) || (!panel_info->clk_rate)) {
-               if (lanes > 0) {
-                       panel_info->clk_rate = h_period * v_period * frame_rate
-                               * bpp * 8;
-                       do_div(panel_info->clk_rate, lanes);
-               } else {
-                       pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
-                       panel_info->clk_rate =
-                               h_period * v_period * frame_rate * bpp * 8;
-               }
+               frame_rate) || (!panel_info->clk_rate)) {
+               clk_rate = h_period * v_period * frame_rate * bpp * 8;
+               do_div(clk_rate, lanes);
+       } else if (panel_info->clk_rate) {
+               clk_rate = panel_info->clk_rate;
        }
 
-       if (panel_info->clk_rate == 0)
-               panel_info->clk_rate = 454000000;
+       if (clk_rate == 0)
+               clk_rate = 454000000;
+
+       return clk_rate;
+}
+
+u32 mdss_dsi_get_pclk_rate(struct mdss_panel_info *panel_info, u64 clk_rate)
+{
+       u8 lanes = 0, bpp;
+       u32 pclk_rate = 0;
+
+       lanes = mdss_dsi_get_lane_cnt(panel_info);
+
+       bpp = mdss_dsi_get_bpp(panel_info->mipi.dst_format);
 
-       clk_rate = panel_info->clk_rate;
        do_div(clk_rate, 8 * bpp);
 
        if (panel_info->split_link_enabled)
-               dsi_pclk_rate = (u32) clk_rate *
+               pclk_rate = (u32) clk_rate *
                        panel_info->mipi.lanes_per_sublink;
        else
-               dsi_pclk_rate = (u32) clk_rate * lanes;
+               pclk_rate = (u32) clk_rate * lanes;
 
-       if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 250000000))
-               dsi_pclk_rate = 35000000;
-       panel_info->mipi.dsi_pclk_rate = dsi_pclk_rate;
+       if ((pclk_rate < 3300000) || (pclk_rate > 250000000))
+               pclk_rate = 35000000;
 
-       return 0;
+       return pclk_rate;
 }
 
 static bool mdss_dsi_is_ulps_req_valid(struct mdss_dsi_ctrl_pdata *ctrl,
index 9ddfdd6..34ab4f9 100644 (file)
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
        if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
                return -EFAULT;
 
+       if (mr->w > 4096 || mr->h > 4096)
+               return -EINVAL;
+
        if (mr->w * mr->h * 3 > mr->buffer_size)
                return -EINVAL;
 
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
                        mr->x, mr->y, mr->w, mr->h);
 
        if (r > 0) {
-               if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+               if (copy_to_user(mr->buffer, buf, r))
                        r = -EFAULT;
        }
 
index 5676aef..f4e59c4 100644 (file)
@@ -18,15 +18,16 @@ static void enable_hotplug_cpu(int cpu)
 
 static void disable_hotplug_cpu(int cpu)
 {
-       if (cpu_online(cpu)) {
-               lock_device_hotplug();
+       if (!cpu_is_hotpluggable(cpu))
+               return;
+       lock_device_hotplug();
+       if (cpu_online(cpu))
                device_offline(get_cpu_device(cpu));
-               unlock_device_hotplug();
-       }
-       if (cpu_present(cpu))
+       if (!cpu_online(cpu) && cpu_present(cpu)) {
                xen_arch_unregister_cpu(cpu);
-
-       set_cpu_present(cpu, false);
+               set_cpu_present(cpu, false);
+       }
+       unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
index 21d679f..878a409 100644 (file)
@@ -139,7 +139,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
                clear_evtchn_to_irq_row(row);
        }
 
-       evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+       evtchn_to_irq[row][col] = irq;
        return 0;
 }
 
index 2dd2858..f494126 100644 (file)
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
                /*
                 * The Xenstore watch fires directly after registering it and
                 * after a suspend/resume cycle. So ENOENT is no error but
-                * might happen in those cases.
+                * might happen in those cases. ERANGE is observed when we get
+                * an empty value (''), this happens when we acknowledge the
+                * request by writing '\0' below.
                 */
-               if (err != -ENOENT)
+               if (err != -ENOENT && err != -ERANGE)
                        pr_err("Error %d reading sysrq code in control/sysrq\n",
                               err);
                xenbus_transaction_end(xbt, 1);
index a0b3e7d..211ac47 100644 (file)
@@ -101,9 +101,6 @@ convert_sfm_char(const __u16 src_char, char *target)
        case SFM_LESSTHAN:
                *target = '<';
                break;
-       case SFM_SLASH:
-               *target = '\\';
-               break;
        case SFM_SPACE:
                *target = ' ';
                break;
index 63aea21..b9b8f19 100644 (file)
@@ -577,10 +577,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
        }
 
        count = 0;
+       /*
+        * We know that all the name entries in the protocols array
+        * are short (< 16 bytes anyway) and are NUL terminated.
+        */
        for (i = 0; i < CIFS_NUM_PROT; i++) {
-               strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
-               count += strlen(protocols[i].name) + 1;
-               /* null at end of source and target buffers anyway */
+               size_t len = strlen(protocols[i].name) + 1;
+
+               memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+               count += len;
        }
        inc_rfc1001_len(pSMB, count);
        pSMB->ByteCount = cpu_to_le16(count);
index 0cc699d..61a09ab 100644 (file)
@@ -406,9 +406,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
                        (struct smb_com_transaction_change_notify_rsp *)buf;
                struct file_notify_information *pnotify;
                __u32 data_offset = 0;
+               size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
                if (get_bcc(buf) > sizeof(struct file_notify_information)) {
                        data_offset = le32_to_cpu(pSMBr->DataOffset);
 
+                       if (data_offset >
+                           len - sizeof(struct file_notify_information)) {
+                               cifs_dbg(FYI, "invalid data_offset %u\n",
+                                        data_offset);
+                               return true;
+                       }
                        pnotify = (struct file_notify_information *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
                        cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
index e6b1795..2725085 100644 (file)
@@ -914,7 +914,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        }
 
        srch_inf->entries_in_buffer = 0;
-       srch_inf->index_of_last_entry = 0;
+       srch_inf->index_of_last_entry = 2;
 
        rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
                                  fid->volatile_fid, 0, srch_inf);
index 33f5e2a..e452f9a 100644 (file)
@@ -74,7 +74,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
        else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
                error_msg = "rec_len is too small for name_len";
        else if (unlikely(((char *) de - buf) + rlen > size))
-               error_msg = "directory entry across range";
+               error_msg = "directory entry overrun";
        else if (unlikely(le32_to_cpu(de->inode) >
                        le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
                error_msg = "inode out of bounds";
@@ -83,18 +83,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
 
        if (filp)
                ext4_error_file(filp, function, line, bh->b_blocknr,
-                               "bad entry in directory: %s - offset=%u(%u), "
-                               "inode=%u, rec_len=%d, name_len=%d",
-                               error_msg, (unsigned) (offset % size),
-                               offset, le32_to_cpu(de->inode),
-                               rlen, de->name_len);
+                               "bad entry in directory: %s - offset=%u, "
+                               "inode=%u, rec_len=%d, name_len=%d, size=%d",
+                               error_msg, offset, le32_to_cpu(de->inode),
+                               rlen, de->name_len, size);
        else
                ext4_error_inode(dir, function, line, bh->b_blocknr,
-                               "bad entry in directory: %s - offset=%u(%u), "
-                               "inode=%u, rec_len=%d, name_len=%d",
-                               error_msg, (unsigned) (offset % size),
-                               offset, le32_to_cpu(de->inode),
-                               rlen, de->name_len);
+                               "bad entry in directory: %s - offset=%u, "
+                               "inode=%u, rec_len=%d, name_len=%d, size=%d",
+                                error_msg, offset, le32_to_cpu(de->inode),
+                                rlen, de->name_len, size);
 
        return 1;
 }
index 4aca279..465041a 100644 (file)
@@ -1768,6 +1768,7 @@ int empty_inline_dir(struct inode *dir, int *has_inline_data)
 {
        int err, inline_size;
        struct ext4_iloc iloc;
+       size_t inline_len;
        void *inline_pos;
        unsigned int offset;
        struct ext4_dir_entry_2 *de;
@@ -1795,8 +1796,9 @@ int empty_inline_dir(struct inode *dir, int *has_inline_data)
                goto out;
        }
 
+       inline_len = ext4_get_inline_size(dir);
        offset = EXT4_INLINE_DOTDOT_SIZE;
-       while (offset < dir->i_size) {
+       while (offset < inline_len) {
                de = ext4_get_inline_entry(dir, &iloc, offset,
                                           &inline_pos, &inline_size);
                if (ext4_check_dir_entry(dir, NULL, de,
index 0a512aa..4c9d799 100644 (file)
@@ -48,7 +48,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
         */
        sb_start_write(sb);
        ext4_mmp_csum_set(sb, mmp);
-       mark_buffer_dirty(bh);
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
index d2421fd..783280e 100644 (file)
@@ -18,6 +18,7 @@
 
 int ext4_resize_begin(struct super_block *sb)
 {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
        int ret = 0;
 
        if (!capable(CAP_SYS_RESOURCE))
@@ -28,7 +29,7 @@ int ext4_resize_begin(struct super_block *sb)
          * because the user tools have no way of handling this.  Probably a
          * bad time to do it anyways.
          */
-       if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+       if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
            le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
                ext4_warning(sb, "won't resize using backup superblock at %llu",
                        (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
@@ -1954,6 +1955,26 @@ retry:
                }
        }
 
+       /*
+        * Make sure the last group has enough space so that it's
+        * guaranteed to have enough space for all metadata blocks
+        * that it might need to hold.  (We might not need to store
+        * the inode table blocks in the last block group, but there
+        * will be cases where this might be needed.)
+        */
+       if ((ext4_group_first_block_no(sb, n_group) +
+            ext4_group_overhead_blocks(sb, n_group) + 2 +
+            sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
+               n_blocks_count = ext4_group_first_block_no(sb, n_group);
+               n_group--;
+               n_blocks_count_retry = 0;
+               if (resize_inode) {
+                       iput(resize_inode);
+                       resize_inode = NULL;
+               }
+               goto retry;
+       }
+
        /* extend the last group */
        if (n_group == o_group)
                add = n_blocks_count - o_blocks_count;
index 71c9104..959d734 100644 (file)
@@ -4023,11 +4023,13 @@ no_journal:
        block = ext4_count_free_clusters(sb);
        ext4_free_blocks_count_set(sbi->s_es, 
                                   EXT4_C2B(sbi, block));
+       ext4_superblock_csum_set(sb);
        err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
                                  GFP_KERNEL);
        if (!err) {
                unsigned long freei = ext4_count_free_inodes(sb);
                sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+               ext4_superblock_csum_set(sb);
                err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
                                          GFP_KERNEL);
        }
index a9b96d7..cfb546e 100644 (file)
@@ -220,12 +220,12 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 {
        int error;
 
-       if (buffer_verified(bh))
-               return 0;
-
        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
            BHDR(bh)->h_blocks != cpu_to_le32(1))
                return -EFSCORRUPTED;
+       if (buffer_verified(bh))
+               return 0;
+
        if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
                return -EFSBADCRC;
        error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
@@ -639,14 +639,20 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
 }
 
 static int
-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
+                    struct inode *inode)
 {
-       struct ext4_xattr_entry *last;
+       struct ext4_xattr_entry *last, *next;
        size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
 
        /* Compute min_offs and last. */
        last = s->first;
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+       for (; !IS_LAST_ENTRY(last); last = next) {
+               next = EXT4_XATTR_NEXT(last);
+               if ((void *)next >= s->end) {
+                       EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+                       return -EFSCORRUPTED;
+               }
                if (!last->e_value_block && last->e_value_size) {
                        size_t offs = le16_to_cpu(last->e_value_offs);
                        if (offs < min_offs)
@@ -829,7 +835,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                        mb2_cache_entry_delete_block(ext4_mb_cache, hash,
                                                     bs->bh->b_blocknr);
                        ea_bdebug(bs->bh, "modifying in-place");
-                       error = ext4_xattr_set_entry(i, s);
+                       error = ext4_xattr_set_entry(i, s, inode);
                        if (!error) {
                                if (!IS_LAST_ENTRY(s->first))
                                        ext4_xattr_rehash(header(s->base),
@@ -875,7 +881,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                s->end = s->base + sb->s_blocksize;
        }
 
-       error = ext4_xattr_set_entry(i, s);
+       error = ext4_xattr_set_entry(i, s, inode);
        if (error == -EFSCORRUPTED)
                goto bad_block;
        if (error)
@@ -1063,7 +1069,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
 
        if (EXT4_I(inode)->i_extra_isize == 0)
                return -ENOSPC;
-       error = ext4_xattr_set_entry(i, s);
+       error = ext4_xattr_set_entry(i, s, inode);
        if (error) {
                if (error == -ENOSPC &&
                    ext4_has_inline_data(inode)) {
@@ -1075,7 +1081,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
                        error = ext4_xattr_ibody_find(inode, i, is);
                        if (error)
                                return error;
-                       error = ext4_xattr_set_entry(i, s);
+                       error = ext4_xattr_set_entry(i, s, inode);
                }
                if (error)
                        return error;
@@ -1101,7 +1107,7 @@ static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
 
        if (EXT4_I(inode)->i_extra_isize == 0)
                return -ENOSPC;
-       error = ext4_xattr_set_entry(i, s);
+       error = ext4_xattr_set_entry(i, s, inode);
        if (error)
                return error;
        header = IHDR(inode, ext4_raw_inode(&is->iloc));
@@ -1412,6 +1418,11 @@ retry:
                /* Find the entry best suited to be pushed into EA block */
                entry = NULL;
                for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+                       /* never move system.data out of the inode */
+                       if ((last->e_name_len == 4) &&
+                           (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+                           !memcmp(last->e_name, "data", 4))
+                               continue;
                        total_size =
                        EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
                                        EXT4_XATTR_LEN(last->e_name_len);
index 178623c..f7cdd3b 100644 (file)
@@ -28,6 +28,7 @@ struct kmem_cache *f2fs_inode_entry_slab;
 
 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
 {
+       f2fs_build_fault_attr(sbi, 0, 0);
        set_ckpt_flags(sbi, CP_ERROR_FLAG);
        if (!end_io)
                f2fs_flush_merged_writes(sbi);
@@ -70,6 +71,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
                .encrypted_page = NULL,
                .is_meta = is_meta,
        };
+       int err;
 
        if (unlikely(!is_meta))
                fio.op_flags &= ~REQ_META;
@@ -84,9 +86,10 @@ repeat:
 
        fio.page = page;
 
-       if (f2fs_submit_page_bio(&fio)) {
+       err = f2fs_submit_page_bio(&fio);
+       if (err) {
                f2fs_put_page(page, 1);
-               goto repeat;
+               return ERR_PTR(err);
        }
 
        lock_page(page);
@@ -95,14 +98,9 @@ repeat:
                goto repeat;
        }
 
-       /*
-        * if there is any IO error when accessing device, make our filesystem
-        * readonly and make sure do not write checkpoint with non-uptodate
-        * meta page.
-        */
        if (unlikely(!PageUptodate(page))) {
-               memset(page_address(page), 0, PAGE_SIZE);
-               f2fs_stop_checkpoint(sbi, false);
+               f2fs_put_page(page, 1);
+               return ERR_PTR(-EIO);
        }
 out:
        return page;
@@ -113,13 +111,32 @@ struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
        return __get_meta_page(sbi, index, true);
 }
 
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+       struct page *page;
+       int count = 0;
+
+retry:
+       page = __get_meta_page(sbi, index, true);
+       if (IS_ERR(page)) {
+               if (PTR_ERR(page) == -EIO &&
+                               ++count <= DEFAULT_RETRY_IO_COUNT)
+                       goto retry;
+
+               f2fs_stop_checkpoint(sbi, false);
+               f2fs_bug_on(sbi, 1);
+       }
+
+       return page;
+}
+
 /* for POR only */
 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
 {
        return __get_meta_page(sbi, index, false);
 }
 
-bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
                                        block_t blkaddr, int type)
 {
        switch (type) {
@@ -140,8 +157,20 @@ bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
                        return false;
                break;
        case META_POR:
+       case DATA_GENERIC:
                if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
-                       blkaddr < MAIN_BLKADDR(sbi)))
+                       blkaddr < MAIN_BLKADDR(sbi))) {
+                       if (type == DATA_GENERIC) {
+                               f2fs_msg(sbi->sb, KERN_WARNING,
+                                       "access invalid blkaddr:%u", blkaddr);
+                               WARN_ON(1);
+                       }
+                       return false;
+               }
+               break;
+       case META_GENERIC:
+               if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
+                       blkaddr >= MAIN_BLKADDR(sbi)))
                        return false;
                break;
        default:
@@ -177,7 +206,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
        blk_start_plug(&plug);
        for (; nrpages-- > 0; blkno++) {
 
-               if (!f2fs_is_valid_meta_blkaddr(sbi, blkno, type))
+               if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
                        goto out;
 
                switch (type) {
@@ -243,11 +272,8 @@ static int __f2fs_write_meta_page(struct page *page,
 
        trace_f2fs_writepage(page, META);
 
-       if (unlikely(f2fs_cp_error(sbi))) {
-               dec_page_count(sbi, F2FS_DIRTY_META);
-               unlock_page(page);
-               return 0;
-       }
+       if (unlikely(f2fs_cp_error(sbi)))
+               goto redirty_out;
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto redirty_out;
        if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
@@ -530,13 +556,12 @@ int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
 
        spin_lock(&im->ino_lock);
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_ORPHAN)) {
                spin_unlock(&im->ino_lock);
                f2fs_show_injection_info(FAULT_ORPHAN);
                return -ENOSPC;
        }
-#endif
+
        if (unlikely(im->ino_num >= sbi->max_orphans))
                err = -ENOSPC;
        else
@@ -573,12 +598,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
        struct inode *inode;
        struct node_info ni;
-       int err = f2fs_acquire_orphan_inode(sbi);
-
-       if (err)
-               goto err_out;
-
-       __add_ino_entry(sbi, ino, 0, ORPHAN_INO);
+       int err;
 
        inode = f2fs_iget_retry(sbi->sb, ino);
        if (IS_ERR(inode)) {
@@ -601,14 +621,15 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
        /* truncate all the data during iput */
        iput(inode);
 
-       f2fs_get_node_info(sbi, ino, &ni);
+       err = f2fs_get_node_info(sbi, ino, &ni);
+       if (err)
+               goto err_out;
 
        /* ENOMEM was fully retried in f2fs_evict_inode. */
        if (ni.blk_addr != NULL_ADDR) {
                err = -EIO;
                goto err_out;
        }
-       __remove_ino_entry(sbi, ino, ORPHAN_INO);
        return 0;
 
 err_out:
@@ -640,7 +661,10 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
        /* Needed for iput() to work correctly and not trash data */
        sbi->sb->s_flags |= MS_ACTIVE;
 
-       /* Turn on quotas so that they are updated correctly */
+       /*
+        * Turn on quotas which were not enabled for read-only mounts if
+        * filesystem has quota feature, so that they are updated correctly.
+        */
        quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
 #endif
 
@@ -650,9 +674,15 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
        f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
 
        for (i = 0; i < orphan_blocks; i++) {
-               struct page *page = f2fs_get_meta_page(sbi, start_blk + i);
+               struct page *page;
                struct f2fs_orphan_block *orphan_blk;
 
+               page = f2fs_get_meta_page(sbi, start_blk + i);
+               if (IS_ERR(page)) {
+                       err = PTR_ERR(page);
+                       goto out;
+               }
+
                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
                        nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
@@ -743,10 +773,14 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
        __u32 crc = 0;
 
        *cp_page = f2fs_get_meta_page(sbi, cp_addr);
+       if (IS_ERR(*cp_page))
+               return PTR_ERR(*cp_page);
+
        *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
 
        crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
        if (crc_offset > (blk_size - sizeof(__le32))) {
+               f2fs_put_page(*cp_page, 1);
                f2fs_msg(sbi->sb, KERN_WARNING,
                        "invalid crc_offset: %zu", crc_offset);
                return -EINVAL;
@@ -754,6 +788,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
 
        crc = cur_cp_crc(*cp_block);
        if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+               f2fs_put_page(*cp_page, 1);
                f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
                return -EINVAL;
        }
@@ -773,14 +808,22 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
        err = get_checkpoint_version(sbi, cp_addr, &cp_block,
                                        &cp_page_1, version);
        if (err)
-               goto invalid_cp1;
+               return NULL;
+
+       if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
+                                       sbi->blocks_per_seg) {
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "invalid cp_pack_total_block_count:%u",
+                       le32_to_cpu(cp_block->cp_pack_total_block_count));
+               goto invalid_cp;
+       }
        pre_version = *version;
 
        cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
        err = get_checkpoint_version(sbi, cp_addr, &cp_block,
                                        &cp_page_2, version);
        if (err)
-               goto invalid_cp2;
+               goto invalid_cp;
        cur_version = *version;
 
        if (cur_version == pre_version) {
@@ -788,9 +831,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
                f2fs_put_page(cp_page_2, 1);
                return cp_page_1;
        }
-invalid_cp2:
        f2fs_put_page(cp_page_2, 1);
-invalid_cp1:
+invalid_cp:
        f2fs_put_page(cp_page_1, 1);
        return NULL;
 }
@@ -839,15 +881,15 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
        cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
        memcpy(sbi->ckpt, cp_block, blk_size);
 
-       /* Sanity checking of checkpoint */
-       if (f2fs_sanity_check_ckpt(sbi))
-               goto free_fail_no_cp;
-
        if (cur_page == cp1)
                sbi->cur_cp_pack = 1;
        else
                sbi->cur_cp_pack = 2;
 
+       /* Sanity checking of checkpoint */
+       if (f2fs_sanity_check_ckpt(sbi))
+               goto free_fail_no_cp;
+
        if (cp_blks <= 1)
                goto done;
 
@@ -860,6 +902,8 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
                unsigned char *ckpt = (unsigned char *)sbi->ckpt;
 
                cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
+               if (IS_ERR(cur_page))
+                       goto free_fail_no_cp;
                sit_bitmap_ptr = page_address(cur_page);
                memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
                f2fs_put_page(cur_page, 1);
@@ -981,12 +1025,10 @@ retry:
 
                iput(inode);
                /* We need to give cpu to another writers. */
-               if (ino == cur_ino) {
-                       congestion_wait(BLK_RW_ASYNC, HZ/50);
+               if (ino == cur_ino)
                        cond_resched();
-               } else {
+               else
                        ino = cur_ino;
-               }
        } else {
                /*
                 * We should submit bio, since it exists several
@@ -1120,7 +1162,7 @@ static void unblock_operations(struct f2fs_sb_info *sbi)
        f2fs_unlock_all(sbi);
 }
 
-static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
 {
        DEFINE_WAIT(wait);
 
@@ -1130,6 +1172,9 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
                if (!get_pages(sbi, F2FS_WB_CP_DATA))
                        break;
 
+               if (unlikely(f2fs_cp_error(sbi)))
+                       break;
+
                io_schedule_timeout(5*HZ);
        }
        finish_wait(&sbi->cp_wait, &wait);
@@ -1203,8 +1248,12 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,
 
        /* writeout cp pack 2 page */
        err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
-       f2fs_bug_on(sbi, err);
+       if (unlikely(err && f2fs_cp_error(sbi))) {
+               f2fs_put_page(page, 1);
+               return;
+       }
 
+       f2fs_bug_on(sbi, err);
        f2fs_put_page(page, 0);
 
        /* submit checkpoint (with barrier if NOBARRIER is not set) */
@@ -1230,7 +1279,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        while (get_pages(sbi, F2FS_DIRTY_META)) {
                f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
                if (unlikely(f2fs_cp_error(sbi)))
-                       return -EIO;
+                       break;
        }
 
        /*
@@ -1310,7 +1359,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
                        f2fs_sync_meta_pages(sbi, META, LONG_MAX,
                                                        FS_CP_META_IO);
                        if (unlikely(f2fs_cp_error(sbi)))
-                               return -EIO;
+                               break;
                }
        }
 
@@ -1349,10 +1398,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
 
        /* wait for previous submitted meta pages writeback */
-       wait_on_all_pages_writeback(sbi);
-
-       if (unlikely(f2fs_cp_error(sbi)))
-               return -EIO;
+       f2fs_wait_on_all_pages_writeback(sbi);
 
        /* flush all device cache */
        err = f2fs_flush_device_cache(sbi);
@@ -1361,12 +1407,19 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
        /* barrier and flush checkpoint cp pack 2 page if it can */
        commit_checkpoint(sbi, ckpt, start_blk);
-       wait_on_all_pages_writeback(sbi);
+       f2fs_wait_on_all_pages_writeback(sbi);
+
+       /*
+        * invalidate intermediate page cache borrowed from meta inode
+        * which are used for migration of encrypted inode's blocks.
+        */
+       if (f2fs_sb_has_encrypt(sbi->sb))
+               invalidate_mapping_pages(META_MAPPING(sbi),
+                               MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
 
        f2fs_release_ino_entry(sbi, false);
 
-       if (unlikely(f2fs_cp_error(sbi)))
-               return -EIO;
+       f2fs_reset_fsync_node_info(sbi);
 
        clear_sbi_flag(sbi, SBI_IS_DIRTY);
        clear_sbi_flag(sbi, SBI_NEED_CP);
@@ -1382,7 +1435,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
        f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
 
-       return 0;
+       return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
 }
 
 /*
index 3cc679a..c62bec5 100644 (file)
@@ -126,12 +126,10 @@ static bool f2fs_bio_post_read_required(struct bio *bio)
 
 static void f2fs_read_end_io(struct bio *bio)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
                f2fs_show_injection_info(FAULT_IO);
                bio->bi_error = -EIO;
        }
-#endif
 
        if (f2fs_bio_post_read_required(bio)) {
                struct bio_post_read_ctx *ctx = bio->bi_private;
@@ -177,6 +175,8 @@ static void f2fs_write_end_io(struct bio *bio)
                                        page->index != nid_of_node(page));
 
                dec_page_count(sbi, type);
+               if (f2fs_in_warm_node_list(sbi, page))
+                       f2fs_del_fsync_node_entry(sbi, page);
                clear_cold_data(page);
                end_page_writeback(page);
        }
@@ -263,7 +263,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
                if (type != DATA && type != NODE)
                        goto submit_io;
 
-               if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
+               if (test_opt(sbi, LFS) && current->plug)
                        blk_finish_plug(current->plug);
 
                start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
@@ -440,7 +440,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;
 
-       verify_block_addr(fio, fio->new_blkaddr);
+       if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
+                       __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+               return -EFAULT;
+
        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);
 
@@ -484,7 +487,7 @@ next:
                spin_unlock(&io->io_lock);
        }
 
-       if (is_valid_blkaddr(fio->old_blkaddr))
+       if (__is_valid_data_blkaddr(fio->old_blkaddr))
                verify_block_addr(fio, fio->old_blkaddr);
        verify_block_addr(fio, fio->new_blkaddr);
 
@@ -533,19 +536,22 @@ out:
 }
 
 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
-                                                        unsigned nr_pages)
+                                       unsigned nr_pages, unsigned op_flag)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;
        struct bio_post_read_ctx *ctx;
        unsigned int post_read_steps = 0;
 
+       if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+               return ERR_PTR(-EFAULT);
+
        bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
        if (!bio)
                return ERR_PTR(-ENOMEM);
        f2fs_target_device(sbi, blkaddr, bio);
        bio->bi_end_io = f2fs_read_end_io;
-       bio_set_op_attrs(bio, REQ_OP_READ, 0);
+       bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
 
        if (f2fs_encrypted_file(inode))
                post_read_steps |= 1 << STEP_DECRYPT;
@@ -570,7 +576,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
                                                        block_t blkaddr)
 {
-       struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);
+       struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
 
        if (IS_ERR(bio))
                return PTR_ERR(bio);
@@ -868,6 +874,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_summary sum;
        struct node_info ni;
+       block_t old_blkaddr;
        pgoff_t fofs;
        blkcnt_t count = 1;
        int err;
@@ -875,6 +882,10 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return -EPERM;
 
+       err = f2fs_get_node_info(sbi, dn->nid, &ni);
+       if (err)
+               return err;
+
        dn->data_blkaddr = datablock_addr(dn->inode,
                                dn->node_page, dn->ofs_in_node);
        if (dn->data_blkaddr == NEW_ADDR)
@@ -884,11 +895,13 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
                return err;
 
 alloc:
-       f2fs_get_node_info(sbi, dn->nid, &ni);
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
-
-       f2fs_allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
+       old_blkaddr = dn->data_blkaddr;
+       f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
                                        &sum, seg_type, NULL, false);
+       if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+               invalidate_mapping_pages(META_MAPPING(sbi),
+                                       old_blkaddr, old_blkaddr);
        f2fs_set_data_blkaddr(dn);
 
        /* update i_size */
@@ -1044,7 +1057,13 @@ next_dnode:
 next_block:
        blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
 
-       if (!is_valid_blkaddr(blkaddr)) {
+       if (__is_valid_data_blkaddr(blkaddr) &&
+               !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+               err = -EFAULT;
+               goto sync_out;
+       }
+
+       if (!is_valid_data_blkaddr(sbi, blkaddr)) {
                if (create) {
                        if (unlikely(f2fs_cp_error(sbi))) {
                                err = -EIO;
@@ -1281,7 +1300,11 @@ static int f2fs_xattr_fiemap(struct inode *inode,
                if (!page)
                        return -ENOMEM;
 
-               f2fs_get_node_info(sbi, inode->i_ino, &ni);
+               err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+               if (err) {
+                       f2fs_put_page(page, 1);
+                       return err;
+               }
 
                phys = (__u64)blk_to_logical(inode, ni.blk_addr);
                offset = offsetof(struct f2fs_inode, i_addr) +
@@ -1308,7 +1331,11 @@ static int f2fs_xattr_fiemap(struct inode *inode,
                if (!page)
                        return -ENOMEM;
 
-               f2fs_get_node_info(sbi, xnid, &ni);
+               err = f2fs_get_node_info(sbi, xnid, &ni);
+               if (err) {
+                       f2fs_put_page(page, 1);
+                       return err;
+               }
 
                phys = (__u64)blk_to_logical(inode, ni.blk_addr);
                len = inode->i_sb->s_blocksize;
@@ -1420,10 +1447,15 @@ out:
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
+ *
+ * Note that the aops->readpages() function is ONLY used for read-ahead. If
+ * this function ever deviates from doing just read-ahead, it should either
+ * use ->readpage() or do the necessary surgery to decouple ->readpages()
+ * from read-ahead.
  */
 static int f2fs_mpage_readpages(struct address_space *mapping,
                        struct list_head *pages, struct page *page,
-                       unsigned nr_pages)
+                       unsigned nr_pages, bool is_readahead)
 {
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;
@@ -1493,6 +1525,10 @@ got_it:
                                SetPageUptodate(page);
                                goto confused;
                        }
+
+                       if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+                                                               DATA_GENERIC))
+                               goto set_error_page;
                } else {
                        zero_user_segment(page, 0, PAGE_SIZE);
                        if (!PageUptodate(page))
@@ -1512,7 +1548,8 @@ submit_and_realloc:
                        bio = NULL;
                }
                if (bio == NULL) {
-                       bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
+                       bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
+                                       is_readahead ? REQ_RAHEAD : 0);
                        if (IS_ERR(bio)) {
                                bio = NULL;
                                goto set_error_page;
@@ -1556,7 +1593,7 @@ static int f2fs_read_data_page(struct file *file, struct page *page)
        if (f2fs_has_inline_data(inode))
                ret = f2fs_read_inline_data(inode, page);
        if (ret == -EAGAIN)
-               ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
+               ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
        return ret;
 }
 
@@ -1573,12 +1610,13 @@ static int f2fs_read_data_pages(struct file *file,
        if (f2fs_has_inline_data(inode))
                return 0;
 
-       return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
+       return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
 }
 
 static int encrypt_one_page(struct f2fs_io_info *fio)
 {
        struct inode *inode = fio->page->mapping->host;
+       struct page *mpage;
        gfp_t gfp_flags = GFP_NOFS;
 
        if (!f2fs_encrypted_file(inode))
@@ -1590,17 +1628,25 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
 retry_encrypt:
        fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
                        PAGE_SIZE, 0, fio->page->index, gfp_flags);
-       if (!IS_ERR(fio->encrypted_page))
-               return 0;
+       if (IS_ERR(fio->encrypted_page)) {
+               /* flush pending IOs and wait for a while in the ENOMEM case */
+               if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
+                       f2fs_flush_merged_writes(fio->sbi);
+                       congestion_wait(BLK_RW_ASYNC, HZ/50);
+                       gfp_flags |= __GFP_NOFAIL;
+                       goto retry_encrypt;
+               }
+               return PTR_ERR(fio->encrypted_page);
+       }
 
-       /* flush pending IOs and wait for a while in the ENOMEM case */
-       if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
-               f2fs_flush_merged_writes(fio->sbi);
-               congestion_wait(BLK_RW_ASYNC, HZ/50);
-               gfp_flags |= __GFP_NOFAIL;
-               goto retry_encrypt;
+       mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
+       if (mpage) {
+               if (PageUptodate(mpage))
+                       memcpy(page_address(mpage),
+                               page_address(fio->encrypted_page), PAGE_SIZE);
+               f2fs_put_page(mpage, 1);
        }
-       return PTR_ERR(fio->encrypted_page);
+       return 0;
 }
 
 static inline bool check_inplace_update_policy(struct inode *inode,
@@ -1684,6 +1730,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
        struct inode *inode = page->mapping->host;
        struct dnode_of_data dn;
        struct extent_info ei = {0,0,0};
+       struct node_info ni;
        bool ipu_force = false;
        int err = 0;
 
@@ -1692,11 +1739,13 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
                        f2fs_lookup_extent_cache(inode, page->index, &ei)) {
                fio->old_blkaddr = ei.blk + page->index - ei.fofs;
 
-               if (is_valid_blkaddr(fio->old_blkaddr)) {
-                       ipu_force = true;
-                       fio->need_lock = LOCK_DONE;
-                       goto got_it;
-               }
+               if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+                                                       DATA_GENERIC))
+                       return -EFAULT;
+
+               ipu_force = true;
+               fio->need_lock = LOCK_DONE;
+               goto got_it;
        }
 
        /* Deadlock due to between page->lock and f2fs_lock_op */
@@ -1715,11 +1764,17 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
                goto out_writepage;
        }
 got_it:
+       if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
+               !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+                                                       DATA_GENERIC)) {
+               err = -EFAULT;
+               goto out_writepage;
+       }
        /*
         * If current allocation needs SSR,
         * it had better in-place writes for updated data.
         */
-       if (ipu_force || (is_valid_blkaddr(fio->old_blkaddr) &&
+       if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
                                        need_inplace_update(fio))) {
                err = encrypt_one_page(fio);
                if (err)
@@ -1744,6 +1799,12 @@ got_it:
                fio->need_lock = LOCK_REQ;
        }
 
+       err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
+       if (err)
+               goto out_writepage;
+
+       fio->version = ni.version;
+
        err = encrypt_one_page(fio);
        if (err)
                goto out_writepage;
@@ -2072,6 +2133,18 @@ continue_unlock:
        return ret;
 }
 
+static inline bool __should_serialize_io(struct inode *inode,
+                                       struct writeback_control *wbc)
+{
+       if (!S_ISREG(inode->i_mode))
+               return false;
+       if (wbc->sync_mode != WB_SYNC_ALL)
+               return true;
+       if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
+               return true;
+       return false;
+}
+
 static int __f2fs_write_data_pages(struct address_space *mapping,
                                                struct writeback_control *wbc,
                                                enum iostat_type io_type)
@@ -2080,6 +2153,7 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct blk_plug plug;
        int ret;
+       bool locked = false;
 
        /* deal with chardevs and other special file */
        if (!mapping->a_ops->writepage)
@@ -2110,10 +2184,18 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
        else if (atomic_read(&sbi->wb_sync_req[DATA]))
                goto skip_write;
 
+       if (__should_serialize_io(inode, wbc)) {
+               mutex_lock(&sbi->writepages);
+               locked = true;
+       }
+
        blk_start_plug(&plug);
        ret = f2fs_write_cache_pages(mapping, wbc, io_type);
        blk_finish_plug(&plug);
 
+       if (locked)
+               mutex_unlock(&sbi->writepages);
+
        if (wbc->sync_mode == WB_SYNC_ALL)
                atomic_dec(&sbi->wb_sync_req[DATA]);
        /*
@@ -2146,10 +2228,14 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
        loff_t i_size = i_size_read(inode);
 
        if (to > i_size) {
+               down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                down_write(&F2FS_I(inode)->i_mmap_sem);
+
                truncate_pagecache(inode, i_size);
                f2fs_truncate_blocks(inode, i_size, true);
+
                up_write(&F2FS_I(inode)->i_mmap_sem);
+               up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        }
 }
 
@@ -2254,8 +2340,9 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
        }
        trace_f2fs_write_begin(inode, pos, len, flags);
 
-       if (f2fs_is_atomic_file(inode) &&
-                       !f2fs_available_free_memory(sbi, INMEM_PAGES)) {
+       if ((f2fs_is_atomic_file(inode) &&
+                       !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
+                       is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
                err = -ENOMEM;
                drop_atomic = true;
                goto fail;
@@ -2380,14 +2467,20 @@ unlock_out:
 static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
                           loff_t offset)
 {
-       unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
-
-       if (offset & blocksize_mask)
-               return -EINVAL;
-
-       if (iov_iter_alignment(iter) & blocksize_mask)
-               return -EINVAL;
-
+       unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
+       unsigned blkbits = i_blkbits;
+       unsigned blocksize_mask = (1 << blkbits) - 1;
+       unsigned long align = offset | iov_iter_alignment(iter);
+       struct block_device *bdev = inode->i_sb->s_bdev;
+
+       if (align & blocksize_mask) {
+               if (bdev)
+                       blkbits = blksize_bits(bdev_logical_block_size(bdev));
+               blocksize_mask = (1 << blkbits) - 1;
+               if (align & blocksize_mask)
+                       return -EINVAL;
+               return 1;
+       }
        return 0;
 }
 
@@ -2405,7 +2498,7 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 
        err = check_direct_IO(inode, iter, offset);
        if (err)
-               return err;
+               return err < 0 ? err : 0;
 
        if (f2fs_force_buffered_io(inode, rw))
                return 0;
@@ -2527,6 +2620,10 @@ static int f2fs_set_data_page_dirty(struct page *page)
        if (!PageUptodate(page))
                SetPageUptodate(page);
 
+       /* don't remain PG_checked flag which was set during GC */
+       if (is_cold_data(page))
+               clear_cold_data(page);
+
        if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
                if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
                        f2fs_register_inmem_page(inode, page);
index 2d65e77..214a968 100644 (file)
@@ -215,7 +215,8 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
        si->base_mem += sizeof(struct f2fs_nm_info);
        si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
        si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
-       si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
+       si->base_mem += NM_I(sbi)->nat_blocks *
+                               f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
        si->base_mem += NM_I(sbi)->nat_blocks / 8;
        si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
 
index a7feed7..0866395 100644 (file)
@@ -517,12 +517,11 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
        }
 
 start:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
                f2fs_show_injection_info(FAULT_DIR_DEPTH);
                return -ENOSPC;
        }
-#endif
+
        if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
                return -ENOSPC;
 
index 8f8bb31..a1e9753 100644 (file)
@@ -43,7 +43,6 @@
        } while (0)
 #endif
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 enum {
        FAULT_KMALLOC,
        FAULT_KVMALLOC,
@@ -58,16 +57,20 @@ enum {
        FAULT_TRUNCATE,
        FAULT_IO,
        FAULT_CHECKPOINT,
+       FAULT_DISCARD,
        FAULT_MAX,
 };
 
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+#define F2FS_ALL_FAULT_TYPE            ((1 << FAULT_MAX) - 1)
+
 struct f2fs_fault_info {
        atomic_t inject_ops;
        unsigned int inject_rate;
        unsigned int inject_type;
 };
 
-extern char *fault_name[FAULT_MAX];
+extern char *f2fs_fault_name[FAULT_MAX];
 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
 #endif
 
@@ -241,7 +244,6 @@ enum {
 
 #define MAX_DISCARD_BLOCKS(sbi)                BLKS_PER_SEC(sbi)
 #define DEF_MAX_DISCARD_REQUEST                8       /* issue 8 discards per round */
-#define DEF_MAX_DISCARD_LEN            512     /* Max. 2MB per discard */
 #define DEF_MIN_DISCARD_ISSUE_TIME     50      /* 50 ms, if exists */
 #define DEF_MID_DISCARD_ISSUE_TIME     500     /* 500 ms, if device busy */
 #define DEF_MAX_DISCARD_ISSUE_TIME     60000   /* 60 s, if no candidates */
@@ -257,7 +259,7 @@ struct cp_control {
 };
 
 /*
- * For CP/NAT/SIT/SSA readahead
+ * indicate meta/data type
  */
 enum {
        META_CP,
@@ -265,6 +267,8 @@ enum {
        META_SIT,
        META_SSA,
        META_POR,
+       DATA_GENERIC,
+       META_GENERIC,
 };
 
 /* for the list of ino */
@@ -289,6 +293,12 @@ struct inode_entry {
        struct inode *inode;    /* vfs inode pointer */
 };
 
+struct fsync_node_entry {
+       struct list_head list;  /* list head */
+       struct page *page;      /* warm node page pointer */
+       unsigned int seq_id;    /* sequence id */
+};
+
 /* for the bitmap indicate blocks to be discarded */
 struct discard_entry {
        struct list_head list;  /* list head */
@@ -305,9 +315,10 @@ struct discard_entry {
                                        (MAX_PLIST_NUM - 1) : (blk_num - 1))
 
 enum {
-       D_PREP,
-       D_SUBMIT,
-       D_DONE,
+       D_PREP,                 /* initial */
+       D_PARTIAL,              /* partially submitted */
+       D_SUBMIT,               /* all submitted */
+       D_DONE,                 /* finished */
 };
 
 struct discard_info {
@@ -332,7 +343,10 @@ struct discard_cmd {
        struct block_device *bdev;      /* bdev */
        unsigned short ref;             /* reference count */
        unsigned char state;            /* state */
+       unsigned char issuing;          /* issuing discard */
        int error;                      /* bio error */
+       spinlock_t lock;                /* for state/bio_ref updating */
+       unsigned short bio_ref;         /* bio reference count */
 };
 
 enum {
@@ -352,6 +366,7 @@ struct discard_policy {
        unsigned int io_aware_gran;     /* minimum granularity discard not be aware of I/O */
        bool io_aware;                  /* issue discard in idle time */
        bool sync;                      /* submit discard with REQ_SYNC flag */
+       bool ordered;                   /* issue discard by lba order */
        unsigned int granularity;       /* discard granularity */
 };
 
@@ -368,10 +383,12 @@ struct discard_cmd_control {
        unsigned int max_discards;              /* max. discards to be issued */
        unsigned int discard_granularity;       /* discard granularity */
        unsigned int undiscard_blks;            /* # of undiscard blocks */
+       unsigned int next_pos;                  /* next discard position */
        atomic_t issued_discard;                /* # of issued discard */
        atomic_t issing_discard;                /* # of issing discard */
        atomic_t discard_cmd_cnt;               /* # of cached cmd count */
        struct rb_root root;                    /* root of discard rb-tree */
+       bool rbtree_check;                      /* config for consistence check */
 };
 
 /* for the list of fsync inodes, used only during recovery */
@@ -568,13 +585,12 @@ enum {
                                         */
 };
 
+#define DEFAULT_RETRY_IO_COUNT 8       /* maximum retry read IO count */
+
 #define F2FS_LINK_MAX  0xffffffff      /* maximum link count per file */
 
 #define MAX_DIR_RA_PAGES       4       /* maximum ra pages of dir */
 
-/* vector size for gang look-up from extent cache that consists of radix tree */
-#define EXT_TREE_VEC_SIZE      64
-
 /* for in-memory extent cache entry */
 #define F2FS_MIN_EXTENT_LEN    64      /* minimum extent length */
 
@@ -660,6 +676,8 @@ enum {
 #define FADVISE_HOT_BIT                0x20
 #define FADVISE_VERITY_BIT     0x40    /* reserved */
 
+#define FADVISE_MODIFIABLE_BITS        (FADVISE_COLD_BIT | FADVISE_HOT_BIT)
+
 #define file_is_cold(inode)    is_file(inode, FADVISE_COLD_BIT)
 #define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
 #define file_set_cold(inode)   set_file(inode, FADVISE_COLD_BIT)
@@ -758,22 +776,22 @@ static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
 }
 
 static inline bool __is_discard_mergeable(struct discard_info *back,
-                                               struct discard_info *front)
+                       struct discard_info *front, unsigned int max_len)
 {
        return (back->lstart + back->len == front->lstart) &&
-               (back->len + front->len < DEF_MAX_DISCARD_LEN);
+               (back->len + front->len <= max_len);
 }
 
 static inline bool __is_discard_back_mergeable(struct discard_info *cur,
-                                               struct discard_info *back)
+                       struct discard_info *back, unsigned int max_len)
 {
-       return __is_discard_mergeable(back, cur);
+       return __is_discard_mergeable(back, cur, max_len);
 }
 
 static inline bool __is_discard_front_mergeable(struct discard_info *cur,
-                                               struct discard_info *front)
+                       struct discard_info *front, unsigned int max_len)
 {
-       return __is_discard_mergeable(cur, front);
+       return __is_discard_mergeable(cur, front, max_len);
 }
 
 static inline bool __is_extent_mergeable(struct extent_info *back,
@@ -828,6 +846,7 @@ struct f2fs_nm_info {
        struct radix_tree_root nat_set_root;/* root of the nat set cache */
        struct rw_semaphore nat_tree_lock;      /* protect nat_tree_lock */
        struct list_head nat_entries;   /* cached nat entry list (clean) */
+       spinlock_t nat_list_lock;       /* protect clean nat entry list */
        unsigned int nat_cnt;           /* the # of cached nat entries */
        unsigned int dirty_nat_cnt;     /* total num of nat entries in set */
        unsigned int nat_blocks;        /* # of nat blocks */
@@ -954,6 +973,7 @@ struct f2fs_sm_info {
        unsigned int ipu_policy;        /* in-place-update policy */
        unsigned int min_ipu_util;      /* in-place-update threshold */
        unsigned int min_fsync_blocks;  /* threshold for fsync */
+       unsigned int min_seq_blocks;    /* threshold for sequential blocks */
        unsigned int min_hot_blocks;    /* threshold for hot block allocation */
        unsigned int min_ssr_sections;  /* threshold to trigger SSR allocation */
 
@@ -1075,6 +1095,7 @@ struct f2fs_io_info {
        bool retry;             /* need to reallocate block address */
        enum iostat_type io_type;       /* io type */
        struct writeback_control *io_wbc; /* writeback control */
+       unsigned char version;          /* version of the node */
 };
 
 #define is_read_io(rw) ((rw) == READ)
@@ -1126,6 +1147,7 @@ enum {
        SBI_POR_DOING,                          /* recovery is doing or not */
        SBI_NEED_SB_WRITE,                      /* need to recover superblock */
        SBI_NEED_CP,                            /* need to checkpoint */
+       SBI_IS_SHUTDOWN,                        /* shutdown by ioctl */
 };
 
 enum {
@@ -1172,6 +1194,7 @@ struct f2fs_sb_info {
        struct rw_semaphore sb_lock;            /* lock for raw super block */
        int valid_super_block;                  /* valid super block no */
        unsigned long s_flag;                           /* flags for sbi */
+       struct mutex writepages;                /* mutex for writepages() */
 
 #ifdef CONFIG_BLK_DEV_ZONED
        unsigned int blocks_per_blkz;           /* F2FS blocks per zone */
@@ -1208,6 +1231,11 @@ struct f2fs_sb_info {
 
        struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */
 
+       spinlock_t fsync_node_lock;             /* for node entry lock */
+       struct list_head fsync_node_list;       /* node list head */
+       unsigned int fsync_seg_id;              /* sequence id */
+       unsigned int fsync_node_num;            /* number of node entries */
+
        /* for orphan inode, use 0'th array */
        unsigned int max_orphans;               /* max orphan inodes */
 
@@ -1275,6 +1303,7 @@ struct f2fs_sb_info {
        unsigned int gc_mode;                   /* current GC state */
        /* for skip statistic */
        unsigned long long skipped_atomic_files[2];     /* FG_GC and BG_GC */
+       unsigned long long skipped_gc_rwsem;            /* FG_GC only */
 
        /* threshold for gc trials on pinned files */
        u64 gc_pin_file_threshold;
@@ -1339,7 +1368,7 @@ struct f2fs_sb_info {
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 #define f2fs_show_injection_info(type)                         \
        printk("%sF2FS-fs : inject %s in %s of %pF\n",          \
-               KERN_INFO, fault_name[type],                    \
+               KERN_INFO, f2fs_fault_name[type],               \
                __func__, __builtin_return_address(0))
 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
 {
@@ -1358,6 +1387,12 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
        }
        return false;
 }
+#else
+#define f2fs_show_injection_info(type) do { } while (0)
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+       return false;
+}
 #endif
 
 /* For write statistics. Suppose sector size is 512 bytes,
@@ -1386,7 +1421,7 @@ static inline bool is_idle(struct f2fs_sb_info *sbi)
        struct request_list *rl = &q->root_rl;
 
        if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
-               return 0;
+               return false;
 
        return f2fs_time_over(sbi, REQ_TIME);
 }
@@ -1710,13 +1745,12 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
        if (ret)
                return ret;
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_BLOCK)) {
                f2fs_show_injection_info(FAULT_BLOCK);
                release = *count;
                goto enospc;
        }
-#endif
+
        /*
         * let's increase this in prior to actual block count change in order
         * for f2fs_sync_file to avoid data races when deciding checkpoint.
@@ -1740,18 +1774,20 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
                sbi->total_valid_block_count -= diff;
                if (!*count) {
                        spin_unlock(&sbi->stat_lock);
-                       percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
                        goto enospc;
                }
        }
        spin_unlock(&sbi->stat_lock);
 
-       if (unlikely(release))
+       if (unlikely(release)) {
+               percpu_counter_sub(&sbi->alloc_valid_block_count, release);
                dquot_release_reservation_block(inode, release);
+       }
        f2fs_i_blocks_write(inode, *count, true, true);
        return 0;
 
 enospc:
+       percpu_counter_sub(&sbi->alloc_valid_block_count, release);
        dquot_release_reservation_block(inode, release);
        return -ENOSPC;
 }
@@ -1923,12 +1959,10 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
                        return ret;
        }
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_BLOCK)) {
                f2fs_show_injection_info(FAULT_BLOCK);
                goto enospc;
        }
-#endif
 
        spin_lock(&sbi->stat_lock);
 
@@ -2013,17 +2047,23 @@ static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
                                                pgoff_t index, bool for_write)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-       struct page *page = find_lock_page(mapping, index);
+       struct page *page;
 
-       if (page)
-               return page;
+       if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
+               if (!for_write)
+                       page = find_get_page_flags(mapping, index,
+                                                       FGP_LOCK | FGP_ACCESSED);
+               else
+                       page = find_lock_page(mapping, index);
+               if (page)
+                       return page;
 
-       if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
-               f2fs_show_injection_info(FAULT_PAGE_ALLOC);
-               return NULL;
+               if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
+                       f2fs_show_injection_info(FAULT_PAGE_ALLOC);
+                       return NULL;
+               }
        }
-#endif
+
        if (!for_write)
                return grab_cache_page(mapping, index);
        return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
@@ -2033,12 +2073,11 @@ static inline struct page *f2fs_pagecache_get_page(
                                struct address_space *mapping, pgoff_t index,
                                int fgp_flags, gfp_t gfp_mask)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
                f2fs_show_injection_info(FAULT_PAGE_GET);
                return NULL;
        }
-#endif
+
        return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
 }
 
@@ -2103,12 +2142,11 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
                        bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
                return bio;
        }
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
                f2fs_show_injection_info(FAULT_ALLOC_BIO);
                return NULL;
        }
-#endif
+
        return bio_alloc(GFP_KERNEL, npages);
 }
 
@@ -2643,12 +2681,11 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
                                        size_t size, gfp_t flags)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_KMALLOC)) {
                f2fs_show_injection_info(FAULT_KMALLOC);
                return NULL;
        }
-#endif
+
        return kmalloc(size, flags);
 }
 
@@ -2681,12 +2718,11 @@ static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
                                        size_t size, gfp_t flags)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_KVMALLOC)) {
                f2fs_show_injection_info(FAULT_KVMALLOC);
                return NULL;
        }
-#endif
+
        return kvmalloc(size, flags);
 }
 
@@ -2745,13 +2781,39 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
        spin_unlock(&sbi->iostat_lock);
 }
 
-static inline bool is_valid_blkaddr(block_t blkaddr)
+#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META &&      \
+                               (!is_read_io(fio->op) || fio->is_meta))
+
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+                                       block_t blkaddr, int type);
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
+                                       block_t blkaddr, int type)
+{
+       if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
+               f2fs_msg(sbi->sb, KERN_ERR,
+                       "invalid blkaddr: %u, type: %d, run fsck to fix.",
+                       blkaddr, type);
+               f2fs_bug_on(sbi, 1);
+       }
+}
+
+static inline bool __is_valid_data_blkaddr(block_t blkaddr)
 {
        if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
                return false;
        return true;
 }
 
+static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
+                                               block_t blkaddr)
+{
+       if (!__is_valid_data_blkaddr(blkaddr))
+               return false;
+       verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
+       return true;
+}
+
 /*
  * file.c
  */
@@ -2866,16 +2928,21 @@ struct node_info;
 
 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
-void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
                                                struct node_info *ni);
 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
 int f2fs_truncate_xattr_node(struct inode *inode);
-int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino);
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+                                       unsigned int seq_id);
 int f2fs_remove_inode_page(struct inode *inode);
 struct page *f2fs_new_inode_page(struct inode *inode);
 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
@@ -2884,11 +2951,12 @@ struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
 void f2fs_move_node_page(struct page *node_page, int gc_type);
 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
-                       struct writeback_control *wbc, bool atomic);
+                       struct writeback_control *wbc, bool atomic,
+                       unsigned int *seq_id);
 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
                        struct writeback_control *wbc,
                        bool do_balance, enum iostat_type io_type);
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
@@ -2896,7 +2964,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
 void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
-void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct f2fs_summary_block *sum);
 void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
@@ -2974,9 +3042,10 @@ enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
-bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
-                       block_t blkaddr, int type);
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+                                       block_t blkaddr, int type);
 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
                        int type, bool sync);
 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
@@ -3000,6 +3069,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
 void f2fs_update_dirty_page(struct inode *inode, struct page *page);
 void f2fs_remove_dirty_inode(struct inode *inode);
 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi);
 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
 int __init f2fs_create_checkpoint_caches(void);
@@ -3438,7 +3508,7 @@ static inline bool f2fs_may_encrypt(struct inode *inode)
 
        return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
 #else
-       return 0;
+       return false;
 #endif
 }
 
@@ -3449,4 +3519,11 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, int rw)
                        F2FS_I_SB(inode)->s_ndevs);
 }
 
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+                                                       unsigned int type);
+#else
+#define f2fs_build_fault_attr(sbi, rate, type)         do { } while (0)
+#endif
+
 #endif
index 1ada298..8e381b6 100644 (file)
@@ -216,6 +216,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };
+       unsigned int seq_id = 0;
 
        if (unlikely(f2fs_readonly(inode->i_sb)))
                return 0;
@@ -278,7 +279,7 @@ go_write:
        }
 sync_nodes:
        atomic_inc(&sbi->wb_sync_req[NODE]);
-       ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic);
+       ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
        atomic_dec(&sbi->wb_sync_req[NODE]);
        if (ret)
                goto out;
@@ -304,7 +305,7 @@ sync_nodes:
         * given fsync mark.
         */
        if (!atomic) {
-               ret = f2fs_wait_on_node_pages_writeback(sbi, ino);
+               ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
                if (ret)
                        goto out;
        }
@@ -353,13 +354,13 @@ static pgoff_t __get_first_dirty_index(struct address_space *mapping,
        return pgofs;
 }
 
-static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
-                                                       int whence)
+static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
+                               pgoff_t dirty, pgoff_t pgofs, int whence)
 {
        switch (whence) {
        case SEEK_DATA:
                if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
-                       is_valid_blkaddr(blkaddr))
+                       is_valid_data_blkaddr(sbi, blkaddr))
                        return true;
                break;
        case SEEK_HOLE:
@@ -423,7 +424,15 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
                        blkaddr = datablock_addr(dn.inode,
                                        dn.node_page, dn.ofs_in_node);
 
-                       if (__found_offset(blkaddr, dirty, pgofs, whence)) {
+                       if (__is_valid_data_blkaddr(blkaddr) &&
+                               !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
+                                               blkaddr, DATA_GENERIC)) {
+                               f2fs_put_dnode(&dn);
+                               goto fail;
+                       }
+
+                       if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
+                                                       pgofs, whence)) {
                                f2fs_put_dnode(&dn);
                                goto found;
                        }
@@ -516,6 +525,11 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 
                dn->data_blkaddr = NULL_ADDR;
                f2fs_set_data_blkaddr(dn);
+
+               if (__is_valid_data_blkaddr(blkaddr) &&
+                       !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+                       continue;
+
                f2fs_invalidate_blocks(sbi, blkaddr);
                if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
                        clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
@@ -657,12 +671,11 @@ int f2fs_truncate(struct inode *inode)
 
        trace_f2fs_truncate(inode);
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
                f2fs_show_injection_info(FAULT_TRUNCATE);
                return -EIO;
        }
-#endif
+
        /* we should check inline_data size */
        if (!f2fs_may_inline_data(inode)) {
                err = f2fs_convert_inline_inode(inode);
@@ -786,22 +799,26 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
        }
 
        if (attr->ia_valid & ATTR_SIZE) {
-               if (attr->ia_size <= i_size_read(inode)) {
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
-                       truncate_setsize(inode, attr->ia_size);
+               bool to_smaller = (attr->ia_size <= i_size_read(inode));
+
+               down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+               down_write(&F2FS_I(inode)->i_mmap_sem);
+
+               truncate_setsize(inode, attr->ia_size);
+
+               if (to_smaller)
                        err = f2fs_truncate(inode);
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
-                       if (err)
-                               return err;
-               } else {
-                       /*
-                        * do not trim all blocks after i_size if target size is
-                        * larger than i_size.
-                        */
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
-                       truncate_setsize(inode, attr->ia_size);
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
+               /*
+                * do not trim all blocks after i_size if target size is
+                * larger than i_size.
+                */
+               up_write(&F2FS_I(inode)->i_mmap_sem);
+               up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
+               if (err)
+                       return err;
 
+               if (!to_smaller) {
                        /* should convert inline inode here */
                        if (!f2fs_may_inline_data(inode)) {
                                err = f2fs_convert_inline_inode(inode);
@@ -951,14 +968,19 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
                        blk_start = (loff_t)pg_start << PAGE_SHIFT;
                        blk_end = (loff_t)pg_end << PAGE_SHIFT;
+
+                       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                        down_write(&F2FS_I(inode)->i_mmap_sem);
+
                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);
 
                        f2fs_lock_op(sbi);
                        ret = f2fs_truncate_hole(inode, pg_start, pg_end);
                        f2fs_unlock_op(sbi);
+
                        up_write(&F2FS_I(inode)->i_mmap_sem);
+                       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
        }
 
@@ -1061,7 +1083,12 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
                        if (ret)
                                return ret;
 
-                       f2fs_get_node_info(sbi, dn.nid, &ni);
+                       ret = f2fs_get_node_info(sbi, dn.nid, &ni);
+                       if (ret) {
+                               f2fs_put_dnode(&dn);
+                               return ret;
+                       }
+
                        ilen = min((pgoff_t)
                                ADDRS_PER_PAGE(dn.node_page, dst_inode) -
                                                dn.ofs_in_node, len - i);
@@ -1168,25 +1195,33 @@ roll_back:
        return ret;
 }
 
-static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
+static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+       pgoff_t start = offset >> PAGE_SHIFT;
+       pgoff_t end = (offset + len) >> PAGE_SHIFT;
        int ret;
 
        f2fs_balance_fs(sbi, true);
-       f2fs_lock_op(sbi);
 
-       f2fs_drop_extent_tree(inode);
+       /* avoid gc operation during block exchange */
+       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+       down_write(&F2FS_I(inode)->i_mmap_sem);
 
+       f2fs_lock_op(sbi);
+       f2fs_drop_extent_tree(inode);
+       truncate_pagecache(inode, offset);
        ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
        f2fs_unlock_op(sbi);
+
+       up_write(&F2FS_I(inode)->i_mmap_sem);
+       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        return ret;
 }
 
 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 {
-       pgoff_t pg_start, pg_end;
        loff_t new_size;
        int ret;
 
@@ -1201,25 +1236,17 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        if (ret)
                return ret;
 
-       pg_start = offset >> PAGE_SHIFT;
-       pg_end = (offset + len) >> PAGE_SHIFT;
-
-       /* avoid gc operation during block exchange */
-       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
-       down_write(&F2FS_I(inode)->i_mmap_sem);
        /* write out all dirty pages from offset */
        ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        if (ret)
-               goto out_unlock;
-
-       truncate_pagecache(inode, offset);
+               return ret;
 
-       ret = f2fs_do_collapse(inode, pg_start, pg_end);
+       ret = f2fs_do_collapse(inode, offset, len);
        if (ret)
-               goto out_unlock;
+               return ret;
 
        /* write out all moved pages, if possible */
+       down_write(&F2FS_I(inode)->i_mmap_sem);
        filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);
 
@@ -1227,11 +1254,9 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        truncate_pagecache(inode, new_size);
 
        ret = f2fs_truncate_blocks(inode, new_size, true);
+       up_write(&F2FS_I(inode)->i_mmap_sem);
        if (!ret)
                f2fs_i_size_write(inode, new_size);
-out_unlock:
-       up_write(&F2FS_I(inode)->i_mmap_sem);
-       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        return ret;
 }
 
@@ -1297,12 +1322,9 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
        if (ret)
                return ret;
 
-       down_write(&F2FS_I(inode)->i_mmap_sem);
        ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
        if (ret)
-               goto out_sem;
-
-       truncate_pagecache_range(inode, offset, offset + len - 1);
+               return ret;
 
        pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1314,7 +1336,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                ret = fill_zero(inode, pg_start, off_start,
                                                off_end - off_start);
                if (ret)
-                       goto out_sem;
+                       return ret;
 
                new_size = max_t(loff_t, new_size, offset + len);
        } else {
@@ -1322,7 +1344,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        ret = fill_zero(inode, pg_start++, off_start,
                                                PAGE_SIZE - off_start);
                        if (ret)
-                               goto out_sem;
+                               return ret;
 
                        new_size = max_t(loff_t, new_size,
                                        (loff_t)pg_start << PAGE_SHIFT);
@@ -1333,12 +1355,21 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        unsigned int end_offset;
                        pgoff_t end;
 
+                       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+                       down_write(&F2FS_I(inode)->i_mmap_sem);
+
+                       truncate_pagecache_range(inode,
+                               (loff_t)index << PAGE_SHIFT,
+                               ((loff_t)pg_end << PAGE_SHIFT) - 1);
+
                        f2fs_lock_op(sbi);
 
                        set_new_dnode(&dn, inode, NULL, NULL, 0);
                        ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
                        if (ret) {
                                f2fs_unlock_op(sbi);
+                               up_write(&F2FS_I(inode)->i_mmap_sem);
+                               up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                                goto out;
                        }
 
@@ -1347,7 +1378,10 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 
                        ret = f2fs_do_zero_range(&dn, index, end);
                        f2fs_put_dnode(&dn);
+
                        f2fs_unlock_op(sbi);
+                       up_write(&F2FS_I(inode)->i_mmap_sem);
+                       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
                        f2fs_balance_fs(sbi, dn.node_changed);
 
@@ -1375,9 +1409,6 @@ out:
                else
                        f2fs_i_size_write(inode, new_size);
        }
-out_sem:
-       up_write(&F2FS_I(inode)->i_mmap_sem);
-
        return ret;
 }
 
@@ -1406,26 +1437,27 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
        f2fs_balance_fs(sbi, true);
 
-       /* avoid gc operation during block exchange */
-       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
        down_write(&F2FS_I(inode)->i_mmap_sem);
        ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+       up_write(&F2FS_I(inode)->i_mmap_sem);
        if (ret)
-               goto out;
+               return ret;
 
        /* write out all dirty pages from offset */
        ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        if (ret)
-               goto out;
-
-       truncate_pagecache(inode, offset);
+               return ret;
 
        pg_start = offset >> PAGE_SHIFT;
        pg_end = (offset + len) >> PAGE_SHIFT;
        delta = pg_end - pg_start;
        idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 
+       /* avoid gc operation during block exchange */
+       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+       down_write(&F2FS_I(inode)->i_mmap_sem);
+       truncate_pagecache(inode, offset);
+
        while (!ret && idx > pg_start) {
                nr = idx - pg_start;
                if (nr > delta)
@@ -1439,16 +1471,17 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
                                        idx + delta, nr, false);
                f2fs_unlock_op(sbi);
        }
+       up_write(&F2FS_I(inode)->i_mmap_sem);
+       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
        /* write out all moved pages, if possible */
+       down_write(&F2FS_I(inode)->i_mmap_sem);
        filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);
+       up_write(&F2FS_I(inode)->i_mmap_sem);
 
        if (!ret)
                f2fs_i_size_write(inode, new_size);
-out:
-       up_write(&F2FS_I(inode)->i_mmap_sem);
-       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        return ret;
 }
 
@@ -1604,7 +1637,7 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
        struct f2fs_inode_info *fi = F2FS_I(inode);
        unsigned int flags = fi->i_flags;
 
-       if (file_is_encrypt(inode))
+       if (f2fs_encrypted_inode(inode))
                flags |= F2FS_ENCRYPT_FL;
        if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
                flags |= F2FS_INLINE_DATA_FL;
@@ -1688,15 +1721,18 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 
        inode_lock(inode);
 
-       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
-       if (f2fs_is_atomic_file(inode))
+       if (f2fs_is_atomic_file(inode)) {
+               if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
+                       ret = -EINVAL;
                goto out;
+       }
 
        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                goto out;
 
+       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
        if (!get_dirty_pages(inode))
                goto skip_flush;
 
@@ -1704,18 +1740,20 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
                "Unexpected flush for atomic writes: ino=%lu, npages=%u",
                                        inode->i_ino, get_dirty_pages(inode));
        ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
-       if (ret)
+       if (ret) {
+               up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                goto out;
+       }
 skip_flush:
        set_inode_flag(inode, FI_ATOMIC_FILE);
        clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
-       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
+       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
        F2FS_I(inode)->inmem_task = current;
        stat_inc_atomic_write(inode);
        stat_update_max_atomic_write(inode);
 out:
-       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        inode_unlock(inode);
        mnt_drop_write_file(filp);
        return ret;
@@ -1733,9 +1771,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
        if (ret)
                return ret;
 
-       inode_lock(inode);
+       f2fs_balance_fs(F2FS_I_SB(inode), true);
 
-       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+       inode_lock(inode);
 
        if (f2fs_is_volatile_file(inode)) {
                ret = -EINVAL;
@@ -1761,7 +1799,6 @@ err_out:
                clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
                ret = -EINVAL;
        }
-       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        inode_unlock(inode);
        mnt_drop_write_file(filp);
        return ret;
@@ -1853,6 +1890,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
                ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
        }
 
+       clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+
        inode_unlock(inode);
 
        mnt_drop_write_file(filp);
@@ -1866,7 +1905,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct super_block *sb = sbi->sb;
        __u32 in;
-       int ret;
+       int ret = 0;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -1889,6 +1928,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
                }
                if (sb) {
                        f2fs_stop_checkpoint(sbi, false);
+                       set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
                        thaw_bdev(sb->s_bdev, sb);
                }
                break;
@@ -1898,13 +1938,16 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
                if (ret)
                        goto out;
                f2fs_stop_checkpoint(sbi, false);
+               set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
                break;
        case F2FS_GOING_DOWN_NOSYNC:
                f2fs_stop_checkpoint(sbi, false);
+               set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
                break;
        case F2FS_GOING_DOWN_METAFLUSH:
                f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
                f2fs_stop_checkpoint(sbi, false);
+               set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
                break;
        default:
                ret = -EINVAL;
@@ -2107,7 +2150,7 @@ out:
        return ret;
 }
 
-static int f2fs_ioc_f2fs_write_checkpoint(struct file *filp, unsigned long arg)
+static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -2351,15 +2394,10 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
        }
 
        inode_lock(src);
-       down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
        if (src != dst) {
                ret = -EBUSY;
                if (!inode_trylock(dst))
                        goto out;
-               if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) {
-                       inode_unlock(dst);
-                       goto out;
-               }
        }
 
        ret = -EINVAL;
@@ -2404,6 +2442,14 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
                goto out_unlock;
 
        f2fs_balance_fs(sbi, true);
+
+       down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+       if (src != dst) {
+               ret = -EBUSY;
+               if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+                       goto out_src;
+       }
+
        f2fs_lock_op(sbi);
        ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
                                pos_out >> F2FS_BLKSIZE_BITS,
@@ -2416,13 +2462,15 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
                        f2fs_i_size_write(dst, dst_osize);
        }
        f2fs_unlock_op(sbi);
-out_unlock:
-       if (src != dst) {
+
+       if (src != dst)
                up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
+out_src:
+       up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+out_unlock:
+       if (src != dst)
                inode_unlock(dst);
-       }
 out:
-       up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
        inode_unlock(src);
        return ret;
 }
@@ -2594,7 +2642,7 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
 
        if (!pin) {
                clear_inode_flag(inode, FI_PIN_FILE);
-               F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = 1;
+               f2fs_i_gc_failures_write(inode, 0);
                goto done;
        }
 
@@ -2700,7 +2748,7 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case F2FS_IOC_GARBAGE_COLLECT_RANGE:
                return f2fs_ioc_gc_range(filp, arg);
        case F2FS_IOC_WRITE_CHECKPOINT:
-               return f2fs_ioc_f2fs_write_checkpoint(filp, arg);
+               return f2fs_ioc_write_checkpoint(filp, arg);
        case F2FS_IOC_DEFRAGMENT:
                return f2fs_ioc_defragment(filp, arg);
        case F2FS_IOC_MOVE_RANGE:
index 772ef64..ada8b80 100644 (file)
@@ -53,12 +53,10 @@ static int gc_thread_func(void *data)
                        continue;
                }
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
                if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
                        f2fs_show_injection_info(FAULT_CHECKPOINT);
                        f2fs_stop_checkpoint(sbi, false);
                }
-#endif
 
                if (!sb_start_write_trylock(sbi->sb))
                        continue;
@@ -517,7 +515,11 @@ next_step:
                        continue;
                }
 
-               f2fs_get_node_info(sbi, nid, &ni);
+               if (f2fs_get_node_info(sbi, nid, &ni)) {
+                       f2fs_put_page(node_page, 1);
+                       continue;
+               }
+
                if (ni.blk_addr != start_addr + off) {
                        f2fs_put_page(node_page, 1);
                        continue;
@@ -576,7 +578,10 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
        if (IS_ERR(node_page))
                return false;
 
-       f2fs_get_node_info(sbi, nid, dni);
+       if (f2fs_get_node_info(sbi, nid, dni)) {
+               f2fs_put_page(node_page, 1);
+               return false;
+       }
 
        if (sum->version != dni->version) {
                f2fs_msg(sbi->sb, KERN_WARNING,
@@ -594,6 +599,72 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
        return true;
 }
 
+static int ra_data_block(struct inode *inode, pgoff_t index)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct address_space *mapping = inode->i_mapping;
+       struct dnode_of_data dn;
+       struct page *page;
+       struct extent_info ei = {0, 0, 0};
+       struct f2fs_io_info fio = {
+               .sbi = sbi,
+               .ino = inode->i_ino,
+               .type = DATA,
+               .temp = COLD,
+               .op = REQ_OP_READ,
+               .op_flags = 0,
+               .encrypted_page = NULL,
+               .in_list = false,
+               .retry = false,
+       };
+       int err;
+
+       page = f2fs_grab_cache_page(mapping, index, true);
+       if (!page)
+               return -ENOMEM;
+
+       if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+               dn.data_blkaddr = ei.blk + index - ei.fofs;
+               goto got_it;
+       }
+
+       set_new_dnode(&dn, inode, NULL, NULL, 0);
+       err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+       if (err)
+               goto put_page;
+       f2fs_put_dnode(&dn);
+
+       if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+                                               DATA_GENERIC))) {
+               err = -EFAULT;
+               goto put_page;
+       }
+got_it:
+       /* read page */
+       fio.page = page;
+       fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
+
+       fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
+                                       dn.data_blkaddr,
+                                       FGP_LOCK | FGP_CREAT, GFP_NOFS);
+       if (!fio.encrypted_page) {
+               err = -ENOMEM;
+               goto put_page;
+       }
+
+       err = f2fs_submit_page_bio(&fio);
+       if (err)
+               goto put_encrypted_page;
+       f2fs_put_page(fio.encrypted_page, 0);
+       f2fs_put_page(page, 1);
+       return 0;
+put_encrypted_page:
+       f2fs_put_page(fio.encrypted_page, 1);
+put_page:
+       f2fs_put_page(page, 1);
+       return err;
+}
+
 /*
  * Move data block via META_MAPPING while keeping locked data page.
  * This can be used to move blocks, aka LBAs, directly on disk.
@@ -615,7 +686,7 @@ static void move_data_block(struct inode *inode, block_t bidx,
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
-       struct page *page;
+       struct page *page, *mpage;
        block_t newaddr;
        int err;
        bool lfs_mode = test_opt(fio.sbi, LFS);
@@ -655,7 +726,10 @@ static void move_data_block(struct inode *inode, block_t bidx,
         */
        f2fs_wait_on_page_writeback(page, DATA, true);
 
-       f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+       err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+       if (err)
+               goto put_out;
+
        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
 
        /* read page */
@@ -675,6 +749,23 @@ static void move_data_block(struct inode *inode, block_t bidx,
                goto recover_block;
        }
 
+       mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
+                                       fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
+       if (mpage) {
+               bool updated = false;
+
+               if (PageUptodate(mpage)) {
+                       memcpy(page_address(fio.encrypted_page),
+                                       page_address(mpage), PAGE_SIZE);
+                       updated = true;
+               }
+               f2fs_put_page(mpage, 1);
+               invalidate_mapping_pages(META_MAPPING(fio.sbi),
+                                       fio.old_blkaddr, fio.old_blkaddr);
+               if (updated)
+                       goto write_page;
+       }
+
        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_page_out;
@@ -691,6 +782,7 @@ static void move_data_block(struct inode *inode, block_t bidx,
                goto put_page_out;
        }
 
+write_page:
        set_page_dirty(fio.encrypted_page);
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
        if (clear_page_dirty_for_io(fio.encrypted_page))
@@ -865,22 +957,30 @@ next_step:
                        if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;
 
-                       /* if inode uses special I/O path, let's go phase 3 */
-                       if (f2fs_post_read_required(inode)) {
-                               add_gc_inode(gc_list, inode);
-                               continue;
-                       }
-
                        if (!down_write_trylock(
                                &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
                                iput(inode);
+                               sbi->skipped_gc_rwsem++;
+                               continue;
+                       }
+
+                       start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
+                                                               ofs_in_node;
+
+                       if (f2fs_post_read_required(inode)) {
+                               int err = ra_data_block(inode, start_bidx);
+
+                               up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+                               if (err) {
+                                       iput(inode);
+                                       continue;
+                               }
+                               add_gc_inode(gc_list, inode);
                                continue;
                        }
 
-                       start_bidx = f2fs_start_bidx_of_node(nofs, inode);
                        data_page = f2fs_get_read_data_page(inode,
-                                       start_bidx + ofs_in_node, REQ_RAHEAD,
-                                       true);
+                                               start_bidx, REQ_RAHEAD, true);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                        if (IS_ERR(data_page)) {
                                iput(inode);
@@ -903,6 +1003,7 @@ next_step:
                                        continue;
                                if (!down_write_trylock(
                                                &fi->i_gc_rwsem[WRITE])) {
+                                       sbi->skipped_gc_rwsem++;
                                        up_write(&fi->i_gc_rwsem[READ]);
                                        continue;
                                }
@@ -986,7 +1087,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
                        goto next;
 
                sum = page_address(sum_page);
-               f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+               if (type != GET_SUM_TYPE((&sum->footer))) {
+                       f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
+                               "type [%d, %d] in SSA and SIT",
+                               segno, type, GET_SUM_TYPE((&sum->footer)));
+                       set_sbi_flag(sbi, SBI_NEED_FSCK);
+                       goto next;
+               }
 
                /*
                 * this is to avoid deadlock:
@@ -1034,6 +1141,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
                .iroot = RADIX_TREE_INIT(GFP_NOFS),
        };
        unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
+       unsigned long long first_skipped;
        unsigned int skipped_round = 0, round = 0;
 
        trace_f2fs_gc_begin(sbi->sb, sync, background,
@@ -1046,6 +1154,8 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
                                prefree_segments(sbi));
 
        cpc.reason = __get_cp_reason(sbi);
+       sbi->skipped_gc_rwsem = 0;
+       first_skipped = last_skipped;
 gc_more:
        if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
                ret = -EINVAL;
@@ -1087,7 +1197,8 @@ gc_more:
        total_freed += seg_freed;
 
        if (gc_type == FG_GC) {
-               if (sbi->skipped_atomic_files[FG_GC] > last_skipped)
+               if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
+                                               sbi->skipped_gc_rwsem)
                        skipped_round++;
                last_skipped = sbi->skipped_atomic_files[FG_GC];
                round++;
@@ -1096,15 +1207,23 @@ gc_more:
        if (gc_type == FG_GC)
                sbi->cur_victim_sec = NULL_SEGNO;
 
-       if (!sync) {
-               if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
-                       if (skipped_round > MAX_SKIP_ATOMIC_COUNT &&
-                               skipped_round * 2 >= round)
-                               f2fs_drop_inmem_pages_all(sbi, true);
+       if (sync)
+               goto stop;
+
+       if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
+               if (skipped_round <= MAX_SKIP_GC_COUNT ||
+                                       skipped_round * 2 < round) {
                        segno = NULL_SEGNO;
                        goto gc_more;
                }
 
+               if (first_skipped < last_skipped &&
+                               (last_skipped - first_skipped) >
+                                               sbi->skipped_gc_rwsem) {
+                       f2fs_drop_inmem_pages_all(sbi, true);
+                       segno = NULL_SEGNO;
+                       goto gc_more;
+               }
                if (gc_type == FG_GC)
                        ret = f2fs_write_checkpoint(sbi, &cpc);
        }
index b71d9f6..202bfff 100644 (file)
@@ -139,6 +139,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
                .encrypted_page = NULL,
                .io_type = FS_DATA_IO,
        };
+       struct node_info ni;
        int dirty, err;
 
        if (!f2fs_exist_data(dn->inode))
@@ -148,6 +149,24 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
        if (err)
                return err;
 
+       err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
+       if (err) {
+               f2fs_put_dnode(dn);
+               return err;
+       }
+
+       fio.version = ni.version;
+
+       if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
+               f2fs_put_dnode(dn);
+               set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
+               f2fs_msg(fio.sbi->sb, KERN_WARNING,
+                       "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+                       "run fsck to fix.",
+                       __func__, dn->inode->i_ino, dn->data_blkaddr);
+               return -EINVAL;
+       }
+
        f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
 
        f2fs_do_read_inline_data(page, dn->inode_page);
@@ -381,6 +400,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
        if (err)
                goto out;
 
+       if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
+               f2fs_put_dnode(&dn);
+               set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+               f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
+                       "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+                       "run fsck to fix.",
+                       __func__, dir->i_ino, dn.data_blkaddr);
+               err = -EINVAL;
+               goto out;
+       }
+
        f2fs_wait_on_page_writeback(page, DATA, true);
 
        dentry_blk = page_address(page);
@@ -495,6 +525,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
        return 0;
 recover:
        lock_page(ipage);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
        memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
        f2fs_i_depth_write(dir, 0);
        f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
@@ -686,7 +717,10 @@ int f2fs_inline_data_fiemap(struct inode *inode,
                ilen = start + len;
        ilen -= start;
 
-       f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+       err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+       if (err)
+               goto out;
+
        byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
        byteaddr += (char *)inline_data_addr(inode, ipage) -
                                        (char *)F2FS_INODE(ipage);
index 27e18b5..292f787 100644 (file)
@@ -68,13 +68,16 @@ static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
        }
 }
 
-static bool __written_first_block(struct f2fs_inode *ri)
+static int __written_first_block(struct f2fs_sb_info *sbi,
+                                       struct f2fs_inode *ri)
 {
        block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
 
-       if (is_valid_blkaddr(addr))
-               return true;
-       return false;
+       if (!__is_valid_data_blkaddr(addr))
+               return 1;
+       if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
+               return -EFAULT;
+       return 0;
 }
 
 static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -121,7 +124,7 @@ static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page
        if (!f2fs_sb_has_inode_chksum(sbi->sb))
                return false;
 
-       if (!RAW_IS_INODE(F2FS_NODE(page)) || !(ri->i_inline & F2FS_EXTRA_ATTR))
+       if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
                return false;
 
        if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
@@ -159,8 +162,15 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
        struct f2fs_inode *ri;
        __u32 provided, calculated;
 
+       if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
+               return true;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+       if (!f2fs_enable_inode_chksum(sbi, page))
+#else
        if (!f2fs_enable_inode_chksum(sbi, page) ||
                        PageDirty(page) || PageWriteback(page))
+#endif
                return true;
 
        ri = &F2FS_NODE(page)->i;
@@ -185,9 +195,31 @@ void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
        ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
 }
 
-static bool sanity_check_inode(struct inode *inode)
+static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+       unsigned long long iblocks;
+
+       iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
+       if (!iblocks) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
+                       "run fsck to fix.",
+                       __func__, inode->i_ino, iblocks);
+               return false;
+       }
+
+       if (ino_of_node(node_page) != nid_of_node(node_page)) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "%s: corrupted inode footer i_ino=%lx, ino,nid: "
+                       "[%u, %u] run fsck to fix.",
+                       __func__, inode->i_ino,
+                       ino_of_node(node_page), nid_of_node(node_page));
+               return false;
+       }
 
        if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
                        && !f2fs_has_extra_attr(inode)) {
@@ -197,6 +229,64 @@ static bool sanity_check_inode(struct inode *inode)
                        __func__, inode->i_ino);
                return false;
        }
+
+       if (f2fs_has_extra_attr(inode) &&
+                       !f2fs_sb_has_extra_attr(sbi->sb)) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "%s: inode (ino=%lx) is with extra_attr, "
+                       "but extra_attr feature is off",
+                       __func__, inode->i_ino);
+               return false;
+       }
+
+       if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
+                       fi->i_extra_isize % sizeof(__le32)) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
+                       "max: %zu",
+                       __func__, inode->i_ino, fi->i_extra_isize,
+                       F2FS_TOTAL_EXTRA_ATTR_SIZE);
+               return false;
+       }
+
+       if (F2FS_I(inode)->extent_tree) {
+               struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
+
+               if (ei->len &&
+                       (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
+                       !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
+                                                       DATA_GENERIC))) {
+                       set_sbi_flag(sbi, SBI_NEED_FSCK);
+                       f2fs_msg(sbi->sb, KERN_WARNING,
+                               "%s: inode (ino=%lx) extent info [%u, %u, %u] "
+                               "is incorrect, run fsck to fix",
+                               __func__, inode->i_ino,
+                               ei->blk, ei->fofs, ei->len);
+                       return false;
+               }
+       }
+
+       if (f2fs_has_inline_data(inode) &&
+                       (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "%s: inode (ino=%lx, mode=%u) should not have "
+                       "inline_data, run fsck to fix",
+                       __func__, inode->i_ino, inode->i_mode);
+               return false;
+       }
+
+       if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "%s: inode (ino=%lx, mode=%u) should not have "
+                       "inline_dentry, run fsck to fix",
+                       __func__, inode->i_ino, inode->i_mode);
+               return false;
+       }
+
        return true;
 }
 
@@ -207,6 +297,7 @@ static int do_read_inode(struct inode *inode)
        struct page *node_page;
        struct f2fs_inode *ri;
        projid_t i_projid;
+       int err;
 
        /* Check if ino is within scope */
        if (f2fs_check_nid_range(sbi, inode->i_ino))
@@ -268,6 +359,11 @@ static int do_read_inode(struct inode *inode)
                fi->i_inline_xattr_size = 0;
        }
 
+       if (!sanity_check_inode(inode, node_page)) {
+               f2fs_put_page(node_page, 1);
+               return -EINVAL;
+       }
+
        /* check data exist */
        if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
                __recover_inline_status(inode, node_page);
@@ -275,8 +371,15 @@ static int do_read_inode(struct inode *inode)
        /* get rdev by using inline_info */
        __get_inode_rdev(inode, ri);
 
-       if (__written_first_block(ri))
-               set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+       if (S_ISREG(inode->i_mode)) {
+               err = __written_first_block(sbi, ri);
+               if (err < 0) {
+                       f2fs_put_page(node_page, 1);
+                       return err;
+               }
+               if (!err)
+                       set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+       }
 
        if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
                fi->last_disk_size = inode->i_size;
@@ -330,10 +433,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
        ret = do_read_inode(inode);
        if (ret)
                goto bad_inode;
-       if (!sanity_check_inode(inode)) {
-               ret = -EINVAL;
-               goto bad_inode;
-       }
 make_now:
        if (ino == F2FS_NODE_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_node_aops;
@@ -474,6 +573,10 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
        F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
        F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
        F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+       f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
+#endif
 }
 
 void f2fs_update_inode_page(struct inode *inode)
@@ -558,12 +661,11 @@ retry:
        if (F2FS_HAS_BLOCKS(inode))
                err = f2fs_truncate(inode);
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
                f2fs_show_injection_info(FAULT_EVICT_INODE);
                err = -EIO;
        }
-#endif
+
        if (!err) {
                f2fs_lock_op(sbi);
                err = f2fs_remove_inode_page(inode);
@@ -626,6 +728,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct node_info ni;
+       int err;
 
        /*
         * clear nlink of inode in order to release resource of inode
@@ -648,10 +751,16 @@ void f2fs_handle_failed_inode(struct inode *inode)
         * so we can prevent losing this orphan when encoutering checkpoint
         * and following suddenly power-off.
         */
-       f2fs_get_node_info(sbi, inode->i_ino, &ni);
+       err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+       if (err) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "May loss orphan inode, run fsck to fix.");
+               goto out;
+       }
 
        if (ni.blk_addr != NULL_ADDR) {
-               int err = f2fs_acquire_orphan_inode(sbi);
+               err = f2fs_acquire_orphan_inode(sbi);
                if (err) {
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        f2fs_msg(sbi->sb, KERN_WARNING,
@@ -664,6 +773,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
                set_inode_flag(inode, FI_FREE_NID);
        }
 
+out:
        f2fs_unlock_op(sbi);
 
        /* iput will drop the inode object */
index bdd0a7f..5758af6 100644 (file)
@@ -246,7 +246,7 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
                return -EINVAL;
 
        if (hot) {
-               strncpy(extlist[count], name, strlen(name));
+               memcpy(extlist[count], name, strlen(name));
                sbi->raw_super->hot_ext_count = hot_count + 1;
        } else {
                char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
@@ -254,7 +254,7 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
                memcpy(buf, &extlist[cold_count],
                                F2FS_EXTENSION_LEN * hot_count);
                memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
-               strncpy(extlist[cold_count], name, strlen(name));
+               memcpy(extlist[cold_count], name, strlen(name));
                memcpy(&extlist[cold_count + 1], buf,
                                F2FS_EXTENSION_LEN * hot_count);
                sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
index b72fac4..1af0805 100644 (file)
@@ -28,6 +28,7 @@
 static struct kmem_cache *nat_entry_slab;
 static struct kmem_cache *free_nid_slab;
 static struct kmem_cache *nat_entry_set_slab;
+static struct kmem_cache *fsync_node_entry_slab;
 
 /*
  * Check whether the given nid is within node id range.
@@ -112,25 +113,22 @@ static void clear_node_page_dirty(struct page *page)
 
 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 {
-       pgoff_t index = current_nat_addr(sbi, nid);
-       return f2fs_get_meta_page(sbi, index);
+       return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
 }
 
 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 {
        struct page *src_page;
        struct page *dst_page;
-       pgoff_t src_off;
        pgoff_t dst_off;
        void *src_addr;
        void *dst_addr;
        struct f2fs_nm_info *nm_i = NM_I(sbi);
 
-       src_off = current_nat_addr(sbi, nid);
-       dst_off = next_nat_addr(sbi, src_off);
+       dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
 
        /* get current nat block page with lock */
-       src_page = f2fs_get_meta_page(sbi, src_off);
+       src_page = get_current_nat_page(sbi, nid);
        dst_page = f2fs_grab_meta_page(sbi, dst_off);
        f2fs_bug_on(sbi, PageDirty(src_page));
 
@@ -176,14 +174,30 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
 
        if (raw_ne)
                node_info_from_raw_nat(&ne->ni, raw_ne);
+
+       spin_lock(&nm_i->nat_list_lock);
        list_add_tail(&ne->list, &nm_i->nat_entries);
+       spin_unlock(&nm_i->nat_list_lock);
+
        nm_i->nat_cnt++;
        return ne;
 }
 
 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
 {
-       return radix_tree_lookup(&nm_i->nat_root, n);
+       struct nat_entry *ne;
+
+       ne = radix_tree_lookup(&nm_i->nat_root, n);
+
+       /* for recent accessed nat entry, move it to tail of lru list */
+       if (ne && !get_nat_flag(ne, IS_DIRTY)) {
+               spin_lock(&nm_i->nat_list_lock);
+               if (!list_empty(&ne->list))
+                       list_move_tail(&ne->list, &nm_i->nat_entries);
+               spin_unlock(&nm_i->nat_list_lock);
+       }
+
+       return ne;
 }
 
 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
@@ -194,7 +208,6 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
 
 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
 {
-       list_del(&e->list);
        radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
        nm_i->nat_cnt--;
        __free_nat_entry(e);
@@ -245,16 +258,21 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
        nm_i->dirty_nat_cnt++;
        set_nat_flag(ne, IS_DIRTY, true);
 refresh_list:
+       spin_lock(&nm_i->nat_list_lock);
        if (new_ne)
                list_del_init(&ne->list);
        else
                list_move_tail(&ne->list, &head->entry_list);
+       spin_unlock(&nm_i->nat_list_lock);
 }
 
 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
                struct nat_entry_set *set, struct nat_entry *ne)
 {
+       spin_lock(&nm_i->nat_list_lock);
        list_move_tail(&ne->list, &nm_i->nat_entries);
+       spin_unlock(&nm_i->nat_list_lock);
+
        set_nat_flag(ne, IS_DIRTY, false);
        set->entry_cnt--;
        nm_i->dirty_nat_cnt--;
@@ -267,6 +285,72 @@ static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
                                                        start, nr);
 }
 
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
+{
+       return NODE_MAPPING(sbi) == page->mapping &&
+                       IS_DNODE(page) && is_cold_node(page);
+}
+
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+       spin_lock_init(&sbi->fsync_node_lock);
+       INIT_LIST_HEAD(&sbi->fsync_node_list);
+       sbi->fsync_seg_id = 0;
+       sbi->fsync_node_num = 0;
+}
+
+static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
+                                                       struct page *page)
+{
+       struct fsync_node_entry *fn;
+       unsigned long flags;
+       unsigned int seq_id;
+
+       fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);
+
+       get_page(page);
+       fn->page = page;
+       INIT_LIST_HEAD(&fn->list);
+
+       spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+       list_add_tail(&fn->list, &sbi->fsync_node_list);
+       fn->seq_id = sbi->fsync_seg_id++;
+       seq_id = fn->seq_id;
+       sbi->fsync_node_num++;
+       spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+
+       return seq_id;
+}
+
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
+{
+       struct fsync_node_entry *fn;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+       list_for_each_entry(fn, &sbi->fsync_node_list, list) {
+               if (fn->page == page) {
+                       list_del(&fn->list);
+                       sbi->fsync_node_num--;
+                       spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+                       kmem_cache_free(fsync_node_entry_slab, fn);
+                       put_page(page);
+                       return;
+               }
+       }
+       spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+       f2fs_bug_on(sbi, 1);
+}
+
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+       sbi->fsync_seg_id = 0;
+       spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+}
+
 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -371,7 +455,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
                        new_blkaddr == NULL_ADDR);
        f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
                        new_blkaddr == NEW_ADDR);
-       f2fs_bug_on(sbi, is_valid_blkaddr(nat_get_blkaddr(e)) &&
+       f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
                        new_blkaddr == NEW_ADDR);
 
        /* increment version no as node is removed */
@@ -382,7 +466,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 
        /* change address */
        nat_set_blkaddr(e, new_blkaddr);
-       if (!is_valid_blkaddr(new_blkaddr))
+       if (!is_valid_data_blkaddr(sbi, new_blkaddr))
                set_nat_flag(e, IS_CHECKPOINTED, false);
        __set_nat_cache_dirty(nm_i, e);
 
@@ -405,13 +489,25 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
        if (!down_write_trylock(&nm_i->nat_tree_lock))
                return 0;
 
-       while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
+       spin_lock(&nm_i->nat_list_lock);
+       while (nr_shrink) {
                struct nat_entry *ne;
+
+               if (list_empty(&nm_i->nat_entries))
+                       break;
+
                ne = list_first_entry(&nm_i->nat_entries,
                                        struct nat_entry, list);
+               list_del(&ne->list);
+               spin_unlock(&nm_i->nat_list_lock);
+
                __del_from_nat_cache(nm_i, ne);
                nr_shrink--;
+
+               spin_lock(&nm_i->nat_list_lock);
        }
+       spin_unlock(&nm_i->nat_list_lock);
+
        up_write(&nm_i->nat_tree_lock);
        return nr - nr_shrink;
 }
@@ -419,7 +515,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 /*
  * This function always returns success
  */
-void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
                                                struct node_info *ni)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -443,7 +539,7 @@ void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
                ni->blk_addr = nat_get_blkaddr(e);
                ni->version = nat_get_version(e);
                up_read(&nm_i->nat_tree_lock);
-               return;
+               return 0;
        }
 
        memset(&ne, 0, sizeof(struct f2fs_nat_entry));
@@ -466,6 +562,9 @@ void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
        up_read(&nm_i->nat_tree_lock);
 
        page = f2fs_get_meta_page(sbi, index);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
        nat_blk = (struct f2fs_nat_block *)page_address(page);
        ne = nat_blk->entries[nid - start_nid];
        node_info_from_raw_nat(ni, &ne);
@@ -473,6 +572,7 @@ void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
 cache:
        /* cache nat entry */
        cache_nat_entry(sbi, nid, &ne);
+       return 0;
 }
 
 /*
@@ -722,12 +822,15 @@ release_out:
        return err;
 }
 
-static void truncate_node(struct dnode_of_data *dn)
+static int truncate_node(struct dnode_of_data *dn)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct node_info ni;
+       int err;
 
-       f2fs_get_node_info(sbi, dn->nid, &ni);
+       err = f2fs_get_node_info(sbi, dn->nid, &ni);
+       if (err)
+               return err;
 
        /* Deallocate node address */
        f2fs_invalidate_blocks(sbi, ni.blk_addr);
@@ -750,11 +853,14 @@ static void truncate_node(struct dnode_of_data *dn)
 
        dn->node_page = NULL;
        trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
+
+       return 0;
 }
 
 static int truncate_dnode(struct dnode_of_data *dn)
 {
        struct page *page;
+       int err;
 
        if (dn->nid == 0)
                return 1;
@@ -770,7 +876,10 @@ static int truncate_dnode(struct dnode_of_data *dn)
        dn->node_page = page;
        dn->ofs_in_node = 0;
        f2fs_truncate_data_blocks(dn);
-       truncate_node(dn);
+       err = truncate_node(dn);
+       if (err)
+               return err;
+
        return 1;
 }
 
@@ -835,7 +944,9 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
        if (!ofs) {
                /* remove current indirect node */
                dn->node_page = page;
-               truncate_node(dn);
+               ret = truncate_node(dn);
+               if (ret)
+                       goto out_err;
                freed++;
        } else {
                f2fs_put_page(page, 1);
@@ -893,7 +1004,9 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
        if (offset[idx + 1] == 0) {
                dn->node_page = pages[idx];
                dn->nid = nid[idx];
-               truncate_node(dn);
+               err = truncate_node(dn);
+               if (err)
+                       goto fail;
        } else {
                f2fs_put_page(pages[idx], 1);
        }
@@ -1014,6 +1127,7 @@ int f2fs_truncate_xattr_node(struct inode *inode)
        nid_t nid = F2FS_I(inode)->i_xattr_nid;
        struct dnode_of_data dn;
        struct page *npage;
+       int err;
 
        if (!nid)
                return 0;
@@ -1022,10 +1136,15 @@ int f2fs_truncate_xattr_node(struct inode *inode)
        if (IS_ERR(npage))
                return PTR_ERR(npage);
 
+       set_new_dnode(&dn, inode, NULL, npage, nid);
+       err = truncate_node(&dn);
+       if (err) {
+               f2fs_put_page(npage, 1);
+               return err;
+       }
+
        f2fs_i_xnid_write(inode, 0);
 
-       set_new_dnode(&dn, inode, NULL, npage, nid);
-       truncate_node(&dn);
        return 0;
 }
 
@@ -1055,11 +1174,19 @@ int f2fs_remove_inode_page(struct inode *inode)
                f2fs_truncate_data_blocks_range(&dn, 1);
 
        /* 0 is possible, after f2fs_new_inode() has failed */
+       if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+               f2fs_put_dnode(&dn);
+               return -EIO;
+       }
        f2fs_bug_on(F2FS_I_SB(inode),
                        inode->i_blocks != 0 && inode->i_blocks != 8);
 
        /* will put inode & node pages */
-       truncate_node(&dn);
+       err = truncate_node(&dn);
+       if (err) {
+               f2fs_put_dnode(&dn);
+               return err;
+       }
        return 0;
 }
 
@@ -1092,7 +1219,11 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
                goto fail;
 
 #ifdef CONFIG_F2FS_CHECK_FS
-       f2fs_get_node_info(sbi, dn->nid, &new_ni);
+       err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
+       if (err) {
+               dec_valid_node_count(sbi, dn->inode, !ofs);
+               goto fail;
+       }
        f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
 #endif
        new_ni.nid = dn->nid;
@@ -1140,13 +1271,21 @@ static int read_node_page(struct page *page, int op_flags)
                .page = page,
                .encrypted_page = NULL,
        };
+       int err;
 
-       if (PageUptodate(page))
+       if (PageUptodate(page)) {
+#ifdef CONFIG_F2FS_CHECK_FS
+               f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
+#endif
                return LOCKED_PAGE;
+       }
 
-       f2fs_get_node_info(sbi, page->index, &ni);
+       err = f2fs_get_node_info(sbi, page->index, &ni);
+       if (err)
+               return err;
 
-       if (unlikely(ni.blk_addr == NULL_ADDR)) {
+       if (unlikely(ni.blk_addr == NULL_ADDR) ||
+                       is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
                ClearPageUptodate(page);
                return -ENOENT;
        }
@@ -1348,7 +1487,7 @@ continue_unlock:
 
 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
                                struct writeback_control *wbc, bool do_balance,
-                               enum iostat_type io_type)
+                               enum iostat_type io_type, unsigned int *seq_id)
 {
        struct f2fs_sb_info *sbi = F2FS_P_SB(page);
        nid_t nid;
@@ -1365,6 +1504,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
                .io_type = io_type,
                .io_wbc = wbc,
        };
+       unsigned int seq;
 
        trace_f2fs_writepage(page, NODE);
 
@@ -1374,10 +1514,17 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto redirty_out;
 
+       if (wbc->sync_mode == WB_SYNC_NONE &&
+                       IS_DNODE(page) && is_cold_node(page))
+               goto redirty_out;
+
        /* get old block addr of this node page */
        nid = nid_of_node(page);
        f2fs_bug_on(sbi, page->index != nid);
 
+       if (f2fs_get_node_info(sbi, nid, &ni))
+               goto redirty_out;
+
        if (wbc->for_reclaim) {
                if (!down_read_trylock(&sbi->node_write))
                        goto redirty_out;
@@ -1385,8 +1532,6 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
                down_read(&sbi->node_write);
        }
 
-       f2fs_get_node_info(sbi, nid, &ni);
-
        /* This page is already truncated */
        if (unlikely(ni.blk_addr == NULL_ADDR)) {
                ClearPageUptodate(page);
@@ -1396,11 +1541,22 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
                return 0;
        }
 
+       if (__is_valid_data_blkaddr(ni.blk_addr) &&
+               !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
+               goto redirty_out;
+
        if (atomic && !test_opt(sbi, NOBARRIER))
                fio.op_flags |= WRITE_FLUSH_FUA;
 
        set_page_writeback(page);
        ClearPageError(page);
+
+       if (f2fs_in_warm_node_list(sbi, page)) {
+               seq = f2fs_add_fsync_node_entry(sbi, page);
+               if (seq_id)
+                       *seq_id = seq;
+       }
+
        fio.old_blkaddr = ni.blk_addr;
        f2fs_do_write_node_page(nid, &fio);
        set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
@@ -1448,7 +1604,7 @@ void f2fs_move_node_page(struct page *node_page, int gc_type)
                        goto out_page;
 
                if (__write_node_page(node_page, false, NULL,
-                                       &wbc, false, FS_GC_NODE_IO))
+                                       &wbc, false, FS_GC_NODE_IO, NULL))
                        unlock_page(node_page);
                goto release_page;
        } else {
@@ -1465,11 +1621,13 @@ release_page:
 static int f2fs_write_node_page(struct page *page,
                                struct writeback_control *wbc)
 {
-       return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
+       return __write_node_page(page, false, NULL, wbc, false,
+                                               FS_NODE_IO, NULL);
 }
 
 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
-                       struct writeback_control *wbc, bool atomic)
+                       struct writeback_control *wbc, bool atomic,
+                       unsigned int *seq_id)
 {
        pgoff_t index;
        pgoff_t last_idx = ULONG_MAX;
@@ -1550,7 +1708,7 @@ continue_unlock:
                        ret = __write_node_page(page, atomic &&
                                                page == last_page,
                                                &submitted, wbc, true,
-                                               FS_NODE_IO);
+                                               FS_NODE_IO, seq_id);
                        if (ret) {
                                unlock_page(page);
                                f2fs_put_page(last_page, 0);
@@ -1633,7 +1791,9 @@ next_step:
                                                !is_cold_node(page)))
                                continue;
 lock_node:
-                       if (!trylock_page(page))
+                       if (wbc->sync_mode == WB_SYNC_ALL)
+                               lock_page(page);
+                       else if (!trylock_page(page))
                                continue;
 
                        if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
@@ -1665,7 +1825,7 @@ continue_unlock:
                        set_dentry_mark(page, 0);
 
                        ret = __write_node_page(page, false, &submitted,
-                                               wbc, do_balance, io_type);
+                                               wbc, do_balance, io_type, NULL);
                        if (ret)
                                unlock_page(page);
                        else if (submitted)
@@ -1684,10 +1844,12 @@ continue_unlock:
        }
 
        if (step < 2) {
+               if (wbc->sync_mode == WB_SYNC_NONE && step == 1)
+                       goto out;
                step++;
                goto next_step;
        }
-
+out:
        if (nwritten)
                f2fs_submit_merged_write(sbi, NODE);
 
@@ -1696,30 +1858,40 @@ continue_unlock:
        return ret;
 }
 
-int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+                                               unsigned int seq_id)
 {
-       pgoff_t index = 0;
-       struct pagevec pvec;
+       struct fsync_node_entry *fn;
+       struct page *page;
+       struct list_head *head = &sbi->fsync_node_list;
+       unsigned long flags;
+       unsigned int cur_seq_id = 0;
        int ret2 = 0, ret = 0;
-       int nr_pages;
 
-       pagevec_init(&pvec, 0);
+       while (seq_id && cur_seq_id < seq_id) {
+               spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+               if (list_empty(head)) {
+                       spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+                       break;
+               }
+               fn = list_first_entry(head, struct fsync_node_entry, list);
+               if (fn->seq_id > seq_id) {
+                       spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+                       break;
+               }
+               cur_seq_id = fn->seq_id;
+               page = fn->page;
+               get_page(page);
+               spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 
-       while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-                               PAGECACHE_TAG_WRITEBACK))) {
-               int i;
+               f2fs_wait_on_page_writeback(page, NODE, true);
+               if (TestClearPageError(page))
+                       ret = -EIO;
 
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
+               put_page(page);
 
-                       if (ino && ino_of_node(page) == ino) {
-                               f2fs_wait_on_page_writeback(page, NODE, true);
-                               if (TestClearPageError(page))
-                                       ret = -EIO;
-                       }
-               }
-               pagevec_release(&pvec);
-               cond_resched();
+               if (ret)
+                       break;
        }
 
        if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
@@ -1728,6 +1900,7 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
                ret2 = -EIO;
        if (!ret)
                ret = ret2;
+
        return ret;
 }
 
@@ -1777,6 +1950,10 @@ static int f2fs_set_node_page_dirty(struct page *page)
 
        if (!PageUptodate(page))
                SetPageUptodate(page);
+#ifdef CONFIG_F2FS_CHECK_FS
+       if (IS_INODE(page))
+               f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
@@ -1971,7 +2148,7 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
                kmem_cache_free(free_nid_slab, i);
 }
 
-static void scan_nat_page(struct f2fs_sb_info *sbi,
+static int scan_nat_page(struct f2fs_sb_info *sbi,
                        struct page *nat_page, nid_t start_nid)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1989,7 +2166,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
                        break;
 
                blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
-               f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
+
+               if (blk_addr == NEW_ADDR)
+                       return -EINVAL;
+
                if (blk_addr == NULL_ADDR) {
                        add_free_nid(sbi, start_nid, true, true);
                } else {
@@ -1998,6 +2178,8 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
                        spin_unlock(&NM_I(sbi)->nid_list_lock);
                }
        }
+
+       return 0;
 }
 
 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
@@ -2053,11 +2235,11 @@ out:
        up_read(&nm_i->nat_tree_lock);
 }
 
-static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
                                                bool sync, bool mount)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
-       int i = 0;
+       int i = 0, ret;
        nid_t nid = nm_i->next_scan_nid;
 
        if (unlikely(nid >= nm_i->max_nid))
@@ -2065,17 +2247,17 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
 
        /* Enough entries */
        if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
-               return;
+               return 0;
 
        if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
-               return;
+               return 0;
 
        if (!mount) {
                /* try to find free nids in free_nid_bitmap */
                scan_free_nid_bits(sbi);
 
                if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
-                       return;
+                       return 0;
        }
 
        /* readahead nat pages to be scanned */
@@ -2089,8 +2271,16 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
                                                nm_i->nat_block_bitmap)) {
                        struct page *page = get_current_nat_page(sbi, nid);
 
-                       scan_nat_page(sbi, page, nid);
+                       ret = scan_nat_page(sbi, page, nid);
                        f2fs_put_page(page, 1);
+
+                       if (ret) {
+                               up_read(&nm_i->nat_tree_lock);
+                               f2fs_bug_on(sbi, !mount);
+                               f2fs_msg(sbi->sb, KERN_ERR,
+                                       "NAT is corrupt, run fsck to fix it");
+                               return -EINVAL;
+                       }
                }
 
                nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
@@ -2111,13 +2301,19 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
 
        f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
                                        nm_i->ra_nid_pages, META_NAT, false);
+
+       return 0;
 }
 
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 {
+       int ret;
+
        mutex_lock(&NM_I(sbi)->build_lock);
-       __f2fs_build_free_nids(sbi, sync, mount);
+       ret = __f2fs_build_free_nids(sbi, sync, mount);
        mutex_unlock(&NM_I(sbi)->build_lock);
+
+       return ret;
 }
 
 /*
@@ -2130,12 +2326,11 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i = NULL;
 retry:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
                f2fs_show_injection_info(FAULT_ALLOC_NID);
                return false;
        }
-#endif
+
        spin_lock(&nm_i->nid_list_lock);
 
        if (unlikely(nm_i->available_nids == 0)) {
@@ -2280,12 +2475,16 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
        struct dnode_of_data dn;
        struct node_info ni;
        struct page *xpage;
+       int err;
 
        if (!prev_xnid)
                goto recover_xnid;
 
        /* 1: invalidate the previous xattr nid */
-       f2fs_get_node_info(sbi, prev_xnid, &ni);
+       err = f2fs_get_node_info(sbi, prev_xnid, &ni);
+       if (err)
+               return err;
+
        f2fs_invalidate_blocks(sbi, ni.blk_addr);
        dec_valid_node_count(sbi, inode, false);
        set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -2320,8 +2519,11 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
        nid_t ino = ino_of_node(page);
        struct node_info old_ni, new_ni;
        struct page *ipage;
+       int err;
 
-       f2fs_get_node_info(sbi, ino, &old_ni);
+       err = f2fs_get_node_info(sbi, ino, &old_ni);
+       if (err)
+               return err;
 
        if (unlikely(old_ni.blk_addr != NULL_ADDR))
                return -EINVAL;
@@ -2375,7 +2577,7 @@ retry:
        return 0;
 }
 
-void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct f2fs_summary_block *sum)
 {
        struct f2fs_node *rn;
@@ -2397,6 +2599,9 @@ void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
                for (idx = addr; idx < addr + nrpages; idx++) {
                        struct page *page = f2fs_get_tmp_page(sbi, idx);
 
+                       if (IS_ERR(page))
+                               return PTR_ERR(page);
+
                        rn = F2FS_NODE(page);
                        sum_entry->nid = rn->footer.nid;
                        sum_entry->version = 0;
@@ -2408,6 +2613,7 @@ void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
                invalidate_mapping_pages(META_MAPPING(sbi), addr,
                                                        addr + nrpages);
        }
+       return 0;
 }
 
 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
@@ -2585,6 +2791,13 @@ void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        nid_t set_idx = 0;
        LIST_HEAD(sets);
 
+       /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
+       if (enabled_nat_bits(sbi, cpc)) {
+               down_write(&nm_i->nat_tree_lock);
+               remove_nats_in_journal(sbi);
+               up_write(&nm_i->nat_tree_lock);
+       }
+
        if (!nm_i->dirty_nat_cnt)
                return;
 
@@ -2637,7 +2850,13 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
        nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
                                                nm_i->nat_bits_blocks;
        for (i = 0; i < nm_i->nat_bits_blocks; i++) {
-               struct page *page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+               struct page *page;
+
+               page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+               if (IS_ERR(page)) {
+                       disable_nat_bits(sbi, true);
+                       return PTR_ERR(page);
+               }
 
                memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
                                        page_address(page), F2FS_BLKSIZE);
@@ -2721,6 +2940,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
        INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
        INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
        INIT_LIST_HEAD(&nm_i->nat_entries);
+       spin_lock_init(&nm_i->nat_list_lock);
 
        mutex_init(&nm_i->build_lock);
        spin_lock_init(&nm_i->nid_list_lock);
@@ -2765,8 +2985,8 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
 
        for (i = 0; i < nm_i->nat_blocks; i++) {
                nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
-                               NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
-               if (!nm_i->free_nid_bitmap)
+                       f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
+               if (!nm_i->free_nid_bitmap[i])
                        return -ENOMEM;
        }
 
@@ -2804,8 +3024,7 @@ int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
        /* load free nid status from nat_bits table */
        load_free_nid_bitmap(sbi);
 
-       f2fs_build_free_nids(sbi, true, true);
-       return 0;
+       return f2fs_build_free_nids(sbi, true, true);
 }
 
 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
@@ -2840,8 +3059,13 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
                unsigned idx;
 
                nid = nat_get_nid(natvec[found - 1]) + 1;
-               for (idx = 0; idx < found; idx++)
+               for (idx = 0; idx < found; idx++) {
+                       spin_lock(&nm_i->nat_list_lock);
+                       list_del(&natvec[idx]->list);
+                       spin_unlock(&nm_i->nat_list_lock);
+
                        __del_from_nat_cache(nm_i, natvec[idx]);
+               }
        }
        f2fs_bug_on(sbi, nm_i->nat_cnt);
 
@@ -2896,8 +3120,15 @@ int __init f2fs_create_node_manager_caches(void)
                        sizeof(struct nat_entry_set));
        if (!nat_entry_set_slab)
                goto destroy_free_nid;
+
+       fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
+                       sizeof(struct fsync_node_entry));
+       if (!fsync_node_entry_slab)
+               goto destroy_nat_entry_set;
        return 0;
 
+destroy_nat_entry_set:
+       kmem_cache_destroy(nat_entry_set_slab);
 destroy_free_nid:
        kmem_cache_destroy(free_nid_slab);
 destroy_nat_entry:
@@ -2908,6 +3139,7 @@ fail:
 
 void f2fs_destroy_node_manager_caches(void)
 {
+       kmem_cache_destroy(fsync_node_entry_slab);
        kmem_cache_destroy(nat_entry_set_slab);
        kmem_cache_destroy(free_nid_slab);
        kmem_cache_destroy(nat_entry_slab);
index b95e49e..0f4db7a 100644 (file)
@@ -135,6 +135,11 @@ static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
        return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
 }
 
+static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
+{
+       return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
+}
+
 enum mem_type {
        FREE_NIDS,      /* indicates the free nid list */
        NAT_ENTRIES,    /* indicates the cached nat entry */
@@ -444,6 +449,10 @@ static inline void set_mark(struct page *page, int mark, int type)
        else
                flag &= ~(0x1 << type);
        rn->footer.flag = cpu_to_le32(flag);
+
+#ifdef CONFIG_F2FS_CHECK_FS
+       f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
 }
 #define set_dentry_mark(page, mark)    set_mark(page, mark, DENT_BIT_SHIFT)
 #define set_fsync_mark(page, mark)     set_mark(page, mark, FSYNC_BIT_SHIFT)
index daf81d4..501bb0f 100644 (file)
@@ -241,8 +241,8 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
        struct page *page = NULL;
        block_t blkaddr;
        unsigned int loop_cnt = 0;
-       unsigned int free_blocks = sbi->user_block_count -
-                                       valid_user_blocks(sbi);
+       unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
+                                               valid_user_blocks(sbi);
        int err = 0;
 
        /* get node pages in the current segment */
@@ -252,10 +252,14 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
        while (1) {
                struct fsync_inode_entry *entry;
 
-               if (!f2fs_is_valid_meta_blkaddr(sbi, blkaddr, META_POR))
+               if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
                        return 0;
 
                page = f2fs_get_tmp_page(sbi, blkaddr);
+               if (IS_ERR(page)) {
+                       err = PTR_ERR(page);
+                       break;
+               }
 
                if (!is_recoverable_dnode(page))
                        break;
@@ -471,7 +475,10 @@ retry_dn:
 
        f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
-       f2fs_get_node_info(sbi, dn.nid, &ni);
+       err = f2fs_get_node_info(sbi, dn.nid, &ni);
+       if (err)
+               goto err;
+
        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
        f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
 
@@ -507,14 +514,13 @@ retry_dn:
                }
 
                /* dest is valid block, try to recover from src to dest */
-               if (f2fs_is_valid_meta_blkaddr(sbi, dest, META_POR)) {
+               if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
 
                        if (src == NULL_ADDR) {
                                err = f2fs_reserve_new_block(&dn);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-                               while (err)
+                               while (err &&
+                                      IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
                                        err = f2fs_reserve_new_block(&dn);
-#endif
                                /* We should not get -ENOSPC */
                                f2fs_bug_on(sbi, err);
                                if (err)
@@ -568,12 +574,16 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
        while (1) {
                struct fsync_inode_entry *entry;
 
-               if (!f2fs_is_valid_meta_blkaddr(sbi, blkaddr, META_POR))
+               if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
                        break;
 
                f2fs_ra_meta_pages_cond(sbi, blkaddr);
 
                page = f2fs_get_tmp_page(sbi, blkaddr);
+               if (IS_ERR(page)) {
+                       err = PTR_ERR(page);
+                       break;
+               }
 
                if (!is_recoverable_dnode(page)) {
                        f2fs_put_page(page, 1);
@@ -628,7 +638,8 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
 #endif
 
        if (s_flags & MS_RDONLY) {
-               f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
+               f2fs_msg(sbi->sb, KERN_INFO,
+                               "recover fsync data on readonly fs");
                sbi->sb->s_flags &= ~MS_RDONLY;
        }
 
index 3d0c42e..fdc1772 100644 (file)
@@ -250,7 +250,13 @@ retry:
                                err = -EAGAIN;
                                goto next;
                        }
-                       f2fs_get_node_info(sbi, dn.nid, &ni);
+
+                       err = f2fs_get_node_info(sbi, dn.nid, &ni);
+                       if (err) {
+                               f2fs_put_dnode(&dn);
+                               return err;
+                       }
+
                        if (cur->old_addr == NEW_ADDR) {
                                f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
                                f2fs_update_data_blkaddr(&dn, NEW_ADDR);
@@ -439,8 +445,10 @@ int f2fs_commit_inmem_pages(struct inode *inode)
        int err;
 
        f2fs_balance_fs(sbi, true);
-       f2fs_lock_op(sbi);
 
+       down_write(&fi->i_gc_rwsem[WRITE]);
+
+       f2fs_lock_op(sbi);
        set_inode_flag(inode, FI_ATOMIC_COMMIT);
 
        mutex_lock(&fi->inmem_lock);
@@ -455,6 +463,8 @@ int f2fs_commit_inmem_pages(struct inode *inode)
        clear_inode_flag(inode, FI_ATOMIC_COMMIT);
 
        f2fs_unlock_op(sbi);
+       up_write(&fi->i_gc_rwsem[WRITE]);
+
        return err;
 }
 
@@ -464,12 +474,10 @@ int f2fs_commit_inmem_pages(struct inode *inode)
  */
 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
                f2fs_show_injection_info(FAULT_CHECKPOINT);
                f2fs_stop_checkpoint(sbi, false);
        }
-#endif
 
        /* balance_fs_bg is able to be pending */
        if (need && excess_cached_nats(sbi))
@@ -503,7 +511,8 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
        else
                f2fs_build_free_nids(sbi, false, false);
 
-       if (!is_idle(sbi) && !excess_dirty_nats(sbi))
+       if (!is_idle(sbi) &&
+               (!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
                return;
 
        /* checkpoint is the only way to shrink partial cached entries */
@@ -511,6 +520,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
                        !f2fs_available_free_memory(sbi, INO_ENTRIES) ||
                        excess_prefree_segs(sbi) ||
                        excess_dirty_nats(sbi) ||
+                       excess_dirty_nodes(sbi) ||
                        f2fs_time_over(sbi, CP_TIME)) {
                if (test_opt(sbi, DATA_FLUSH)) {
                        struct blk_plug plug;
@@ -831,9 +841,12 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
        dc->len = len;
        dc->ref = 0;
        dc->state = D_PREP;
+       dc->issuing = 0;
        dc->error = 0;
        init_completion(&dc->wait);
        list_add_tail(&dc->list, pend_list);
+       spin_lock_init(&dc->lock);
+       dc->bio_ref = 0;
        atomic_inc(&dcc->discard_cmd_cnt);
        dcc->undiscard_blks += len;
 
@@ -860,7 +873,7 @@ static void __detach_discard_cmd(struct discard_cmd_control *dcc,
                                                        struct discard_cmd *dc)
 {
        if (dc->state == D_DONE)
-               atomic_dec(&dcc->issing_discard);
+               atomic_sub(dc->issuing, &dcc->issing_discard);
 
        list_del(&dc->list);
        rb_erase(&dc->rb_node, &dcc->root);
@@ -875,9 +888,17 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
                                                        struct discard_cmd *dc)
 {
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+       unsigned long flags;
 
        trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
 
+       spin_lock_irqsave(&dc->lock, flags);
+       if (dc->bio_ref) {
+               spin_unlock_irqrestore(&dc->lock, flags);
+               return;
+       }
+       spin_unlock_irqrestore(&dc->lock, flags);
+
        f2fs_bug_on(sbi, dc->ref);
 
        if (dc->error == -EOPNOTSUPP)
@@ -893,10 +914,17 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
 static void f2fs_submit_discard_endio(struct bio *bio)
 {
        struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
+       unsigned long flags;
 
        dc->error = bio->bi_error;
-       dc->state = D_DONE;
-       complete_all(&dc->wait);
+
+       spin_lock_irqsave(&dc->lock, flags);
+       dc->bio_ref--;
+       if (!dc->bio_ref && dc->state == D_SUBMIT) {
+               dc->state = D_DONE;
+               complete_all(&dc->wait);
+       }
+       spin_unlock_irqrestore(&dc->lock, flags);
        bio_put(bio);
 }
 
@@ -1015,6 +1043,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
        /* common policy */
        dpolicy->type = discard_type;
        dpolicy->sync = true;
+       dpolicy->ordered = false;
        dpolicy->granularity = granularity;
 
        dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
@@ -1026,6 +1055,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
                dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
                dpolicy->io_aware = true;
                dpolicy->sync = false;
+               dpolicy->ordered = true;
                if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
                        dpolicy->granularity = 1;
                        dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
@@ -1043,47 +1073,114 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
        }
 }
 
-
+static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
+                               struct block_device *bdev, block_t lstart,
+                               block_t start, block_t len);
 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
-static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
+static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
                                                struct discard_policy *dpolicy,
-                                               struct discard_cmd *dc)
+                                               struct discard_cmd *dc,
+                                               unsigned int *issued)
 {
+       struct block_device *bdev = dc->bdev;
+       struct request_queue *q = bdev_get_queue(bdev);
+       unsigned int max_discard_blocks =
+                       SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
        struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
                                        &(dcc->fstrim_list) : &(dcc->wait_list);
-       struct bio *bio = NULL;
        int flag = dpolicy->sync ? REQ_SYNC : 0;
+       block_t lstart, start, len, total_len;
+       int err = 0;
 
        if (dc->state != D_PREP)
-               return;
+               return 0;
 
        if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
-               return;
+               return 0;
 
-       trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
+       trace_f2fs_issue_discard(bdev, dc->start, dc->len);
 
-       dc->error = __blkdev_issue_discard(dc->bdev,
-                               SECTOR_FROM_BLOCK(dc->start),
-                               SECTOR_FROM_BLOCK(dc->len),
-                               GFP_NOFS, 0, &bio);
-       if (!dc->error) {
-               /* should keep before submission to avoid D_DONE right away */
-               dc->state = D_SUBMIT;
-               atomic_inc(&dcc->issued_discard);
-               atomic_inc(&dcc->issing_discard);
-               if (bio) {
-                       bio->bi_private = dc;
-                       bio->bi_end_io = f2fs_submit_discard_endio;
-                       submit_bio(flag, bio);
-                       list_move_tail(&dc->list, wait_list);
-                       __check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
+       lstart = dc->lstart;
+       start = dc->start;
+       len = dc->len;
+       total_len = len;
+
+       dc->len = 0;
+
+       while (total_len && *issued < dpolicy->max_requests && !err) {
+               struct bio *bio = NULL;
+               unsigned long flags;
+               bool last = true;
 
-                       f2fs_update_iostat(sbi, FS_DISCARD, 1);
+               if (len > max_discard_blocks) {
+                       len = max_discard_blocks;
+                       last = false;
                }
-       } else {
-               __remove_discard_cmd(sbi, dc);
+
+               (*issued)++;
+               if (*issued == dpolicy->max_requests)
+                       last = true;
+
+               dc->len += len;
+
+               if (time_to_inject(sbi, FAULT_DISCARD)) {
+                       f2fs_show_injection_info(FAULT_DISCARD);
+                       err = -EIO;
+                       goto submit;
+               }
+               err = __blkdev_issue_discard(bdev,
+                                       SECTOR_FROM_BLOCK(start),
+                                       SECTOR_FROM_BLOCK(len),
+                                       GFP_NOFS, 0, &bio);
+submit:
+               if (err) {
+                       spin_lock_irqsave(&dc->lock, flags);
+                       if (dc->state == D_PARTIAL)
+                               dc->state = D_SUBMIT;
+                       spin_unlock_irqrestore(&dc->lock, flags);
+
+                       break;
+               }
+
+               f2fs_bug_on(sbi, !bio);
+
+               /*
+                * should keep before submission to avoid D_DONE
+                * right away
+                */
+               spin_lock_irqsave(&dc->lock, flags);
+               if (last)
+                       dc->state = D_SUBMIT;
+               else
+                       dc->state = D_PARTIAL;
+               dc->bio_ref++;
+               spin_unlock_irqrestore(&dc->lock, flags);
+
+               atomic_inc(&dcc->issing_discard);
+               dc->issuing++;
+               list_move_tail(&dc->list, wait_list);
+
+               /* sanity check on discard range */
+               __check_sit_bitmap(sbi, start, start + len);
+
+               bio->bi_private = dc;
+               bio->bi_end_io = f2fs_submit_discard_endio;
+               submit_bio(flag, bio);
+
+               atomic_inc(&dcc->issued_discard);
+
+               f2fs_update_iostat(sbi, FS_DISCARD, 1);
+
+               lstart += len;
+               start += len;
+               total_len -= len;
+               len = total_len;
        }
+
+       if (!err && len)
+               __update_discard_tree_range(sbi, bdev, lstart, start, len);
+       return err;
 }
 
 static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
@@ -1164,10 +1261,11 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
        struct discard_cmd *dc;
        struct discard_info di = {0};
        struct rb_node **insert_p = NULL, *insert_parent = NULL;
+       struct request_queue *q = bdev_get_queue(bdev);
+       unsigned int max_discard_blocks =
+                       SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
        block_t end = lstart + len;
 
-       mutex_lock(&dcc->cmd_lock);
-
        dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
                                        NULL, lstart,
                                        (struct rb_entry **)&prev_dc,
@@ -1207,7 +1305,8 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
 
                if (prev_dc && prev_dc->state == D_PREP &&
                        prev_dc->bdev == bdev &&
-                       __is_discard_back_mergeable(&di, &prev_dc->di)) {
+                       __is_discard_back_mergeable(&di, &prev_dc->di,
+                                                       max_discard_blocks)) {
                        prev_dc->di.len += di.len;
                        dcc->undiscard_blks += di.len;
                        __relocate_discard_cmd(dcc, prev_dc);
@@ -1218,7 +1317,8 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
 
                if (next_dc && next_dc->state == D_PREP &&
                        next_dc->bdev == bdev &&
-                       __is_discard_front_mergeable(&di, &next_dc->di)) {
+                       __is_discard_front_mergeable(&di, &next_dc->di,
+                                                       max_discard_blocks)) {
                        next_dc->di.lstart = di.lstart;
                        next_dc->di.len += di.len;
                        next_dc->di.start = di.start;
@@ -1241,8 +1341,6 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
                node = rb_next(&prev_dc->rb_node);
                next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
        }
-
-       mutex_unlock(&dcc->cmd_lock);
 }
 
 static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
@@ -1257,10 +1355,72 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
 
                blkstart -= FDEV(devi).start_blk;
        }
+       mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
        __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
+       mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
        return 0;
 }
 
+static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
+                                       struct discard_policy *dpolicy)
+{
+       struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+       struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+       struct rb_node **insert_p = NULL, *insert_parent = NULL;
+       struct discard_cmd *dc;
+       struct blk_plug plug;
+       unsigned int pos = dcc->next_pos;
+       unsigned int issued = 0;
+       bool io_interrupted = false;
+
+       mutex_lock(&dcc->cmd_lock);
+       dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
+                                       NULL, pos,
+                                       (struct rb_entry **)&prev_dc,
+                                       (struct rb_entry **)&next_dc,
+                                       &insert_p, &insert_parent, true);
+       if (!dc)
+               dc = next_dc;
+
+       blk_start_plug(&plug);
+
+       while (dc) {
+               struct rb_node *node;
+               int err = 0;
+
+               if (dc->state != D_PREP)
+                       goto next;
+
+               if (dpolicy->io_aware && !is_idle(sbi)) {
+                       io_interrupted = true;
+                       break;
+               }
+
+               dcc->next_pos = dc->lstart + dc->len;
+               err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+               if (issued >= dpolicy->max_requests)
+                       break;
+next:
+               node = rb_next(&dc->rb_node);
+               if (err)
+                       __remove_discard_cmd(sbi, dc);
+               dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+       }
+
+       blk_finish_plug(&plug);
+
+       if (!dc)
+               dcc->next_pos = 0;
+
+       mutex_unlock(&dcc->cmd_lock);
+
+       if (!issued && io_interrupted)
+               issued = -1;
+
+       return issued;
+}
+
 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
                                        struct discard_policy *dpolicy)
 {
@@ -1268,19 +1428,24 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
        struct list_head *pend_list;
        struct discard_cmd *dc, *tmp;
        struct blk_plug plug;
-       int i, iter = 0, issued = 0;
+       int i, issued = 0;
        bool io_interrupted = false;
 
        for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
                if (i + 1 < dpolicy->granularity)
                        break;
+
+               if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+                       return __issue_discard_cmd_orderly(sbi, dpolicy);
+
                pend_list = &dcc->pend_list[i];
 
                mutex_lock(&dcc->cmd_lock);
                if (list_empty(pend_list))
                        goto next;
-               f2fs_bug_on(sbi,
-                       !f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+               if (unlikely(dcc->rbtree_check))
+                       f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+                                                               &dcc->root));
                blk_start_plug(&plug);
                list_for_each_entry_safe(dc, tmp, pend_list, list) {
                        f2fs_bug_on(sbi, dc->state != D_PREP);
@@ -1288,20 +1453,19 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
                        if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
                                                                !is_idle(sbi)) {
                                io_interrupted = true;
-                               goto skip;
+                               break;
                        }
 
-                       __submit_discard_cmd(sbi, dpolicy, dc);
-                       issued++;
-skip:
-                       if (++iter >= dpolicy->max_requests)
+                       __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+                       if (issued >= dpolicy->max_requests)
                                break;
                }
                blk_finish_plug(&plug);
 next:
                mutex_unlock(&dcc->cmd_lock);
 
-               if (iter >= dpolicy->max_requests)
+               if (issued >= dpolicy->max_requests || io_interrupted)
                        break;
        }
 
@@ -1399,21 +1563,22 @@ next:
        return trimmed;
 }
 
-static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
+static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
                                                struct discard_policy *dpolicy)
 {
        struct discard_policy dp;
+       unsigned int discard_blks;
 
-       if (dpolicy) {
-               __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
-               return;
-       }
+       if (dpolicy)
+               return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
 
        /* wait all */
        __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
-       __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+       discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
        __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
-       __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+       discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+
+       return discard_blks;
 }
 
 /* This should be covered by global mutex, &sit_i->sentry_lock */
@@ -1466,6 +1631,8 @@ bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
 
        /* just to make sure there is no pending discard commands */
        __wait_all_discard_cmd(sbi, NULL);
+
+       f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
        return dropped;
 }
 
@@ -1723,21 +1890,30 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
        unsigned int start = 0, end = -1;
        unsigned int secno, start_segno;
        bool force = (cpc->reason & CP_DISCARD);
+       bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
 
        mutex_lock(&dirty_i->seglist_lock);
 
        while (1) {
                int i;
+
+               if (need_align && end != -1)
+                       end--;
                start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
                if (start >= MAIN_SEGS(sbi))
                        break;
                end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
                                                                start + 1);
 
-               for (i = start; i < end; i++)
-                       clear_bit(i, prefree_map);
+               if (need_align) {
+                       start = rounddown(start, sbi->segs_per_sec);
+                       end = roundup(end, sbi->segs_per_sec);
+               }
 
-               dirty_i->nr_dirty[PRE] -= end - start;
+               for (i = start; i < end; i++) {
+                       if (test_and_clear_bit(i, prefree_map))
+                               dirty_i->nr_dirty[PRE]--;
+               }
 
                if (!test_opt(sbi, DISCARD))
                        continue;
@@ -1831,7 +2007,9 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
        dcc->nr_discards = 0;
        dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
        dcc->undiscard_blks = 0;
+       dcc->next_pos = 0;
        dcc->root = RB_ROOT;
+       dcc->rbtree_check = false;
 
        init_waitqueue_head(&dcc->discard_wait_queue);
        SM_I(sbi)->dcc_info = dcc;
@@ -1981,6 +2159,8 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
        if (addr == NEW_ADDR)
                return;
 
+       invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
+
        /* add it into sit main buffer */
        down_write(&sit_i->sentry_lock);
 
@@ -1999,7 +2179,7 @@ bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
        struct seg_entry *se;
        bool is_cp = false;
 
-       if (!is_valid_blkaddr(blkaddr))
+       if (!is_valid_data_blkaddr(sbi, blkaddr))
                return true;
 
        down_read(&sit_i->sentry_lock);
@@ -2063,7 +2243,7 @@ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
  */
 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
 {
-       return f2fs_get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
+       return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno));
 }
 
 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
@@ -2446,7 +2626,7 @@ bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
        return has_candidate;
 }
 
-static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
                                        struct discard_policy *dpolicy,
                                        unsigned int start, unsigned int end)
 {
@@ -2456,12 +2636,15 @@ static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
        struct discard_cmd *dc;
        struct blk_plug plug;
        int issued;
+       unsigned int trimmed = 0;
 
 next:
        issued = 0;
 
        mutex_lock(&dcc->cmd_lock);
-       f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+       if (unlikely(dcc->rbtree_check))
+               f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+                                                               &dcc->root));
 
        dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
                                        NULL, start,
@@ -2475,6 +2658,7 @@ next:
 
        while (dc && dc->lstart <= end) {
                struct rb_node *node;
+               int err = 0;
 
                if (dc->len < dpolicy->granularity)
                        goto skip;
@@ -2484,19 +2668,24 @@ next:
                        goto skip;
                }
 
-               __submit_discard_cmd(sbi, dpolicy, dc);
+               err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
 
-               if (++issued >= dpolicy->max_requests) {
+               if (issued >= dpolicy->max_requests) {
                        start = dc->lstart + dc->len;
 
+                       if (err)
+                               __remove_discard_cmd(sbi, dc);
+
                        blk_finish_plug(&plug);
                        mutex_unlock(&dcc->cmd_lock);
-                       __wait_all_discard_cmd(sbi, NULL);
+                       trimmed += __wait_all_discard_cmd(sbi, NULL);
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto next;
                }
 skip:
                node = rb_next(&dc->rb_node);
+               if (err)
+                       __remove_discard_cmd(sbi, dc);
                dc = rb_entry_safe(node, struct discard_cmd, rb_node);
 
                if (fatal_signal_pending(current))
@@ -2505,6 +2694,8 @@ skip:
 
        blk_finish_plug(&plug);
        mutex_unlock(&dcc->cmd_lock);
+
+       return trimmed;
 }
 
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
@@ -2517,12 +2708,13 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
        struct discard_policy dpolicy;
        unsigned long long trimmed = 0;
        int err = 0;
+       bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
 
        if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
                return -EINVAL;
 
-       if (end <= MAIN_BLKADDR(sbi))
-               return -EINVAL;
+       if (end < MAIN_BLKADDR(sbi))
+               goto out;
 
        if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
                f2fs_msg(sbi->sb, KERN_WARNING,
@@ -2534,6 +2726,10 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
        start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
        end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
                                                GET_SEGNO(sbi, end);
+       if (need_align) {
+               start_segno = rounddown(start_segno, sbi->segs_per_sec);
+               end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
+       }
 
        cpc.reason = CP_DISCARD;
        cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
@@ -2549,24 +2745,27 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
        if (err)
                goto out;
 
-       start_block = START_BLOCK(sbi, start_segno);
-       end_block = START_BLOCK(sbi, end_segno + 1);
-
-       __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
-       __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
-
        /*
         * We filed discard candidates, but actually we don't need to wait for
         * all of them, since they'll be issued in idle time along with runtime
         * discard option. User configuration looks like using runtime discard
         * or periodic fstrim instead of it.
         */
-       if (!test_opt(sbi, DISCARD)) {
-               trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
+       if (test_opt(sbi, DISCARD))
+               goto out;
+
+       start_block = START_BLOCK(sbi, start_segno);
+       end_block = START_BLOCK(sbi, end_segno + 1);
+
+       __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
+       trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
+                                       start_block, end_block);
+
+       trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
                                        start_block, end_block);
-               range->len = F2FS_BLK_TO_BYTES(trimmed);
-       }
 out:
+       if (!err)
+               range->len = F2FS_BLK_TO_BYTES(trimmed);
        return err;
 }
 
@@ -2719,8 +2918,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
                        return CURSEG_COLD_DATA;
                if (file_is_hot(inode) ||
                                is_inode_flag_set(inode, FI_HOT_DATA) ||
-                               is_inode_flag_set(inode, FI_ATOMIC_FILE) ||
-                               is_inode_flag_set(inode, FI_VOLATILE_FILE))
+                               f2fs_is_atomic_file(inode) ||
+                               f2fs_is_volatile_file(inode))
                        return CURSEG_HOT_DATA;
                /* f2fs_rw_hint_to_seg_type(inode->i_write_hint); */
                return CURSEG_WARM_DATA;
@@ -2862,6 +3061,9 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 reallocate:
        f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
                        &fio->new_blkaddr, sum, type, fio, true);
+       if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
+               invalidate_mapping_pages(META_MAPPING(fio->sbi),
+                                       fio->old_blkaddr, fio->old_blkaddr);
 
        /* writeout dirty page into bdev */
        f2fs_submit_page_write(fio);
@@ -2917,11 +3119,9 @@ void f2fs_outplace_write_data(struct dnode_of_data *dn,
 {
        struct f2fs_sb_info *sbi = fio->sbi;
        struct f2fs_summary sum;
-       struct node_info ni;
 
        f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
-       f2fs_get_node_info(sbi, dn->nid, &ni);
-       set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
+       set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
        do_write_page(&sum, fio);
        f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
 
@@ -3018,8 +3218,11 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 
        if (!recover_curseg || recover_newaddr)
                update_sit_entry(sbi, new_blkaddr, 1);
-       if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+       if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
+               invalidate_mapping_pages(META_MAPPING(sbi),
+                                       old_blkaddr, old_blkaddr);
                update_sit_entry(sbi, old_blkaddr, -1);
+       }
 
        locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
        locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
@@ -3073,7 +3276,7 @@ void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
 {
        struct page *cpage;
 
-       if (!is_valid_blkaddr(blkaddr))
+       if (!is_valid_data_blkaddr(sbi, blkaddr))
                return;
 
        cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
@@ -3083,7 +3286,7 @@ void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
        }
 }
 
-static void read_compacted_summaries(struct f2fs_sb_info *sbi)
+static int read_compacted_summaries(struct f2fs_sb_info *sbi)
 {
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct curseg_info *seg_i;
@@ -3095,6 +3298,8 @@ static void read_compacted_summaries(struct f2fs_sb_info *sbi)
        start = start_sum_block(sbi);
 
        page = f2fs_get_meta_page(sbi, start++);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
        kaddr = (unsigned char *)page_address(page);
 
        /* Step 1: restore nat cache */
@@ -3135,11 +3340,14 @@ static void read_compacted_summaries(struct f2fs_sb_info *sbi)
                        page = NULL;
 
                        page = f2fs_get_meta_page(sbi, start++);
+                       if (IS_ERR(page))
+                               return PTR_ERR(page);
                        kaddr = (unsigned char *)page_address(page);
                        offset = 0;
                }
        }
        f2fs_put_page(page, 1);
+       return 0;
 }
 
 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
@@ -3151,6 +3359,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
        unsigned short blk_off;
        unsigned int segno = 0;
        block_t blk_addr = 0;
+       int err = 0;
 
        /* get segment number and block addr */
        if (IS_DATASEG(type)) {
@@ -3174,6 +3383,8 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
        }
 
        new = f2fs_get_meta_page(sbi, blk_addr);
+       if (IS_ERR(new))
+               return PTR_ERR(new);
        sum = (struct f2fs_summary_block *)page_address(new);
 
        if (IS_NODESEG(type)) {
@@ -3185,7 +3396,9 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
                                ns->ofs_in_node = 0;
                        }
                } else {
-                       f2fs_restore_node_summary(sbi, segno, sum);
+                       err = f2fs_restore_node_summary(sbi, segno, sum);
+                       if (err)
+                               goto out;
                }
        }
 
@@ -3205,8 +3418,9 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
        curseg->alloc_type = ckpt->alloc_type[type];
        curseg->next_blkoff = blk_off;
        mutex_unlock(&curseg->curseg_mutex);
+out:
        f2fs_put_page(new, 1);
-       return 0;
+       return err;
 }
 
 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
@@ -3224,7 +3438,9 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
                                                        META_CP, true);
 
                /* restore for compacted data summary */
-               read_compacted_summaries(sbi);
+               err = read_compacted_summaries(sbi);
+               if (err)
+                       return err;
                type = CURSEG_HOT_NODE;
        }
 
@@ -3355,7 +3571,7 @@ int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
                                        unsigned int segno)
 {
-       return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
+       return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno));
 }
 
 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
@@ -4004,6 +4220,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
                sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
        sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
        sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
+       sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec;
        sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
        sm_info->min_ssr_sections = reserved_sections(sbi);
 
index 38c549d..b3d9e31 100644 (file)
@@ -85,7 +85,7 @@
        (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
 
 #define GET_SEGNO(sbi, blk_addr)                                       \
-       ((!is_valid_blkaddr(blk_addr)) ?                        \
+       ((!is_valid_data_blkaddr(sbi, blk_addr)) ?                      \
        NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),                 \
                GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
 #define BLKS_PER_SEC(sbi)                                      \
@@ -215,7 +215,7 @@ struct segment_allocation {
 #define IS_DUMMY_WRITTEN_PAGE(page)                    \
                (page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
 
-#define MAX_SKIP_ATOMIC_COUNT                  16
+#define MAX_SKIP_GC_COUNT                      16
 
 struct inmem_pages {
        struct list_head list;
@@ -648,13 +648,10 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
 {
        struct f2fs_sb_info *sbi = fio->sbi;
 
-       if (PAGE_TYPE_OF_BIO(fio->type) == META &&
-                               (!is_read_io(fio->op) || fio->is_meta))
-               BUG_ON(blk_addr < SEG0_BLKADDR(sbi) ||
-                               blk_addr >= MAIN_BLKADDR(sbi));
+       if (__is_meta_io(fio))
+               verify_blkaddr(sbi, blk_addr, META_GENERIC);
        else
-               BUG_ON(blk_addr < MAIN_BLKADDR(sbi) ||
-                               blk_addr >= MAX_BLKADDR(sbi));
+               verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
 }
 
 /*
index b43aa47..5e329c0 100644 (file)
@@ -41,7 +41,7 @@ static struct kmem_cache *f2fs_inode_cachep;
 
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 
-char *fault_name[FAULT_MAX] = {
+char *f2fs_fault_name[FAULT_MAX] = {
        [FAULT_KMALLOC]         = "kmalloc",
        [FAULT_KVMALLOC]        = "kvmalloc",
        [FAULT_PAGE_ALLOC]      = "page alloc",
@@ -55,20 +55,24 @@ char *fault_name[FAULT_MAX] = {
        [FAULT_TRUNCATE]        = "truncate fail",
        [FAULT_IO]              = "IO error",
        [FAULT_CHECKPOINT]      = "checkpoint error",
+       [FAULT_DISCARD]         = "discard error",
 };
 
-static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
-                                               unsigned int rate)
+void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+                                                       unsigned int type)
 {
        struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
 
        if (rate) {
                atomic_set(&ffi->inject_ops, 0);
                ffi->inject_rate = rate;
-               ffi->inject_type = (1 << FAULT_MAX) - 1;
-       } else {
-               memset(ffi, 0, sizeof(struct f2fs_fault_info));
        }
+
+       if (type)
+               ffi->inject_type = type;
+
+       if (!rate && !type)
+               memset(ffi, 0, sizeof(struct f2fs_fault_info));
 }
 #endif
 
@@ -113,6 +117,7 @@ enum {
        Opt_mode,
        Opt_io_size_bits,
        Opt_fault_injection,
+       Opt_fault_type,
        Opt_lazytime,
        Opt_nolazytime,
        Opt_quota,
@@ -170,6 +175,7 @@ static match_table_t f2fs_tokens = {
        {Opt_mode, "mode=%s"},
        {Opt_io_size_bits, "io_bits=%u"},
        {Opt_fault_injection, "fault_injection=%u"},
+       {Opt_fault_type, "fault_type=%u"},
        {Opt_lazytime, "lazytime"},
        {Opt_nolazytime, "nolazytime"},
        {Opt_quota, "quota"},
@@ -347,12 +353,6 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
                        "QUOTA feature is enabled, so ignore jquota_fmt");
                F2FS_OPTION(sbi).s_jquota_fmt = 0;
        }
-       if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
-               f2fs_msg(sbi->sb, KERN_INFO,
-                        "Filesystem with quota feature cannot be mounted RDWR "
-                        "without CONFIG_QUOTA");
-               return -1;
-       }
        return 0;
 }
 #endif
@@ -606,7 +606,18 @@ static int parse_options(struct super_block *sb, char *options)
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
 #ifdef CONFIG_F2FS_FAULT_INJECTION
-                       f2fs_build_fault_attr(sbi, arg);
+                       f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
+                       set_opt(sbi, FAULT_INJECTION);
+#else
+                       f2fs_msg(sb, KERN_INFO,
+                               "FAULT_INJECTION was not selected");
+#endif
+                       break;
+               case Opt_fault_type:
+                       if (args->from && match_int(args, &arg))
+                               return -EINVAL;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+                       f2fs_build_fault_attr(sbi, 0, arg);
                        set_opt(sbi, FAULT_INJECTION);
 #else
                        f2fs_msg(sb, KERN_INFO,
@@ -775,6 +786,19 @@ static int parse_options(struct super_block *sb, char *options)
 #ifdef CONFIG_QUOTA
        if (f2fs_check_quota_options(sbi))
                return -EINVAL;
+#else
+       if (f2fs_sb_has_quota_ino(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+               f2fs_msg(sbi->sb, KERN_INFO,
+                        "Filesystem with quota feature cannot be mounted RDWR "
+                        "without CONFIG_QUOTA");
+               return -EINVAL;
+       }
+       if (f2fs_sb_has_project_quota(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+               f2fs_msg(sb, KERN_ERR,
+                       "Filesystem with project quota feature cannot be "
+                       "mounted RDWR without CONFIG_QUOTA");
+               return -EINVAL;
+       }
 #endif
 
        if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
@@ -1030,6 +1054,10 @@ static void f2fs_put_super(struct super_block *sb)
        /* our cp_error case, we can wait for any writeback page */
        f2fs_flush_merged_writes(sbi);
 
+       f2fs_wait_on_all_pages_writeback(sbi);
+
+       f2fs_bug_on(sbi, sbi->fsync_node_num);
+
        iput(sbi->node_inode);
        iput(sbi->meta_inode);
 
@@ -1311,9 +1339,12 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
        if (F2FS_IO_SIZE_BITS(sbi))
                seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
 #ifdef CONFIG_F2FS_FAULT_INJECTION
-       if (test_opt(sbi, FAULT_INJECTION))
+       if (test_opt(sbi, FAULT_INJECTION)) {
                seq_printf(seq, ",fault_injection=%u",
                                F2FS_OPTION(sbi).fault_info.inject_rate);
+               seq_printf(seq, ",fault_type=%u",
+                               F2FS_OPTION(sbi).fault_info.inject_type);
+       }
 #endif
 #ifdef CONFIG_QUOTA
        if (test_opt(sbi, QUOTA))
@@ -1344,6 +1375,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
                seq_printf(seq, ",fsync_mode=%s", "posix");
        else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
                seq_printf(seq, ",fsync_mode=%s", "strict");
+       else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
+               seq_printf(seq, ",fsync_mode=%s", "nobarrier");
        return 0;
 }
 
@@ -1356,7 +1389,8 @@ static void default_options(struct f2fs_sb_info *sbi)
        F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
        F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
        F2FS_OPTION(sbi).test_dummy_encryption = false;
-       sbi->readdir_ra = 1;
+       F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+       F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
 
        set_opt(sbi, BG_GC);
        set_opt(sbi, INLINE_XATTR);
@@ -1366,12 +1400,12 @@ static void default_options(struct f2fs_sb_info *sbi)
        set_opt(sbi, NOHEAP);
        sbi->sb->s_flags |= MS_LAZYTIME;
        set_opt(sbi, FLUSH_MERGE);
-       if (f2fs_sb_has_blkzoned(sbi->sb)) {
-               set_opt_mode(sbi, F2FS_MOUNT_LFS);
+       if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
                set_opt(sbi, DISCARD);
-       } else {
+       if (f2fs_sb_has_blkzoned(sbi->sb))
+               set_opt_mode(sbi, F2FS_MOUNT_LFS);
+       else
                set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
-       }
 
 #ifdef CONFIG_F2FS_FS_XATTR
        set_opt(sbi, XATTR_USER);
@@ -1380,9 +1414,7 @@ static void default_options(struct f2fs_sb_info *sbi)
        set_opt(sbi, POSIX_ACL);
 #endif
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-       f2fs_build_fault_attr(sbi, 0);
-#endif
+       f2fs_build_fault_attr(sbi, 0, 0);
 }
 
 #ifdef CONFIG_QUOTA
@@ -2233,9 +2265,9 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                return 1;
        }
 
-       if (secs_per_zone > total_sections) {
+       if (secs_per_zone > total_sections || !secs_per_zone) {
                f2fs_msg(sb, KERN_INFO,
-                       "Wrong secs_per_zone (%u > %u)",
+                       "Wrong secs_per_zone / total_sections (%u, %u)",
                        secs_per_zone, total_sections);
                return 1;
        }
@@ -2289,6 +2321,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
        unsigned int sit_segs, nat_segs;
        unsigned int sit_bitmap_size, nat_bitmap_size;
        unsigned int log_blocks_per_seg;
+       unsigned int segment_count_main;
+       unsigned int cp_pack_start_sum, cp_payload;
+       block_t user_block_count;
        int i;
 
        total = le32_to_cpu(raw_super->segment_count);
@@ -2313,6 +2348,16 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
                return 1;
        }
 
+       user_block_count = le64_to_cpu(ckpt->user_block_count);
+       segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+       log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+       if (!user_block_count || user_block_count >=
+                       segment_count_main << log_blocks_per_seg) {
+               f2fs_msg(sbi->sb, KERN_ERR,
+                       "Wrong user_block_count: %u", user_block_count);
+               return 1;
+       }
+
        main_segs = le32_to_cpu(raw_super->segment_count_main);
        blocks_per_seg = sbi->blocks_per_seg;
 
@@ -2329,7 +2374,6 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 
        sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
        nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
-       log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
 
        if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
                nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
@@ -2339,6 +2383,17 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
                return 1;
        }
 
+       cp_pack_start_sum = __start_sum_addr(sbi);
+       cp_payload = __cp_payload(sbi);
+       if (cp_pack_start_sum < cp_payload + 1 ||
+               cp_pack_start_sum > blocks_per_seg - 1 -
+                       NR_CURSEG_TYPE) {
+               f2fs_msg(sbi->sb, KERN_ERR,
+                       "Wrong cp_pack_start_sum: %u",
+                       cp_pack_start_sum);
+               return 1;
+       }
+
        if (unlikely(f2fs_cp_error(sbi))) {
                f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
                return 1;
@@ -2676,6 +2731,8 @@ static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
                sm_i->dcc_info->discard_granularity = 1;
                sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
        }
+
+       sbi->readdir_ra = 1;
 }
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -2725,9 +2782,6 @@ try_onemore:
        sb->s_fs_info = sbi;
        sbi->raw_super = raw_super;
 
-       F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
-       F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
-
        /* precompute checksum seed for metadata */
        if (f2fs_sb_has_inode_chksum(sb))
                sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
@@ -2796,6 +2850,7 @@ try_onemore:
        /* init f2fs-specific super block info */
        sbi->valid_super_block = valid_super_block;
        mutex_init(&sbi->gc_mutex);
+       mutex_init(&sbi->writepages);
        mutex_init(&sbi->cp_mutex);
        init_rwsem(&sbi->node_write);
        init_rwsem(&sbi->node_change);
@@ -2890,6 +2945,8 @@ try_onemore:
 
        f2fs_init_ino_entry_info(sbi);
 
+       f2fs_init_fsync_node_info(sbi);
+
        /* setup f2fs internal modules */
        err = f2fs_build_segment_manager(sbi);
        if (err) {
@@ -2936,10 +2993,11 @@ try_onemore:
                err = PTR_ERR(root);
                goto free_stats;
        }
-       if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+       if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
+                       !root->i_size || !root->i_nlink) {
                iput(root);
                err = -EINVAL;
-               goto free_node_inode;
+               goto free_stats;
        }
 
        sb->s_root = d_make_root(root); /* allocate root dentry */
@@ -2953,10 +3011,7 @@ try_onemore:
                goto free_root_inode;
 
 #ifdef CONFIG_QUOTA
-       /*
-        * Turn on quotas which were not enabled for read-only mounts if
-        * filesystem has quota feature, so that they are updated correctly.
-        */
+       /* Enable quota usage during mount */
        if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
                err = f2fs_enable_quotas(sb);
                if (err) {
@@ -3114,9 +3169,19 @@ static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
 static void kill_f2fs_super(struct super_block *sb)
 {
        if (sb->s_root) {
-               set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
-               f2fs_stop_gc_thread(F2FS_SB(sb));
-               f2fs_stop_discard_thread(F2FS_SB(sb));
+               struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+               set_sbi_flag(sbi, SBI_IS_CLOSE);
+               f2fs_stop_gc_thread(sbi);
+               f2fs_stop_discard_thread(sbi);
+
+               if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+                               !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+                       struct cp_control cpc = {
+                               .reason = CP_UMOUNT,
+                       };
+                       f2fs_write_checkpoint(sbi, &cpc);
+               }
        }
        kill_block_super(sb);
 }
index 60c827e..30fd016 100644 (file)
@@ -9,6 +9,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/compiler.h>
 #include <linux/proc_fs.h>
 #include <linux/f2fs_fs.h>
 #include <linux/seq_file.h>
@@ -252,6 +253,7 @@ out:
                if (t >= 1) {
                        sbi->gc_mode = GC_URGENT;
                        if (sbi->gc_thread) {
+                               sbi->gc_thread->gc_wake = 1;
                                wake_up_interruptible_all(
                                        &sbi->gc_thread->gc_wait_queue_head);
                                wake_up_discard_thread(sbi, true);
@@ -286,8 +288,10 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
        bool gc_entry = (!strcmp(a->attr.name, "gc_urgent") ||
                                        a->struct_type == GC_THREAD);
 
-       if (gc_entry)
-               down_read(&sbi->sb->s_umount);
+       if (gc_entry) {
+               if (!down_read_trylock(&sbi->sb->s_umount))
+                       return -EAGAIN;
+       }
        ret = __sbi_store(a, sbi, buf, count);
        if (gc_entry)
                up_read(&sbi->sb->s_umount);
@@ -393,6 +397,7 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_seq_blocks, min_seq_blocks);
 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks);
 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
@@ -445,6 +450,7 @@ static struct attribute *f2fs_attrs[] = {
        ATTR_LIST(ipu_policy),
        ATTR_LIST(min_ipu_util),
        ATTR_LIST(min_fsync_blocks),
+       ATTR_LIST(min_seq_blocks),
        ATTR_LIST(min_hot_blocks),
        ATTR_LIST(min_ssr_sections),
        ATTR_LIST(max_victim_search),
@@ -516,7 +522,8 @@ static struct kobject f2fs_feat = {
        .kset   = &f2fs_kset,
 };
 
-static int segment_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
+                                               void *offset)
 {
        struct super_block *sb = seq->private;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -543,7 +550,8 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
        return 0;
 }
 
-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
+                                               void *offset)
 {
        struct super_block *sb = seq->private;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -567,7 +575,8 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
        return 0;
 }
 
-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
+                                              void *offset)
 {
        struct super_block *sb = seq->private;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -609,6 +618,28 @@ static int iostat_info_seq_show(struct seq_file *seq, void *offset)
        return 0;
 }
 
+static int __maybe_unused victim_bits_seq_show(struct seq_file *seq,
+                                               void *offset)
+{
+       struct super_block *sb = seq->private;
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+       int i;
+
+       seq_puts(seq, "format: victim_secmap bitmaps\n");
+
+       for (i = 0; i < MAIN_SECS(sbi); i++) {
+               if ((i % 10) == 0)
+                       seq_printf(seq, "%-10d", i);
+               seq_printf(seq, "%d", test_bit(i, dirty_i->victim_secmap) ? 1 : 0);
+               if ((i % 10) == 9 || i == (MAIN_SECS(sbi) - 1))
+                       seq_putc(seq, '\n');
+               else
+                       seq_putc(seq, ' ');
+       }
+       return 0;
+}
+
 #define F2FS_PROC_FILE_DEF(_name)                                      \
 static int _name##_open_fs(struct inode *inode, struct file *file)     \
 {                                                                      \
@@ -625,6 +656,7 @@ static const struct file_operations f2fs_seq_##_name##_fops = {             \
 F2FS_PROC_FILE_DEF(segment_info);
 F2FS_PROC_FILE_DEF(segment_bits);
 F2FS_PROC_FILE_DEF(iostat_info);
+F2FS_PROC_FILE_DEF(victim_bits);
 
 int __init f2fs_init_sysfs(void)
 {
@@ -675,6 +707,8 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
                                 &f2fs_seq_segment_bits_fops, sb);
                proc_create_data("iostat_info", S_IRUGO, sbi->s_proc,
                                &f2fs_seq_iostat_info_fops, sb);
+               proc_create_data("victim_bits", S_IRUGO, sbi->s_proc,
+                               &f2fs_seq_victim_bits_fops, sb);
        }
        return 0;
 }
@@ -685,6 +719,7 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
                remove_proc_entry("iostat_info", sbi->s_proc);
                remove_proc_entry("segment_info", sbi->s_proc);
                remove_proc_entry("segment_bits", sbi->s_proc);
+               remove_proc_entry("victim_bits", sbi->s_proc);
                remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
        }
        kobject_del(&sbi->s_kobj);
index 61a5d92..152078b 100644 (file)
@@ -38,9 +38,6 @@ static size_t f2fs_xattr_generic_list(const struct xattr_handler *handler,
                        return -EOPNOTSUPP;
                break;
        case F2FS_XATTR_INDEX_TRUSTED:
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EPERM;
-               break;
        case F2FS_XATTR_INDEX_SECURITY:
                break;
        default:
@@ -69,9 +66,6 @@ static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
                        return -EOPNOTSUPP;
                break;
        case F2FS_XATTR_INDEX_TRUSTED:
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EPERM;
-               break;
        case F2FS_XATTR_INDEX_SECURITY:
                break;
        default:
@@ -142,6 +136,8 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
                size_t size, int flags)
 {
        struct inode *inode = d_inode(dentry);
+       unsigned char old_advise = F2FS_I(inode)->i_advise;
+       unsigned char new_advise;
 
        if (strcmp(name, "") != 0)
                return -EINVAL;
@@ -150,7 +146,14 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
        if (value == NULL)
                return -EINVAL;
 
-       F2FS_I(inode)->i_advise |= *(char *)value;
+       new_advise = *(char *)value;
+       if (new_advise & ~FADVISE_MODIFIABLE_BITS)
+               return -EINVAL;
+
+       new_advise = new_advise & FADVISE_MODIFIABLE_BITS;
+       new_advise |= old_advise & ~FADVISE_MODIFIABLE_BITS;
+
+       F2FS_I(inode)->i_advise = new_advise;
        f2fs_mark_inode_dirty_sync(inode, true);
        return 0;
 }
index 4c2c036..8e14277 100644 (file)
@@ -1004,12 +1004,14 @@ ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
                        rc = xhandle->list(xhandle, dentry, buffer + len,
                                           size - len, xd->xname,
                                           xd->name_len);
+                       if (rc > size - len) {
+                               rc = -ERANGE;
+                               goto out;
+                       }
                } else {
                        rc = xhandle->list(xhandle, dentry, NULL, 0,
                                           xd->xname, xd->name_len);
                }
-               if (rc < 0)
-                       goto out;
                len += rc;
        }
        rc = len;
index bfbee8d..c67064d 100644 (file)
@@ -1632,6 +1632,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
        if (status) {
                op = &args->ops[0];
                op->status = status;
+               resp->opcnt = 1;
                goto encode_op;
        }
 
index fe50ded..272269f 100644 (file)
@@ -336,6 +336,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                                 * for this bh as it's not marked locally
                                 * uptodate. */
                                status = -EIO;
+                               clear_buffer_needs_validate(bh);
                                put_bh(bh);
                                bhs[i] = NULL;
                                continue;
index 4e2162b..0cefb03 100644 (file)
@@ -589,9 +589,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
 
        res->last_used = 0;
 
-       spin_lock(&dlm->spinlock);
+       spin_lock(&dlm->track_lock);
        list_add_tail(&res->tracking, &dlm->tracking_list);
-       spin_unlock(&dlm->spinlock);
+       spin_unlock(&dlm->track_lock);
 
        memset(res->lvb, 0, DLM_LVB_LEN);
        memset(res->refmap, 0, sizeof(res->refmap));
index 4c09890..ee9b7f0 100644 (file)
@@ -472,6 +472,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
        int err;
        int i;
 
+       /*
+        * The ability to racily run the kernel stack unwinder on a running task
+        * and then observe the unwinder output is scary; while it is useful for
+        * debugging kernel issues, it can also allow an attacker to leak kernel
+        * stack contents.
+        * Doing this in a manner that is at least safe from races would require
+        * some work to ensure that the remote task can not be scheduled; and
+        * even then, this would still expose the unwinder as local attack
+        * surface.
+        * Therefore, this interface is restricted to root.
+        */
+       if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+               return -EACCES;
+
        entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;
index 1461254..271c4c4 100644 (file)
@@ -118,7 +118,11 @@ static long sdcardfs_unlocked_ioctl(struct file *file, unsigned int cmd,
                goto out;
 
        /* save current_cred and override it */
-       OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+       saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+       if (!saved_cred) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        if (lower_file->f_op->unlocked_ioctl)
                err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
@@ -127,7 +131,7 @@ static long sdcardfs_unlocked_ioctl(struct file *file, unsigned int cmd,
        if (!err)
                sdcardfs_copy_and_fix_attrs(file_inode(file),
                                      file_inode(lower_file));
-       REVERT_CRED(saved_cred);
+       revert_fsids(saved_cred);
 out:
        return err;
 }
@@ -149,12 +153,16 @@ static long sdcardfs_compat_ioctl(struct file *file, unsigned int cmd,
                goto out;
 
        /* save current_cred and override it */
-       OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+       saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+       if (!saved_cred) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        if (lower_file->f_op->compat_ioctl)
                err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
 
-       REVERT_CRED(saved_cred);
+       revert_fsids(saved_cred);
 out:
        return err;
 }
@@ -241,7 +249,11 @@ static int sdcardfs_open(struct inode *inode, struct file *file)
        }
 
        /* save current_cred and override it */
-       OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(inode));
+       saved_cred = override_fsids(sbi, SDCARDFS_I(inode)->data);
+       if (!saved_cred) {
+               err = -ENOMEM;
+               goto out_err;
+       }
 
        file->private_data =
                kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
@@ -271,7 +283,7 @@ static int sdcardfs_open(struct inode *inode, struct file *file)
                sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
 
 out_revert_cred:
-       REVERT_CRED(saved_cred);
+       revert_fsids(saved_cred);
 out_err:
        dput(parent);
        return err;
index b41eb7f..ab0952f 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/fs_struct.h>
 #include <linux/ratelimit.h>
 
-/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
 const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
                struct sdcardfs_inode_data *data)
 {
@@ -50,7 +49,6 @@ const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
        return old_cred;
 }
 
-/* Do not directly use this function, use REVERT_CRED() instead. */
 void revert_fsids(const struct cred *old_cred)
 {
        const struct cred *cur_cred;
@@ -78,7 +76,10 @@ static int sdcardfs_create(struct inode *dir, struct dentry *dentry,
        }
 
        /* save current_cred and override it */
-       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+       saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+                                       SDCARDFS_I(dir)->data);
+       if (!saved_cred)
+               return -ENOMEM;
 
        sdcardfs_get_lower_path(dentry, &lower_path);
        lower_dentry = lower_path.dentry;
@@ -95,8 +96,11 @@ static int sdcardfs_create(struct inode *dir, struct dentry *dentry,
                err = -ENOMEM;
                goto out_unlock;
        }
+       copied_fs->umask = 0;
+       task_lock(current);
        current->fs = copied_fs;
-       current->fs->umask = 0;
+       task_unlock(current);
+
        err = vfs_create2(lower_dentry_mnt, d_inode(lower_parent_dentry), lower_dentry, mode, want_excl);
        if (err)
                goto out;
@@ -110,58 +114,18 @@ static int sdcardfs_create(struct inode *dir, struct dentry *dentry,
        fixup_lower_ownership(dentry, dentry->d_name.name);
 
 out:
+       task_lock(current);
        current->fs = saved_fs;
+       task_unlock(current);
        free_fs_struct(copied_fs);
 out_unlock:
        unlock_dir(lower_parent_dentry);
        sdcardfs_put_lower_path(dentry, &lower_path);
-       REVERT_CRED(saved_cred);
+       revert_fsids(saved_cred);
 out_eacces:
        return err;
 }
 
-#if 0
-static int sdcardfs_link(struct dentry *old_dentry, struct inode *dir,
-                      struct dentry *new_dentry)
-{
-       struct dentry *lower_old_dentry;
-       struct dentry *lower_new_dentry;
-       struct dentry *lower_dir_dentry;
-       u64 file_size_save;
-       int err;
-       struct path lower_old_path, lower_new_path;
-
-       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
-       file_size_save = i_size_read(d_inode(old_dentry));
-       sdcardfs_get_lower_path(old_dentry, &lower_old_path);
-       sdcardfs_get_lower_path(new_dentry, &lower_new_path);
-       lower_old_dentry = lower_old_path.dentry;
-       lower_new_dentry = lower_new_path.dentry;
-       lower_dir_dentry = lock_parent(lower_new_dentry);
-
-       err = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry),
-                      lower_new_dentry, NULL);
-       if (err || !d_inode(lower_new_dentry))
-               goto out;
-
-       err = sdcardfs_interpose(new_dentry, dir->i_sb, &lower_new_path);
-       if (err)
-               goto out;
-       fsstack_copy_attr_times(dir, d_inode(lower_new_dentry));
-       fsstack_copy_inode_size(dir, d_inode(lower_new_dentry));
-       set_nlink(d_inode(old_dentry),
-                 sdcardfs_lower_inode(d_inode(old_dentry))->i_nlink);
-       i_size_write(d_inode(new_dentry), file_size_save);
-out:
-       unlock_dir(lower_dir_dentry);
-       sdcardfs_put_lower_path(old_dentry, &lower_old_path);
-       sdcardfs_put_lower_path(new_dentry, &lower_new_path);
-       REVERT_CRED();
-       return err;
-}
-#endif
-
 static int sdcardfs_unlink(struct inode *dir, struct dentry *dentry)
 {
        int err;
@@ -178,7 +142,10 @@ static int sdcardfs_unlink(struct inode *dir, struct dentry *dentry)
        }
 
        /* save current_cred and override it */
-       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+       saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+                                               SDCARDFS_I(dir)->data);
+       if (!saved_cred)
+               return -ENOMEM;
 
        sdcardfs_get_lower_path(dentry, &lower_path);
        lower_dentry = lower_path.dentry;
@@ -209,43 +176,11 @@ out:
        unlock_dir(lower_dir_dentry);
        dput(lower_dentry);
        sdcardfs_put_lower_path(dentry, &lower_path);
-       REVERT_CRED(saved_cred);
+       revert_fsids(saved_cred);
 out_eacces:
        return err;
 }
 
-#if 0
-static int sdcardfs_symlink(struct inode *dir, struct dentry *dentry,
-                         const char *symname)
-{
-       int err;
-       struct dentry *lower_dentry;
-       struct dentry *lower_parent_dentry = NULL;
-       struct path lower_path;
-
-       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
-       sdcardfs_get_lower_path(dentry, &lower_path);
-       lower_dentry = lower_path.dentry;
-       lower_parent_dentry = lock_parent(lower_dentry);
-
-       err = vfs_symlink(d_inode(lower_parent_dentry), lower_dentry, symname);
-       if (err)
-               goto out;
-       err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
-       if (err)
-               goto out;
-       fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
-       fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
-       unlock_dir(lower_parent_dentry);
-       sdcardfs_put_lower_path(dentry, &lower_path);
-       REVERT_CRED();
-       return err;
-}
-#endif
-
 static int touch(char *abs_path, mode_t mode)
 {
        struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
@@ -287,7 +222,10 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
        }
 
        /* save current_cred and override it */
-       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+       saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+                                               SDCARDFS_I(dir)->data);
+       if (!saved_cred)
+               return -ENOMEM;
 
        /* check disk space */
        parent_dentry = dget_parent(dentry);
@@ -316,8 +254,11 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
                unlock_dir(lower_parent_dentry);
                goto out_unlock;
        }
+       copied_fs->umask = 0;
+       task_lock(current);
        current->fs = copied_fs;
-       current->fs->umask = 0;
+       task_unlock(current);
+
        err = vfs_mkdir2(lower_mnt, d_inode(lower_parent_dentry), lower_dentry, mode);
 
        if (err) {
@@ -366,23 +307,34 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
        if (make_nomedia_in_obb ||
                ((pd->perm == PERM_ANDROID)
                                && (qstr_case_eq(&dentry->d_name, &q_data)))) {
-               REVERT_CRED(saved_cred);
-               OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry)));
+               revert_fsids(saved_cred);
+               saved_cred = override_fsids(sbi,
+                                       SDCARDFS_I(d_inode(dentry))->data);
+               if (!saved_cred) {
+                       pr_err("sdcardfs: failed to set up .nomedia in %s: %d\n",
+                                               lower_path.dentry->d_name.name,
+                                               -ENOMEM);
+                       goto out;
+               }
                set_fs_pwd(current->fs, &lower_path);
                touch_err = touch(".nomedia", 0664);
                if (touch_err) {
                        pr_err("sdcardfs: failed to create .nomedia in %s: %d\n",
-                                                       lower_path.dentry->d_name.name, touch_err);
+                                               lower_path.dentry->d_name.name,
+                                               touch_err);
                        goto out;
                }
        }
 out:
+       task_lock(current);
        current->fs = saved_fs;
+       task_unlock(current);
+
        free_fs_struct(copied_fs);
 out_unlock:
        sdcardfs_put_lower_path(dentry, &lower_path);
 out_revert:
-       REVERT_CRED(saved_cred);
+       revert_fsids(saved_cred);
 out_eacces:
        return err;
 }
@@ -402,7 +354,10 @@ static int sdcardfs_rmdir(struct inode *dir, struct dentry *dentry)
        }
 
        /* save current_cred and override it */
-       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+       saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+                                               SDCARDFS_I(dir)->data);
+       if (!saved_cred)
+               return -ENOMEM;
 
        /* sdcardfs_get_real_lower(): in case of remove an user's obb dentry
         * the dentry on the original path should be deleted.
@@ -427,44 +382,11 @@ static int sdcardfs_rmdir(struct inode *dir, struct dentry *dentry)
 out:
        unlock_dir(lower_dir_dentry);
        sdcardfs_put_real_lower(dentry, &lower_path);
-       REVERT_CRED(saved_cred);
+       revert_fsids(saved_cred);
 out_eacces:
        return err;
 }
 
-#if 0
-static int sdcardfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-                       dev_t dev)
-{
-       int err;
-       struct dentry *lower_dentry;
-       struct dentry *lower_parent_dentry = NULL;
-       struct path lower_path;
-
-       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
-       sdcardfs_get_lower_path(dentry, &lower_path);
-       lower_dentry = lower_path.dentry;
-       lower_parent_dentry = lock_parent(lower_dentry);
-
-       err = vfs_mknod(d_inode(lower_parent_dentry), lower_dentry, mode, dev);
-       if (err)
-               goto out;
-
-       err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
-       if (err)
-               goto out;
-       fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
-       fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
-       unlock_dir(lower_parent_dentry);
-       sdcardfs_put_lower_path(dentry, &lower_path);
-       REVERT_CRED();
-       return err;
-}
-#endif
-
 /*
  * The locking rules in sdcardfs_rename are complex.  We could use a simpler
  * superblock-level name-space lock for renames and copy-ups.
@@ -489,7 +411,10 @@ static int sdcardfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        }
 
        /* save current_cred and override it */
-       OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred, SDCARDFS_I(new_dir));
+       saved_cred = override_fsids(SDCARDFS_SB(old_dir->i_sb),
+                                               SDCARDFS_I(new_dir)->data);
+       if (!saved_cred)
+               return -ENOMEM;
 
        sdcardfs_get_real_lower(old_dentry, &lower_old_path);
        sdcardfs_get_lower_path(new_dentry, &lower_new_path);
@@ -536,7 +461,7 @@ out:
        dput(lower_new_dir_dentry);
        sdcardfs_put_real_lower(old_dentry, &lower_old_path);
        sdcardfs_put_lower_path(new_dentry, &lower_new_path);
-       REVERT_CRED(saved_cred);
+       revert_fsids(saved_cred);
 out_eacces:
        return err;
 }
@@ -655,33 +580,7 @@ static int sdcardfs_permission(struct vfsmount *mnt, struct inode *inode, int ma
        if (IS_POSIXACL(inode))
                pr_warn("%s: This may be undefined behavior...\n", __func__);
        err = generic_permission(&tmp, mask);
-       /* XXX
-        * Original sdcardfs code calls inode_permission(lower_inode,.. )
-        * for checking inode permission. But doing such things here seems
-        * duplicated work, because the functions called after this func,
-        * such as vfs_create, vfs_unlink, vfs_rename, and etc,
-        * does exactly same thing, i.e., they calls inode_permission().
-        * So we just let they do the things.
-        * If there are any security hole, just uncomment following if block.
-        */
-#if 0
-       if (!err) {
-               /*
-                * Permission check on lower_inode(=EXT4).
-                * we check it with AID_MEDIA_RW permission
-                */
-               struct inode *lower_inode;
-
-               OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
-
-               lower_inode = sdcardfs_lower_inode(inode);
-               err = inode_permission(lower_inode, mask);
-
-               REVERT_CRED();
-       }
-#endif
        return err;
-
 }
 
 static int sdcardfs_setattr_wrn(struct dentry *dentry, struct iattr *ia)
@@ -756,7 +655,10 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
                goto out_err;
 
        /* save current_cred and override it */
-       OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred, SDCARDFS_I(inode));
+       saved_cred = override_fsids(SDCARDFS_SB(dentry->d_sb),
+                                               SDCARDFS_I(inode)->data);
+       if (!saved_cred)
+               return -ENOMEM;
 
        sdcardfs_get_lower_path(dentry, &lower_path);
        lower_dentry = lower_path.dentry;
@@ -815,7 +717,7 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
 
 out:
        sdcardfs_put_lower_path(dentry, &lower_path);
-       REVERT_CRED(saved_cred);
+       revert_fsids(saved_cred);
 out_err:
        return err;
 }
@@ -898,13 +800,6 @@ const struct inode_operations sdcardfs_dir_iops = {
        .setattr        = sdcardfs_setattr_wrn,
        .setattr2       = sdcardfs_setattr,
        .getattr        = sdcardfs_getattr,
-       /* XXX Following operations are implemented,
-        *     but FUSE(sdcard) or FAT does not support them
-        *     These methods are *NOT* perfectly tested.
-       .symlink        = sdcardfs_symlink,
-       .link           = sdcardfs_link,
-       .mknod          = sdcardfs_mknod,
-        */
 };
 
 const struct inode_operations sdcardfs_main_iops = {
index 206f8cb..a671ae2 100644 (file)
@@ -426,7 +426,12 @@ struct dentry *sdcardfs_lookup(struct inode *dir, struct dentry *dentry,
        }
 
        /* save current_cred and override it */
-       OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+       saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+                                               SDCARDFS_I(dir)->data);
+       if (!saved_cred) {
+               ret = ERR_PTR(-ENOMEM);
+               goto out_err;
+       }
 
        sdcardfs_get_lower_path(parent, &lower_parent_path);
 
@@ -457,7 +462,7 @@ struct dentry *sdcardfs_lookup(struct inode *dir, struct dentry *dentry,
 
 out:
        sdcardfs_put_lower_path(parent, &lower_parent_path);
-       REVERT_CRED(saved_cred);
+       revert_fsids(saved_cred);
 out_err:
        dput(parent);
        return ret;
index 27ec726..1ad7718 100644 (file)
@@ -264,7 +264,7 @@ static int sdcardfs_read_super(struct vfsmount *mnt, struct super_block *sb,
 
        pr_info("sdcardfs: dev_name -> %s\n", dev_name);
        pr_info("sdcardfs: options -> %s\n", (char *)raw_data);
-       pr_info("sdcardfs: mnt -> %p\n", mnt);
+       pr_info("sdcardfs: mnt -> %pK\n", mnt);
 
        /* parse lower path */
        err = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
index 055e413..99227a0 100644 (file)
                (x)->i_mode = ((x)->i_mode & S_IFMT) | 0775;\
        } while (0)
 
-/* OVERRIDE_CRED() and REVERT_CRED()
- *     OVERRIDE_CRED()
- *             backup original task->cred
- *             and modifies task->cred->fsuid/fsgid to specified value.
- *     REVERT_CRED()
- *             restore original task->cred->fsuid/fsgid.
- * These two macro should be used in pair, and OVERRIDE_CRED() should be
- * placed at the beginning of a function, right after variable declaration.
- */
-#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info)          \
-       do {    \
-               saved_cred = override_fsids(sdcardfs_sbi, info->data);  \
-               if (!saved_cred)        \
-                       return -ENOMEM; \
-       } while (0)
-
-#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info)      \
-       do {    \
-               saved_cred = override_fsids(sdcardfs_sbi, info->data);  \
-               if (!saved_cred)        \
-                       return ERR_PTR(-ENOMEM);        \
-       } while (0)
-
-#define REVERT_CRED(saved_cred)        revert_fsids(saved_cred)
-
 /* Android 5.0 support */
 
 /* Permission mode for a specific node. Controls how file permissions
index cffcdb1..fa7d9d2 100644 (file)
@@ -144,7 +144,7 @@ static int sdcardfs_remount_fs2(struct vfsmount *mnt, struct super_block *sb,
                pr_err("sdcardfs: remount flags 0x%x unsupported\n", *flags);
                err = -EINVAL;
        }
-       pr_info("Remount options were %s for vfsmnt %p.\n", options, mnt);
+       pr_info("Remount options were %s for vfsmnt %pK.\n", options, mnt);
        err = parse_options_remount(sb, options, *flags & ~MS_SILENT, mnt->data);
 
 
index 0bb6de3..7968b7a 100644 (file)
@@ -1918,6 +1918,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
        int dev, vol;
        char *endptr;
 
+       if (!name || !*name)
+               return ERR_PTR(-EINVAL);
+
        /* First, try to open using the device node path method */
        ubi = ubi_open_volume_path(name, mode);
        if (!IS_ERR(ubi))
index 2ebfa01..8e6a185 100644 (file)
@@ -304,11 +304,6 @@ struct f2fs_node {
  * For NAT entries
  */
 #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE  ((NAT_ENTRY_PER_BLOCK + 7) / 8)
-#define NAT_ENTRY_BITMAP_SIZE_ALIGNED                          \
-       ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) /          \
-       BITS_PER_LONG * BITS_PER_LONG)
-
 
 struct f2fs_nat_entry {
        __u8 version;           /* latest version of cached nat entry */
index 11c2af7..1ff11e4 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -133,6 +133,7 @@ struct hdcp_client_ops {
        int (*wakeup)(struct hdmi_hdcp_wakeup_data *data);
        void (*notify_lvl_change)(void *client_ctx, int min_lvl);
        void (*srm_cb)(void *client_ctx);
+       void (*mute_sink)(void *client_ctx);
 };
 
 enum hdcp_device_type {
index ae6a711..281bb00 100644 (file)
@@ -1179,6 +1179,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
 
 struct hv_util_service {
        u8 *recv_buffer;
+       void *channel;
        void (*util_cb)(void *);
        int (*util_init)(struct hv_util_service *);
        void (*util_deinit)(void);
index 623d0f0..2586395 100644 (file)
@@ -1164,6 +1164,28 @@ struct ipa_gsi_ep_config {
        int ee;
 };
 
+/**
+ * union ipa_bam_sw_peer_desc - IPA sps sw peer desc
+ *
+ * @sw_dsc_ofst: software desc offset
+ * @sw_ofst_in_desc: offset in desc
+ * @p_dsc_fifo_peer_ofst: peer desc offset
+ * @p_bytes_consumed: bytes consumed
+ */
+union ipa_bam_sw_peer_desc {
+       struct sw_ofsts_reg {
+               u32 sw_dsc_ofst:16;
+               u32 sw_ofst_in_desc:15;
+       } sw_desc;
+
+       struct evnt_reg {
+               u32 p_dsc_fifo_peer_ofst:16;
+               u32 p_bytes_consumed:15;
+       } peer_desc;
+
+       u32 read_reg;
+};
+
 #if defined CONFIG_IPA || defined CONFIG_IPA3
 
 /*
index c77de3b..43c998c 100644 (file)
@@ -2168,6 +2168,13 @@ struct netdev_notifier_info {
        struct net_device *dev;
 };
 
+struct netdev_notifier_info_ext {
+       struct netdev_notifier_info info; /* must be first */
+       union {
+               u32 mtu;
+       } ext;
+};
+
 struct netdev_notifier_change_info {
        struct netdev_notifier_info info; /* must be first */
        unsigned int flags_changed;
index 2ea517c..bffd096 100644 (file)
@@ -125,4 +125,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
 /* True if the target is not a standard target */
 #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
 
+static inline bool ebt_invalid_target(int target)
+{
+       return (target < -NUM_STANDARD_TARGETS || target >= 0);
+}
+
 #endif
index 9abc0ca..9f0aa1b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Driver for Texas Instruments INA219, INA226 power monitor chips
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
index dfa6752..b0ece2d 100644 (file)
@@ -39,6 +39,8 @@ int msm_anc_dev_stop(void);
 
 int msm_anc_dev_set_info(void *info_p, int32_t anc_cmd);
 
+int msm_anc_dev_get_info(void *info_p, int32_t anc_cmd);
+
 int msm_anc_dev_create(struct platform_device *pdev);
 
 int msm_anc_dev_destroy(struct platform_device *pdev);
index 3b236e8..5c1b705 100644 (file)
@@ -15,7 +15,6 @@
 #include <sound/q6afe-v2.h>
 #include <sound/apr_audio-v2.h>
 
-
 #define AUD_MSVC_MODULE_AUDIO_DEV_RESOURCE_SHARE           0x0001028A
 #define AUD_MSVC_PARAM_ID_PORT_SHARE_RESOURCE_CONFIG       0x00010297
 #define AUD_MSVC_API_VERSION_SHARE_RESOURCE_CONFIG         0x1
@@ -23,8 +22,6 @@
 #define AUD_MSVC_PARAM_ID_DEV_ANC_REFS_CONFIG              0x00010286
 #define AUD_MSVC_API_VERSION_DEV_ANC_REFS_CONFIG           0x1
 #define AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO                 0x00010234
-#define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM                0x00010235
-#define AUD_MSVC_API_VERSION_DEV_ANC_ALGO_RPM              0x1
 
 struct aud_msvc_port_param_data_v2 {
        /* ID of the module to be configured.
@@ -148,7 +145,7 @@ struct aud_msvc_port_cmd_get_param_v2 {
 } __packed;
 
 struct aud_audioif_config_command {
-       struct apr_hdr                  hdr;
+       struct apr_hdr hdr;
        struct aud_msvc_port_cmd_set_param_v2 param;
        struct aud_msvc_port_param_data_v2    pdata;
        union afe_port_config            port;
@@ -162,13 +159,6 @@ struct aud_msvc_param_id_dev_share_resource_cfg {
        u32                  lpm_length;
 } __packed;
 
-
-struct aud_msvc_param_id_dev_anc_algo_rpm {
-       u32                  minor_version;
-       u32                  rpm;
-} __packed;
-
-
 struct aud_msvc_param_id_dev_anc_refs_cfg {
        u32                  minor_version;
        u16                  port_id;
@@ -177,65 +167,20 @@ struct aud_msvc_param_id_dev_anc_refs_cfg {
        u32                  bit_width;
 } __packed;
 
-
 struct anc_share_resource_command {
-       struct apr_hdr                  hdr;
+       struct apr_hdr hdr;
        struct aud_msvc_port_cmd_set_param_v2 param;
        struct aud_msvc_port_param_data_v2    pdata;
        struct aud_msvc_param_id_dev_share_resource_cfg resource;
 } __packed;
 
-
 struct anc_config_ref_command {
-       struct apr_hdr                  hdr;
+       struct apr_hdr hdr;
        struct aud_msvc_port_cmd_set_param_v2 param;
        struct aud_msvc_port_param_data_v2    pdata;
        struct aud_msvc_param_id_dev_anc_refs_cfg refs;
 } __packed;
 
-
-
-struct anc_set_rpm_command {
-       struct apr_hdr                  hdr;
-       struct aud_msvc_port_cmd_set_param_v2 param;
-       struct aud_msvc_port_param_data_v2    pdata;
-       struct aud_msvc_param_id_dev_anc_algo_rpm set_rpm;
-} __packed;
-
-struct anc_get_rpm_command {
-       struct apr_hdr                  hdr;
-       struct aud_msvc_port_cmd_get_param_v2 param;
-       struct aud_msvc_port_param_data_v2    pdata;
-       struct aud_msvc_param_id_dev_anc_algo_rpm get_rpm;
-} __packed;
-
-struct anc_get_rpm_resp {
-       uint32_t status;
-       struct aud_msvc_port_param_data_v2 pdata;
-       struct aud_msvc_param_id_dev_anc_algo_rpm res_rpm;
-} __packed;
-
-#define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_BYPASS_MODE      0x0001029B
-
-#define AUD_MSVC_API_VERSION_DEV_ANC_ALGO_BYPASS_MODE    0x1
-
-#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_NO                               0x0
-#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_REFS_TO_ANC_SPKR                 0x1
-#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_ANC_MIC_TO_ANC_SPKR              0x2
-#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_REFS_MIXED_ANC_MIC_TO_ANC_SPKR   0x3
-
-struct aud_msvc_param_id_dev_anc_algo_bypass_mode {
-       uint32_t                  minor_version;
-       uint32_t                  bypass_mode;
-} __packed;
-
-struct anc_set_bypass_mode_command {
-       struct apr_hdr                   hdr;
-       struct aud_msvc_port_cmd_set_param_v2 param;
-       struct aud_msvc_port_param_data_v2    pdata;
-       struct aud_msvc_param_id_dev_anc_algo_bypass_mode set_bypass_mode;
-} __packed;
-
 #define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_MODULE_ID      0x0001023A
 
 struct aud_msvc_param_id_dev_anc_algo_module_id {
@@ -244,7 +189,7 @@ struct aud_msvc_param_id_dev_anc_algo_module_id {
 } __packed;
 
 struct anc_set_algo_module_id_command {
-       struct apr_hdr                   hdr;
+       struct apr_hdr hdr;
        struct aud_msvc_port_cmd_set_param_v2 param;
        struct aud_msvc_port_param_data_v2    pdata;
        struct aud_msvc_param_id_dev_anc_algo_module_id set_algo_module_id;
@@ -269,13 +214,37 @@ struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info {
 } __packed;
 
 struct anc_set_mic_spkr_layout_info_command {
-       struct apr_hdr                   hdr;
+       struct apr_hdr hdr;
        struct aud_msvc_port_cmd_set_param_v2 param;
        struct aud_msvc_port_param_data_v2    pdata;
        struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info
                set_mic_spkr_layout;
 } __packed;
 
+struct anc_set_algo_module_cali_data_command {
+       struct apr_hdr hdr;
+       struct aud_msvc_port_cmd_set_param_v2 param;
+       struct aud_msvc_port_param_data_v2    pdata;
+       /*
+        * calibration data payload followed
+        */
+} __packed;
+
+struct anc_get_algo_module_cali_data_command {
+       struct apr_hdr hdr;
+       struct aud_msvc_port_cmd_get_param_v2 param;
+       struct aud_msvc_port_param_data_v2    pdata;
+       /*
+        * calibration data payload followed
+        */
+} __packed;
+
+struct anc_get_algo_module_cali_data_resp {
+       uint32_t status;
+       struct aud_msvc_port_param_data_v2 pdata;
+       uint32_t payload[128];
+} __packed;
+
 int anc_if_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port);
 
 int anc_if_tdm_port_stop(u16 port_id);
@@ -286,15 +255,15 @@ int anc_if_share_resource(u16 port_id, u16 rddma_idx, u16 wrdma_idx,
 int anc_if_config_ref(u16 port_id, u32 sample_rate, u32 bit_width,
                u16 num_channel);
 
-int anc_if_set_rpm(u16 port_id, u32 rpm);
-
-int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode);
-
 int anc_if_set_algo_module_id(u16 port_id, u32 module_id);
 
 int anc_if_set_anc_mic_spkr_layout(u16 port_id,
 struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p);
 
+int anc_if_set_algo_module_cali_data(u16 port_id, void *data_p);
+
+int anc_if_get_algo_module_cali_data(u16 port_id, void *data_p);
+
 int anc_if_shared_mem_map(void);
 
 int anc_if_shared_mem_unmap(void);
index c28bd8b..a490dd7 100644 (file)
@@ -2273,6 +2273,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
                kfree_skb(skb);
 }
 
+void skb_rbtree_purge(struct rb_root *root);
+
 void *netdev_alloc_frag(unsigned int fragsz);
 
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2807,6 +2809,12 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
        return __pskb_trim(skb, len);
 }
 
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root)  rb_to_skb(rb_last(root))
+#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))
+
 #define skb_queue_walk(queue, skb) \
                for (skb = (queue)->next;                                       \
                     skb != (struct sk_buff *)(queue);                          \
index fd720e3..fb7b94a 100644 (file)
@@ -67,7 +67,8 @@ struct kmem_cache {
        int size;               /* The size of an object including meta data */
        int object_size;        /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
-       int cpu_partial;        /* Number of per cpu partial objects to keep around */
+       /* Number of per cpu partial objects to keep around */
+       unsigned int cpu_partial;
        struct kmem_cache_order_objects oo;
 
        /* Allocation and freeing of slabs */
index 5b6df1a..747404d 100644 (file)
@@ -279,10 +279,9 @@ struct tcp_sock {
        struct sk_buff* lost_skb_hint;
        struct sk_buff *retransmit_skb_hint;
 
-       /* OOO segments go in this list. Note that socket lock must be held,
-        * as we do not use sk_buff_head lock.
-        */
-       struct sk_buff_head     out_of_order_queue;
+       /* OOO segments go in this rbtree. Socket lock must be held. */
+       struct rb_root  out_of_order_queue;
+       struct sk_buff  *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
 
        /* SACKs data, these 2 need to be together (see tcp_options_write) */
        struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
index 8035167..4fdcd0d 100644 (file)
@@ -43,6 +43,7 @@ struct v4l2_fh {
        wait_queue_head_t       wait;
        struct list_head        subscribed; /* Subscribed events */
        struct list_head        available; /* Dequeueable event */
+       struct mutex            subscribe_lock;
        unsigned int            navailable;
        u32                     sequence;
 
index 93abe5f..d5abd3a 100644 (file)
@@ -146,12 +146,6 @@ struct bond_parm_tbl {
        int mode;
 };
 
-struct netdev_notify_work {
-       struct delayed_work     work;
-       struct net_device       *dev;
-       struct netdev_bonding_info bonding_info;
-};
-
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
@@ -177,6 +171,7 @@ struct slave {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *np;
 #endif
+       struct delayed_work notify_work;
        struct kobject kobj;
        struct rtnl_link_stats64 slave_stats;
 };
index e06f0b6..8dc7fc4 100644 (file)
@@ -14,6 +14,7 @@
 #define _NET_CNSS2_H
 
 #include <linux/pci.h>
+#include <linux/usb.h>
 
 #define CNSS_MAX_FILE_NAME             20
 #define CNSS_MAX_TIMESTAMP_LEN         32
@@ -83,6 +84,21 @@ struct cnss_wlan_driver {
        const struct pci_device_id *id_table;
 };
 
+struct cnss_usb_wlan_driver {
+       char *name;
+       int  (*probe)(struct usb_interface *pintf, const struct usb_device_id
+                     *id);
+       void (*remove)(struct usb_interface *pintf);
+       int  (*reinit)(struct usb_interface *pintf, const struct usb_device_id
+                      *id);
+       void (*shutdown)(struct usb_interface *pintf);
+       void (*crash_shutdown)(struct usb_interface *pintf);
+       int  (*suspend)(struct usb_interface *pintf, pm_message_t state);
+       int  (*resume)(struct usb_interface *pintf);
+       int  (*reset_resume)(struct usb_interface *pintf);
+       const struct usb_device_id *id_table;
+};
+
 enum cnss_driver_status {
        CNSS_UNINITIALIZED,
        CNSS_INITIALIZED,
@@ -192,6 +208,9 @@ extern void cnss_lock_pm_sem(struct device *dev);
 extern void cnss_release_pm_sem(struct device *dev);
 extern int cnss_auto_suspend(struct device *dev);
 extern int cnss_auto_resume(struct device *dev);
+extern int cnss_pci_force_wake_request(struct device *dev);
+extern int cnss_pci_is_device_awake(struct device *dev);
+extern int cnss_pci_force_wake_release(struct device *dev);
 extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
                                        int *num_vectors,
                                        uint32_t *user_base_data,
@@ -212,5 +231,7 @@ extern int cnss_athdiag_write(struct device *dev, uint32_t offset,
                              uint32_t mem_type, uint32_t data_len,
                              uint8_t *input);
 extern int cnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode);
-
+extern int cnss_usb_wlan_register_driver(struct cnss_usb_wlan_driver *driver);
+extern void cnss_usb_wlan_unregister_driver(struct cnss_usb_wlan_driver *
+                                           driver);
 #endif /* _NET_CNSS2_H */
index 3afb7c4..2a25b53 100644 (file)
@@ -322,6 +322,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
 int fib_sync_down_addr(struct net *net, __be32 local);
 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
 
 extern u32 fib_multipath_secret __read_mostly;
 
index 316694d..008f466 100644 (file)
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
  * According to specification 102 622 chapter 4.4 Pipes,
  * the pipe identifier is 7 bits long.
  */
-#define NFC_HCI_MAX_PIPES              127
+#define NFC_HCI_MAX_PIPES              128
 struct nfc_hci_init_data {
        u8 gate_count;
        struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
index 653118a..86eb7ca 100644 (file)
@@ -2149,6 +2149,13 @@ sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
        SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
 }
 
+static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+{
+       int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+       atomic_add(segs, &sk->sk_drops);
+}
+
 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
                           struct sk_buff *skb);
 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
index cbc97fc..976f382 100644 (file)
@@ -665,7 +665,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (skb_queue_empty(&tp->out_of_order_queue) &&
+       if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
            tp->rcv_wnd &&
            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
            !tp->urg_data)
index 4f79506..1845c72 100644 (file)
@@ -29,7 +29,8 @@
 
 #define MM_VID_START   500
 #define MM_VID         501
-#define MM_VID_END     502
+#define MM_VID_2       502
+#define MM_VID_END     503
 
 #define MM_MISC_START  600
 #define MM_MISC                601
 
 #define MM_QCPE_START  700
 #define MM_QCPE_VM1    701
-#define MM_QCPE_VM2    702
-#define MM_QCPE_VM3    703
-#define MM_QCPE_VM4    704
-#define MM_QCPE_END    705
+#define MM_QCPE_END    702
 
 #define        MM_CLK_START    800
 #define        MM_CLK_VM1 801
index d628f7c..87701fd 100644 (file)
@@ -21,6 +21,8 @@
 /* room for ANC_CMD define extend */
 #define ANC_CMD_MAX   0xFF
 
+#define ANC_CALIBRATION_PAYLOAD_SIZE_MAX   100
+
 struct audio_anc_header {
        int32_t data_size;
        int32_t version;
@@ -35,14 +37,23 @@ struct audio_anc_rpm_info {
 struct audio_anc_bypass_mode {
        int32_t mode;
 };
-
 struct audio_anc_algo_module_info {
        int32_t module_id;
 };
 
+struct audio_anc_algo_calibration_header {
+       uint32_t module_id;
+       uint32_t param_id;
+       uint32_t payload_size;
+};
+
+struct audio_anc_algo_calibration_body {
+       int32_t payload[ANC_CALIBRATION_PAYLOAD_SIZE_MAX];
+};
+
 struct audio_anc_algo_calibration_info {
-       int32_t payload_size;
-       /* num bytes of payload specificed in payload_size followed */
+       struct audio_anc_algo_calibration_header cali_header;
+       struct audio_anc_algo_calibration_body cali_body;
 };
 
 union  audio_anc_data {
index 13bb8b7..005fb82 100644 (file)
@@ -322,6 +322,8 @@ enum kgsl_timestamp_type {
 #define KGSL_PROP_DEVICE_QDSS_STM      0x19
 #define KGSL_PROP_DEVICE_QTIMER        0x20
 #define KGSL_PROP_IB_TIMEOUT 0x21
+#define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23
+#define KGSL_PROP_SECURE_CTXT_SUPPORT 0x24
 
 struct kgsl_shadowprop {
        unsigned long gpuaddr;
index a0eeedb..57809d2 100644 (file)
@@ -3866,7 +3866,7 @@ static unsigned long mod_find_symname(struct module *mod, const char *name)
 
        for (i = 0; i < kallsyms->num_symtab; i++)
                if (strcmp(name, symname(kallsyms, i)) == 0 &&
-                   kallsyms->symtab[i].st_info != 'U')
+                   kallsyms->symtab[i].st_shndx != SHN_UNDEF)
                        return kallsyms->symtab[i].st_value;
        return 0;
 }
@@ -3912,6 +3912,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
                for (i = 0; i < kallsyms->num_symtab; i++) {
+
+                       if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+                               continue;
+
                        ret = fn(data, symname(kallsyms, i),
                                 mod, kallsyms->symtab[i].st_value);
                        if (ret != 0)
index 9c56841..b84d137 100644 (file)
@@ -718,7 +718,7 @@ prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
            u64 prefer_idle)
 {
        struct schedtune *st = css_st(css);
-       st->prefer_idle = prefer_idle;
+       st->prefer_idle = !!prefer_idle;
 
        return 0;
 }
index ceec77c..271b379 100644 (file)
@@ -1005,7 +1005,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        /* Convert (if necessary) to absolute time */
        if (flags != TIMER_ABSTIME) {
                ktime_t now = alarm_bases[type].gettime();
-               exp = ktime_add(now, exp);
+
+               exp = ktime_add_safe(now, exp);
        }
 
        if (alarmtimer_do_nsleep(&alarm, exp))
index fdaa88f..74b20e3 100644 (file)
@@ -1513,6 +1513,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
        tmp_iter_page = first_page;
 
        do {
+               cond_resched();
+
                to_remove_page = tmp_iter_page;
                rb_inc_page(cpu_buffer, &tmp_iter_page);
 
index 0507fa5..f6b5478 100644 (file)
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
        void (*put)(struct klist_node *) = i->i_klist->put;
        struct klist_node *last = i->i_cur;
        struct klist_node *prev;
+       unsigned long flags;
 
-       spin_lock(&i->i_klist->k_lock);
+       spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
        if (last) {
                prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
                prev = to_klist_node(prev->n_node.prev);
        }
 
-       spin_unlock(&i->i_klist->k_lock);
+       spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
        if (put && last)
                put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
        void (*put)(struct klist_node *) = i->i_klist->put;
        struct klist_node *last = i->i_cur;
        struct klist_node *next;
+       unsigned long flags;
 
-       spin_lock(&i->i_klist->k_lock);
+       spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
        if (last) {
                next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
                next = to_klist_node(next->n_node.next);
        }
 
-       spin_unlock(&i->i_klist->k_lock);
+       spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
        if (put && last)
                put(last);
index b04f2d2..d1d09bd 100644 (file)
@@ -76,7 +76,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
                new_flags |= VM_DONTDUMP;
                break;
        case MADV_DODUMP:
-               if (new_flags & VM_SPECIAL) {
+               if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
                        error = -EINVAL;
                        goto out;
                }
index 9bdb044..6318fc7 100644 (file)
@@ -1464,6 +1464,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                        mpol_shared_policy_init(&info->policy, NULL);
                        break;
                }
+
+               lockdep_annotate_inode_mutex_key(inode);
        } else
                shmem_free_inode(sb);
        return inode;
index f806a8a..716b794 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1727,7 +1727,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 {
        struct page *page, *page2;
        void *object = NULL;
-       int available = 0;
+       unsigned int available = 0;
        int objects;
 
        /*
@@ -4807,10 +4807,10 @@ static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
                                 size_t length)
 {
-       unsigned long objects;
+       unsigned int objects;
        int err;
 
-       err = kstrtoul(buf, 10, &objects);
+       err = kstrtouint(buf, 10, &objects);
        if (err)
                return err;
        if (objects && !kmem_cache_has_cpu_partial(s))
index 9ab13e3..c020d2d 100644 (file)
@@ -864,6 +864,9 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_SMP
        "nr_tlb_remote_flush",
        "nr_tlb_remote_flush_received",
+#else
+       "", /* nr_tlb_remote_flush */
+       "", /* nr_tlb_remote_flush_received */
 #endif /* CONFIG_SMP */
        "nr_tlb_local_flush_all",
        "nr_tlb_local_flush_one",
@@ -872,7 +875,6 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_DEBUG_VM_VMACACHE
        "vmacache_find_calls",
        "vmacache_find_hits",
-       "vmacache_full_flushes",
 #endif
 #endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
index 346b5c1..c40eb04 100644 (file)
@@ -569,6 +569,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
                hdr.hop_limit, &hdr.daddr);
 
        skb_push(skb, sizeof(hdr));
+       skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));
 
index 070cf13..f2660c1 100644 (file)
@@ -67,6 +67,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
        if (e->ethproto != htons(ETH_P_ARP) ||
            e->invflags & EBT_IPROTO)
                return -EINVAL;
+       if (ebt_invalid_target(info->target))
+               return -EINVAL;
+
        return 0;
 }
 
index a2e903c..18035fa 100644 (file)
@@ -1662,6 +1662,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
+/**
+ *     call_netdevice_notifiers_mtu - call all network notifier blocks
+ *     @val: value passed unmodified to notifier function
+ *     @dev: net_device pointer passed unmodified to notifier function
+ *     @arg: additional u32 argument passed to the notifier function
+ *
+ *     Call all network notifier blocks.  Parameters and return value
+ *     are as for raw_notifier_call_chain().
+ */
+static int call_netdevice_notifiers_mtu(unsigned long val,
+                                       struct net_device *dev, u32 arg)
+{
+       struct netdev_notifier_info_ext info = {
+               .info.dev = dev,
+               .ext.mtu = arg,
+       };
+
+       BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
+
+       return call_netdevice_notifiers_info(val, dev, &info.info);
+}
+
 #ifdef CONFIG_NET_INGRESS
 static struct static_key ingress_needed __read_mostly;
 
@@ -6170,14 +6192,16 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
        err = __dev_set_mtu(dev, new_mtu);
 
        if (!err) {
-               err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+               err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+                                                  orig_mtu);
                err = notifier_to_errno(err);
                if (err) {
                        /* setting mtu back and notifying everyone again,
                         * so that they have a chance to revert changes.
                         */
                        __dev_set_mtu(dev, orig_mtu);
-                       call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+                       call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+                                                    new_mtu);
                }
        }
        return err;
index 642b13d..fef2043 100644 (file)
@@ -1140,6 +1140,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                lladdr = neigh->ha;
        }
 
+       /* Update confirmed timestamp for neighbour entry after we
+        * received ARP packet even if it doesn't change IP to MAC binding.
+        */
+       if (new & NUD_CONNECTED)
+               neigh->confirmed = jiffies;
+
        /* If entry was valid and address is not changed,
           do not change entry state, if new one is STALE.
         */
@@ -1163,15 +1169,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                }
        }
 
-       /* Update timestamps only once we know we will make a change to the
+       /* Update timestamp only once we know we will make a change to the
         * neighbour entry. Otherwise we risk to move the locktime window with
         * noop updates and ignore relevant ARP updates.
         */
-       if (new != old || lladdr != neigh->ha) {
-               if (new & NUD_CONNECTED)
-                       neigh->confirmed = jiffies;
+       if (new != old || lladdr != neigh->ha)
                neigh->updated = jiffies;
-       }
 
        if (new != old) {
                neigh_del_timer(neigh);
index 96c9c0f..f1df04c 100644 (file)
@@ -2116,6 +2116,12 @@ struct net_device *rtnl_create_link(struct net *net,
        else if (ops->get_num_rx_queues)
                num_rx_queues = ops->get_num_rx_queues();
 
+       if (num_tx_queues < 1 || num_tx_queues > 4096)
+               return ERR_PTR(-EINVAL);
+
+       if (num_rx_queues < 1 || num_rx_queues > 4096)
+               return ERR_PTR(-EINVAL);
+
        err = -ENOMEM;
        dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
                               ops->setup, num_tx_queues, num_rx_queues);
index cfbf857..5bd724d 100644 (file)
@@ -2404,6 +2404,25 @@ void skb_queue_purge(struct sk_buff_head *list)
 EXPORT_SYMBOL(skb_queue_purge);
 
 /**
+ *     skb_rbtree_purge - empty a skb rbtree
+ *     @root: root of the rbtree to empty
+ *
+ *     Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
+ *     the list and one reference dropped. This function does not take
+ *     any lock. Synchronization should be handled by the caller (e.g., TCP
+ *     out-of-order queue is protected by the socket lock).
+ */
+void skb_rbtree_purge(struct rb_root *root)
+{
+       struct sk_buff *skb, *next;
+
+       rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
+               kfree_skb(skb);
+
+       *root = RB_ROOT;
+}
+
+/**
  *     skb_queue_head - queue a buffer at the list head
  *     @list: list to use
  *     @newsk: buffer to queue
index aad274c..55eff96 100644 (file)
@@ -1301,6 +1301,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                if (encap)
                        skb_reset_inner_headers(skb);
                skb->network_header = (u8 *)iph - skb->head;
+               skb_reset_mac_len(skb);
        } while ((skb = skb->next));
 
 out:
index 249a894..860e33d 100644 (file)
@@ -1171,7 +1171,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct netdev_notifier_changeupper_info *info;
+       struct netdev_notifier_changeupper_info *upper_info = ptr;
+       struct netdev_notifier_info_ext *info_ext = ptr;
        struct in_device *in_dev;
        struct net *net = dev_net(dev);
        unsigned int flags;
@@ -1206,16 +1207,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
                        fib_sync_up(dev, RTNH_F_LINKDOWN);
                else
                        fib_sync_down_dev(dev, event, false);
-               /* fall through */
+               rt_cache_flush(net);
+               break;
        case NETDEV_CHANGEMTU:
+               fib_sync_mtu(dev, info_ext->ext.mtu);
                rt_cache_flush(net);
                break;
        case NETDEV_CHANGEUPPER:
-               info = ptr;
+               upper_info = ptr;
                /* flush all routes if dev is linked to or unlinked from
                 * an L3 master device (e.g., VRF)
                 */
-               if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+               if (upper_info->upper_dev &&
+                   netif_is_l3_master(upper_info->upper_dev))
                        fib_disable_ip(dev, NETDEV_DOWN, true);
                break;
        }
index 03ebff3..3109b9b 100644 (file)
@@ -1373,6 +1373,56 @@ int fib_sync_down_addr(struct net *net, __be32 local)
        return ret;
 }
 
+/* Update the PMTU of exceptions when:
+ * - the new MTU of the first hop becomes smaller than the PMTU
+ * - the old MTU was the same as the PMTU, and it limited discovery of
+ *   larger MTUs on the path. With that limit raised, we can now
+ *   discover larger MTUs
+ * A special case is locked exceptions, for which the PMTU is smaller
+ * than the minimal accepted PMTU:
+ * - if the new MTU is greater than the PMTU, don't make any change
+ * - otherwise, unlock and set PMTU
+ */
+static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
+{
+       struct fnhe_hash_bucket *bucket;
+       int i;
+
+       bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
+       if (!bucket)
+               return;
+
+       for (i = 0; i < FNHE_HASH_SIZE; i++) {
+               struct fib_nh_exception *fnhe;
+
+               for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
+                    fnhe;
+                    fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
+                       if (fnhe->fnhe_mtu_locked) {
+                               if (new <= fnhe->fnhe_pmtu) {
+                                       fnhe->fnhe_pmtu = new;
+                                       fnhe->fnhe_mtu_locked = false;
+                               }
+                       } else if (new < fnhe->fnhe_pmtu ||
+                                  orig == fnhe->fnhe_pmtu) {
+                               fnhe->fnhe_pmtu = new;
+                       }
+               }
+       }
+}
+
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
+{
+       unsigned int hash = fib_devindex_hashfn(dev->ifindex);
+       struct hlist_head *head = &fib_info_devhash[hash];
+       struct fib_nh *nh;
+
+       hlist_for_each_entry(nh, head, nh_hash) {
+               if (nh->nh_dev == dev)
+                       nh_update_mtu(nh, dev->mtu, orig_mtu);
+       }
+}
+
 /* Event              force Flags           Description
  * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
  * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
index 88426a6..3f8caf7 100644 (file)
@@ -134,7 +134,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
        struct sockaddr_in sin;
-       const struct iphdr *iph = ip_hdr(skb);
        __be16 *ports;
        int end;
 
@@ -149,7 +148,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
        ports = (__be16 *)skb_transport_header(skb);
 
        sin.sin_family = AF_INET;
-       sin.sin_addr.s_addr = iph->daddr;
+       sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
        sin.sin_port = ports[1];
        memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
 
index 3d62feb..9d3176b 100644 (file)
@@ -597,6 +597,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, u8 protocol)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
+       unsigned int inner_nhdr_len = 0;
        const struct iphdr *inner_iph;
        struct flowi4 fl4;
        u8     tos, ttl;
@@ -607,6 +608,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        int err;
        bool connected;
 
+       /* ensure we can access the inner net header, for several users below */
+       if (skb->protocol == htons(ETH_P_IP))
+               inner_nhdr_len = sizeof(struct iphdr);
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               inner_nhdr_len = sizeof(struct ipv6hdr);
+       if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+               goto tx_error;
+
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
        connected = (tunnel->parms.iph.daddr != 0);
 
index 46ad77d..718049f 100644 (file)
@@ -388,7 +388,7 @@ void tcp_init_sock(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       __skb_queue_head_init(&tp->out_of_order_queue);
+       tp->out_of_order_queue = RB_ROOT;
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);
        INIT_LIST_HEAD(&tp->tsq_node);
@@ -2249,7 +2249,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tcp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
        tcp_write_queue_purge(sk);
-       __skb_queue_purge(&tp->out_of_order_queue);
+       skb_rbtree_purge(&tp->out_of_order_queue);
 
        inet->inet_dport = 0;
 
index d2fbd44..e3d661c 100644 (file)
@@ -4074,7 +4074,7 @@ static void tcp_fin(struct sock *sk)
        /* It _is_ possible, that we have something out-of-order _after_ FIN.
         * Probably, we should reset in this case. For now drop them.
         */
-       __skb_queue_purge(&tp->out_of_order_queue);
+       skb_rbtree_purge(&tp->out_of_order_queue);
        if (tcp_is_sack(tp))
                tcp_sack_reset(&tp->rx_opt);
        sk_mem_reclaim(sk);
@@ -4234,7 +4234,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
        int this_sack;
 
        /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
-       if (skb_queue_empty(&tp->out_of_order_queue)) {
+       if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
                tp->rx_opt.num_sacks = 0;
                return;
        }
@@ -4297,6 +4297,29 @@ static bool tcp_try_coalesce(struct sock *sk,
        return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+                            struct sk_buff *to,
+                            struct sk_buff *from,
+                            bool *fragstolen)
+{
+       bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+       /* In case tcp_drop() is called later, update to->gso_segs */
+       if (res) {
+               u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+                              max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+               skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+       }
+       return res;
+}
+
+static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+{
+       sk_drops_add(sk, skb);
+       __kfree_skb(skb);
+}
+
 /* This one checks to see if we can put data from the
  * out_of_order queue into the receive_queue.
  */
@@ -4304,10 +4327,13 @@ static void tcp_ofo_queue(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 dsack_high = tp->rcv_nxt;
+       bool fin, fragstolen, eaten;
        struct sk_buff *skb, *tail;
-       bool fragstolen, eaten;
+       struct rb_node *p;
 
-       while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
+       p = rb_first(&tp->out_of_order_queue);
+       while (p) {
+               skb = rb_entry(p, struct sk_buff, rbnode);
                if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
                        break;
 
@@ -4317,11 +4343,12 @@ static void tcp_ofo_queue(struct sock *sk)
                                dsack_high = TCP_SKB_CB(skb)->end_seq;
                        tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
                }
+               p = rb_next(p);
+               rb_erase(&skb->rbnode, &tp->out_of_order_queue);
 
-               __skb_unlink(skb, &tp->out_of_order_queue);
-               if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
+               if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
                        SOCK_DEBUG(sk, "ofo packet was already received\n");
-                       __kfree_skb(skb);
+                       tcp_drop(sk, skb);
                        continue;
                }
                SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
@@ -4331,12 +4358,19 @@ static void tcp_ofo_queue(struct sock *sk)
                tail = skb_peek_tail(&sk->sk_receive_queue);
                eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
                tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
+               fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
                if (!eaten)
                        __skb_queue_tail(&sk->sk_receive_queue, skb);
-               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-                       tcp_fin(sk);
-               if (eaten)
+               else
                        kfree_skb_partial(skb, fragstolen);
+
+               if (unlikely(fin)) {
+                       tcp_fin(sk);
+                       /* tcp_fin() purges tp->out_of_order_queue,
+                        * so we must end this loop right now.
+                        */
+                       break;
+               }
        }
 }
 
@@ -4366,14 +4400,16 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       struct rb_node **p, *q, *parent;
        struct sk_buff *skb1;
        u32 seq, end_seq;
+       bool fragstolen;
 
        tcp_ecn_check_ce(sk, skb);
 
        if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
-               __kfree_skb(skb);
+               tcp_drop(sk, skb);
                return;
        }
 
@@ -4382,89 +4418,89 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        inet_csk_schedule_ack(sk);
 
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+       seq = TCP_SKB_CB(skb)->seq;
+       end_seq = TCP_SKB_CB(skb)->end_seq;
        SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
-                  tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+                  tp->rcv_nxt, seq, end_seq);
 
-       skb1 = skb_peek_tail(&tp->out_of_order_queue);
-       if (!skb1) {
+       p = &tp->out_of_order_queue.rb_node;
+       if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
                /* Initial out of order segment, build 1 SACK. */
                if (tcp_is_sack(tp)) {
                        tp->rx_opt.num_sacks = 1;
-                       tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
-                       tp->selective_acks[0].end_seq =
-                                               TCP_SKB_CB(skb)->end_seq;
-               }
-               __skb_queue_head(&tp->out_of_order_queue, skb);
-               goto end;
-       }
-
-       seq = TCP_SKB_CB(skb)->seq;
-       end_seq = TCP_SKB_CB(skb)->end_seq;
-
-       if (seq == TCP_SKB_CB(skb1)->end_seq) {
-               bool fragstolen;
-
-               if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
-                       __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
-               } else {
-                       tcp_grow_window(sk, skb);
-                       kfree_skb_partial(skb, fragstolen);
-                       skb = NULL;
+                       tp->selective_acks[0].start_seq = seq;
+                       tp->selective_acks[0].end_seq = end_seq;
                }
-
-               if (!tp->rx_opt.num_sacks ||
-                   tp->selective_acks[0].end_seq != seq)
-                       goto add_sack;
-
-               /* Common case: data arrive in order after hole. */
-               tp->selective_acks[0].end_seq = end_seq;
+               rb_link_node(&skb->rbnode, NULL, p);
+               rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
+               tp->ooo_last_skb = skb;
                goto end;
        }
 
-       /* Find place to insert this segment. */
-       while (1) {
-               if (!after(TCP_SKB_CB(skb1)->seq, seq))
-                       break;
-               if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
-                       skb1 = NULL;
-                       break;
+       /* In the typical case, we are adding an skb to the end of the list.
+        * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
+        */
+       if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+                                skb, &fragstolen)) {
+coalesce_done:
+               tcp_grow_window(sk, skb);
+               kfree_skb_partial(skb, fragstolen);
+               skb = NULL;
+               goto add_sack;
+       }
+
+       /* Find place to insert this segment. Handle overlaps on the way. */
+       parent = NULL;
+       while (*p) {
+               parent = *p;
+               skb1 = rb_entry(parent, struct sk_buff, rbnode);
+               if (before(seq, TCP_SKB_CB(skb1)->seq)) {
+                       p = &parent->rb_left;
+                       continue;
                }
-               skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
-       }
 
-       /* Do skb overlap to previous one? */
-       if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
-               if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
-                       /* All the bits are present. Drop. */
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
-                       __kfree_skb(skb);
-                       skb = NULL;
-                       tcp_dsack_set(sk, seq, end_seq);
-                       goto add_sack;
-               }
-               if (after(seq, TCP_SKB_CB(skb1)->seq)) {
-                       /* Partial overlap. */
-                       tcp_dsack_set(sk, seq,
-                                     TCP_SKB_CB(skb1)->end_seq);
-               } else {
-                       if (skb_queue_is_first(&tp->out_of_order_queue,
-                                              skb1))
-                               skb1 = NULL;
-                       else
-                               skb1 = skb_queue_prev(
-                                       &tp->out_of_order_queue,
-                                       skb1);
+               if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+                       if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+                               /* All the bits are present. Drop. */
+                               NET_INC_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPOFOMERGE);
+                               tcp_drop(sk, skb);
+                               skb = NULL;
+                               tcp_dsack_set(sk, seq, end_seq);
+                               goto add_sack;
+                       }
+                       if (after(seq, TCP_SKB_CB(skb1)->seq)) {
+                               /* Partial overlap. */
+                               tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
+                       } else {
+                               /* skb's seq == skb1's seq and skb covers skb1.
+                                * Replace skb1 with skb.
+                                */
+                               rb_replace_node(&skb1->rbnode, &skb->rbnode,
+                                               &tp->out_of_order_queue);
+                               tcp_dsack_extend(sk,
+                                                TCP_SKB_CB(skb1)->seq,
+                                                TCP_SKB_CB(skb1)->end_seq);
+                               NET_INC_STATS(sock_net(sk),
+                                             LINUX_MIB_TCPOFOMERGE);
+                               tcp_drop(sk, skb1);
+                               goto merge_right;
+                       }
+               } else if (tcp_ooo_try_coalesce(sk, skb1,
+                                               skb, &fragstolen)) {
+                       goto coalesce_done;
                }
+               p = &parent->rb_right;
        }
-       if (!skb1)
-               __skb_queue_head(&tp->out_of_order_queue, skb);
-       else
-               __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 
-       /* And clean segments covered by new one as whole. */
-       while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
-               skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
+       /* Insert segment into RB tree. */
+       rb_link_node(&skb->rbnode, parent, p);
+       rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
 
+merge_right:
+       /* Remove other segments covered by skb. */
+       while ((q = rb_next(&skb->rbnode)) != NULL) {
+               skb1 = rb_entry(q, struct sk_buff, rbnode);
                if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
                        break;
                if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4472,12 +4508,15 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
                                         end_seq);
                        break;
                }
-               __skb_unlink(skb1, &tp->out_of_order_queue);
+               rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
                tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                                 TCP_SKB_CB(skb1)->end_seq);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
-               __kfree_skb(skb1);
+               tcp_drop(sk, skb1);
        }
+       /* If there is no skb after us, we are the last_skb ! */
+       if (!q)
+               tp->ooo_last_skb = skb;
 
 add_sack:
        if (tcp_is_sack(tp))
@@ -4559,12 +4598,13 @@ err:
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       int eaten = -1;
        bool fragstolen = false;
+       int eaten = -1;
 
-       if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
-               goto drop;
-
+       if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
+               __kfree_skb(skb);
+               return;
+       }
        skb_dst_drop(skb);
        __skb_pull(skb, tcp_hdr(skb)->doff * 4);
 
@@ -4615,13 +4655,13 @@ queue_and_out:
                if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                        tcp_fin(sk);
 
-               if (!skb_queue_empty(&tp->out_of_order_queue)) {
+               if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
                        tcp_ofo_queue(sk);
 
                        /* RFC2581. 4.2. SHOULD send immediate ACK, when
                         * gap in queue is filled.
                         */
-                       if (skb_queue_empty(&tp->out_of_order_queue))
+                       if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
                                inet_csk(sk)->icsk_ack.pingpong = 0;
                }
 
@@ -4646,7 +4686,7 @@ out_of_window:
                tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
                inet_csk_schedule_ack(sk);
 drop:
-               __kfree_skb(skb);
+               tcp_drop(sk, skb);
                return;
        }
 
@@ -4673,48 +4713,76 @@ drop:
        tcp_data_queue_ofo(sk, skb);
 }
 
+static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
+{
+       if (list)
+               return !skb_queue_is_last(list, skb) ? skb->next : NULL;
+
+       return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+}
+
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
-                                       struct sk_buff_head *list)
+                                       struct sk_buff_head *list,
+                                       struct rb_root *root)
 {
-       struct sk_buff *next = NULL;
+       struct sk_buff *next = tcp_skb_next(skb, list);
 
-       if (!skb_queue_is_last(list, skb))
-               next = skb_queue_next(list, skb);
+       if (list)
+               __skb_unlink(skb, list);
+       else
+               rb_erase(&skb->rbnode, root);
 
-       __skb_unlink(skb, list);
        __kfree_skb(skb);
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 
        return next;
 }
 
+/* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */
+static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
+{
+       struct rb_node **p = &root->rb_node;
+       struct rb_node *parent = NULL;
+       struct sk_buff *skb1;
+
+       while (*p) {
+               parent = *p;
+               skb1 = rb_entry(parent, struct sk_buff, rbnode);
+               if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
+                       p = &parent->rb_left;
+               else
+                       p = &parent->rb_right;
+       }
+       rb_link_node(&skb->rbnode, parent, p);
+       rb_insert_color(&skb->rbnode, root);
+}
+
 /* Collapse contiguous sequence of skbs head..tail with
  * sequence numbers start..end.
  *
- * If tail is NULL, this means until the end of the list.
+ * If tail is NULL, this means until the end of the queue.
  *
  * Segments with FIN/SYN are not collapsed (only because this
  * simplifies code)
  */
 static void
-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
-            struct sk_buff *head, struct sk_buff *tail,
-            u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
+            struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
 {
-       struct sk_buff *skb, *n;
+       struct sk_buff *skb = head, *n;
+       struct sk_buff_head tmp;
        bool end_of_skbs;
 
        /* First, check that queue is collapsible and find
-        * the point where collapsing can be useful. */
-       skb = head;
+        * the point where collapsing can be useful.
+        */
 restart:
-       end_of_skbs = true;
-       skb_queue_walk_from_safe(list, skb, n) {
-               if (skb == tail)
-                       break;
+       for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
+               n = tcp_skb_next(skb, list);
+
                /* No new bits? It is possible on ofo queue. */
                if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
-                       skb = tcp_collapse_one(sk, skb, list);
+                       skb = tcp_collapse_one(sk, skb, list, root);
                        if (!skb)
                                break;
                        goto restart;
@@ -4732,13 +4800,10 @@ restart:
                        break;
                }
 
-               if (!skb_queue_is_last(list, skb)) {
-                       struct sk_buff *next = skb_queue_next(list, skb);
-                       if (next != tail &&
-                           TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
-                               end_of_skbs = false;
-                               break;
-                       }
+               if (n && n != tail &&
+                   TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
+                       end_of_skbs = false;
+                       break;
                }
 
                /* Decided to skip this, advance start seq. */
@@ -4748,17 +4813,22 @@ restart:
            (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
                return;
 
+       __skb_queue_head_init(&tmp);
+
        while (before(start, end)) {
                int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
                struct sk_buff *nskb;
 
                nskb = alloc_skb(copy, GFP_ATOMIC);
                if (!nskb)
-                       return;
+                       break;
 
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
-               __skb_queue_before(list, skb, nskb);
+               if (list)
+                       __skb_queue_before(list, skb, nskb);
+               else
+                       __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
                skb_set_owner_r(nskb, sk);
 
                /* Copy data, releasing collapsed skbs. */
@@ -4776,14 +4846,17 @@ restart:
                                start += size;
                        }
                        if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
-                               skb = tcp_collapse_one(sk, skb, list);
+                               skb = tcp_collapse_one(sk, skb, list, root);
                                if (!skb ||
                                    skb == tail ||
                                    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
-                                       return;
+                                       goto end;
                        }
                }
        }
+end:
+       skb_queue_walk_safe(&tmp, skb, n)
+               tcp_rbtree_insert(root, skb);
 }
 
 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
@@ -4793,34 +4866,39 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        u32 range_truesize, sum_tiny = 0;
-       struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
-       struct sk_buff *head;
+       struct sk_buff *skb, *head;
+       struct rb_node *p;
        u32 start, end;
 
-       if (!skb)
+       p = rb_first(&tp->out_of_order_queue);
+       skb = rb_entry_safe(p, struct sk_buff, rbnode);
+new_range:
+       if (!skb) {
+               p = rb_last(&tp->out_of_order_queue);
+               /* Note: This is possible p is NULL here. We do not
+                * use rb_entry_safe(), as ooo_last_skb is valid only
+                * if rbtree is not empty.
+                */
+               tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
                return;
-
+       }
        start = TCP_SKB_CB(skb)->seq;
        end = TCP_SKB_CB(skb)->end_seq;
        range_truesize = skb->truesize;
-       head = skb;
 
-       for (;;) {
-               struct sk_buff *next = NULL;
+       for (head = skb;;) {
+               skb = tcp_skb_next(skb, NULL);
 
-               if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
-                       next = skb_queue_next(&tp->out_of_order_queue, skb);
-               skb = next;
-
-               /* Segment is terminated when we see gap or when
-                * we are at the end of all the queue. */
+               /* Range is terminated when we see a gap or when
+                * we are at the queue end.
+                */
                if (!skb ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
                        /* Do not attempt collapsing tiny skbs */
                        if (range_truesize != head->truesize ||
                            end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
-                               tcp_collapse(sk, &tp->out_of_order_queue,
+                               tcp_collapse(sk, NULL, &tp->out_of_order_queue,
                                             head, skb, start, end);
                        } else {
                                sum_tiny += range_truesize;
@@ -4828,47 +4906,60 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
                                        return;
                        }
 
-                       head = skb;
-                       if (!skb)
-                               break;
-                       /* Start new segment */
+                       goto new_range;
+               }
+
+               range_truesize += skb->truesize;
+               if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
                        start = TCP_SKB_CB(skb)->seq;
+               if (after(TCP_SKB_CB(skb)->end_seq, end))
                        end = TCP_SKB_CB(skb)->end_seq;
-                       range_truesize = skb->truesize;
-               } else {
-                       range_truesize += skb->truesize;
-                       if (before(TCP_SKB_CB(skb)->seq, start))
-                               start = TCP_SKB_CB(skb)->seq;
-                       if (after(TCP_SKB_CB(skb)->end_seq, end))
-                               end = TCP_SKB_CB(skb)->end_seq;
-               }
        }
 }
 
 /*
  * Purge the out-of-order queue.
+ * Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
  * Return true if queue was pruned.
  */
 static bool tcp_prune_ofo_queue(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       bool res = false;
+       struct rb_node *node, *prev;
+       int goal;
 
-       if (!skb_queue_empty(&tp->out_of_order_queue)) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
-               __skb_queue_purge(&tp->out_of_order_queue);
+       if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
+               return false;
 
-               /* Reset SACK state.  A conforming SACK implementation will
-                * do the same at a timeout based retransmit.  When a connection
-                * is in a sad state like this, we care only about integrity
-                * of the connection not performance.
-                */
-               if (tp->rx_opt.sack_ok)
-                       tcp_sack_reset(&tp->rx_opt);
-               sk_mem_reclaim(sk);
-               res = true;
-       }
-       return res;
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
+       goal = sk->sk_rcvbuf >> 3;
+       node = &tp->ooo_last_skb->rbnode;
+       do {
+               prev = rb_prev(node);
+               rb_erase(node, &tp->out_of_order_queue);
+               goal -= rb_to_skb(node)->truesize;
+               __kfree_skb(rb_to_skb(node));
+               if (!prev || goal <= 0) {
+                       sk_mem_reclaim(sk);
+                       if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+                           !tcp_under_memory_pressure(sk))
+                               break;
+                       goal = sk->sk_rcvbuf >> 3;
+               }
+
+               node = prev;
+       } while (node);
+       tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+
+       /* Reset SACK state.  A conforming SACK implementation will
+        * do the same at a timeout based retransmit.  When a connection
+        * is in a sad state like this, we care only about integrity
+        * of the connection not performance.
+        */
+       if (tp->rx_opt.sack_ok)
+               tcp_sack_reset(&tp->rx_opt);
+
+       return true;
 }
 
 /* Reduce allocated memory if we can, trying to get
@@ -4896,7 +4987,7 @@ static int tcp_prune_queue(struct sock *sk)
 
        tcp_collapse_ofo_queue(sk);
        if (!skb_queue_empty(&sk->sk_receive_queue))
-               tcp_collapse(sk, &sk->sk_receive_queue,
+               tcp_collapse(sk, &sk->sk_receive_queue, NULL,
                             skb_peek(&sk->sk_receive_queue),
                             NULL,
                             tp->copied_seq, tp->rcv_nxt);
@@ -5002,7 +5093,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
            /* We ACK each frame or... */
            tcp_in_quickack_mode(sk) ||
            /* We have out of order data. */
-           (ofo_possible && skb_peek(&tp->out_of_order_queue))) {
+           (ofo_possible && !RB_EMPTY_ROOT(&tp->out_of_order_queue))) {
                /* Then ack it now */
                tcp_send_ack(sk);
        } else {
@@ -5238,7 +5329,7 @@ syn_challenge:
        return true;
 
 discard:
-       __kfree_skb(skb);
+       tcp_drop(sk, skb);
        return false;
 }
 
@@ -5456,7 +5547,7 @@ csum_error:
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 
 discard:
-       __kfree_skb(skb);
+       tcp_drop(sk, skb);
 }
 EXPORT_SYMBOL(tcp_rcv_established);
 
@@ -5686,7 +5777,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                                                  TCP_DELACK_MAX, TCP_RTO_MAX);
 
 discard:
-                       __kfree_skb(skb);
+                       tcp_drop(sk, skb);
                        return 0;
                } else {
                        tcp_send_ack(sk);
@@ -6043,7 +6134,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 
        if (!queued) {
 discard:
-               __kfree_skb(skb);
+               tcp_drop(sk, skb);
        }
        return 0;
 }
index 96f3209..21a0fcb 100644 (file)
@@ -1727,6 +1727,7 @@ discard_it:
        return 0;
 
 discard_and_relse:
+       sk_drops_add(sk, skb);
        sock_put(sk);
        goto discard_it;
 
@@ -1840,7 +1841,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        tcp_write_queue_purge(sk);
 
        /* Cleans up our, hopefully empty, out_of_order_queue. */
-       __skb_queue_purge(&tp->out_of_order_queue);
+       skb_rbtree_purge(&tp->out_of_order_queue);
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Clean up the MD5 key list, if any */
index d270870..a48846d 100644 (file)
@@ -496,7 +496,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
                newtp->snd_cwnd_cnt = 0;
 
                tcp_init_xmit_timers(newsk);
-               __skb_queue_head_init(&newtp->out_of_order_queue);
                newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
 
                newtp->rx_opt.saw_tstamp = 0;
index 199658a..2e478a4 100644 (file)
@@ -3858,7 +3858,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
                                p++;
                                continue;
                        }
-                       state->offset++;
                        return ifa;
                }
 
@@ -3882,13 +3881,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
                return ifa;
        }
 
+       state->offset = 0;
        while (++state->bucket < IN6_ADDR_HSIZE) {
-               state->offset = 0;
                hlist_for_each_entry_rcu_bh(ifa,
                                     &inet6_addr_lst[state->bucket], addr_lst) {
                        if (!net_eq(dev_net(ifa->idev->dev), net))
                                continue;
-                       state->offset++;
                        return ifa;
                }
        }
index c612daa..59127b4 100644 (file)
@@ -118,6 +118,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
                ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
                ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
                skb->network_header = (u8 *)ipv6h - skb->head;
+               skb_reset_mac_len(skb);
 
                if (udpfrag) {
                        int err = ip6_find_1stfragopt(skb, &prevhdr);
index 0feede4..530b62f 100644 (file)
@@ -193,12 +193,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                                kfree_skb(skb);
                                return -ENOBUFS;
                        }
+                       if (skb->sk)
+                               skb_set_owner_w(skb2, skb->sk);
                        consume_skb(skb);
                        skb = skb2;
-                       /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
-                        * it is safe to call in our context (socket lock not held)
-                        */
-                       skb_set_owner_w(skb, (struct sock *)sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
index 3c2468b..8d55abb 100644 (file)
@@ -1096,7 +1096,7 @@ static inline int
 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       const struct iphdr  *iph = ip_hdr(skb);
+       const struct iphdr  *iph;
        int encap_limit = -1;
        struct flowi6 fl6;
        __u8 dsfield;
@@ -1104,6 +1104,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 tproto;
        int err;
 
+       /* ensure we can access the full inner ip header */
+       if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+               return -1;
+
+       iph = ip_hdr(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
        tproto = ACCESS_ONCE(t->parms.proto);
@@ -1142,7 +1147,7 @@ static inline int
 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct ipv6hdr *ipv6h;
        int encap_limit = -1;
        __u16 offset;
        struct flowi6 fl6;
@@ -1151,6 +1156,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 tproto;
        int err;
 
+       if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+               return -1;
+
+       ipv6h = ipv6_hdr(skb);
        tproto = ACCESS_ONCE(t->parms.proto);
        if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
            ip6_tnl_addr_conflict(t, ipv6h))
index aa634b3..ce08617 100644 (file)
@@ -1507,6 +1507,7 @@ discard_it:
        return 0;
 
 discard_and_relse:
+       sk_drops_add(sk, skb);
        sock_put(sk);
        goto discard_it;
 
index 1f93003..67348d8 100644 (file)
@@ -219,7 +219,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
                /* Keys without a station are used for TX only */
-               if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+               if (sta && test_sta_flag(sta, WLAN_STA_MFP))
                        key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
                break;
        case NL80211_IFTYPE_ADHOC:
index 08ac73b..0a35dd6 100644 (file)
@@ -948,8 +948,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
        if (len < IEEE80211_DEAUTH_FRAME_LEN)
                return;
 
-       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, reason);
+       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
        sta_info_destroy_addr(sdata, mgmt->sa);
 }
 
@@ -967,9 +967,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
        auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
        auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 
-       ibss_dbg(sdata,
-                "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+       ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+                mgmt->bssid, auth_transaction);
 
        if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
                return;
@@ -1174,10 +1174,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                rx_timestamp = drv_get_tsf(local, sdata);
        }
 
-       ibss_dbg(sdata,
-                "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+       ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
                 mgmt->sa, mgmt->bssid,
-                (unsigned long long)rx_timestamp,
+                (unsigned long long)rx_timestamp);
+       ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
                 (unsigned long long)beacon_timestamp,
                 (unsigned long long)(rx_timestamp - beacon_timestamp),
                 jiffies);
@@ -1536,9 +1536,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 
        tx_last_beacon = drv_tx_last_beacon(local);
 
-       ibss_dbg(sdata,
-                "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+       ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+                mgmt->bssid, tx_last_beacon);
 
        if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
                return;
index 2ee53dc..15d23ae 100644 (file)
@@ -253,8 +253,27 @@ static void ieee80211_restart_work(struct work_struct *work)
             "%s called with hardware scan in progress\n", __func__);
 
        rtnl_lock();
-       list_for_each_entry(sdata, &local->interfaces, list)
+       list_for_each_entry(sdata, &local->interfaces, list) {
+               /*
+                * XXX: there may be more work for other vif types and even
+                * for station mode: a good thing would be to run most of
+                * the iface type's dependent _stop (ieee80211_mg_stop,
+                * ieee80211_ibss_stop) etc...
+                * For now, fix only the specific bug that was seen: race
+                * between csa_connection_drop_work and us.
+                */
+               if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+                       /*
+                        * This worker is scheduled from the iface worker that
+                        * runs on mac80211's workqueue, so we can't be
+                        * scheduling this worker after the cancel right here.
+                        * The exception is ieee80211_chswitch_done.
+                        * Then we can have a race...
+                        */
+                       cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+               }
                flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+       }
        ieee80211_scan_cancel(local);
        ieee80211_reconfig(local);
        rtnl_unlock();
@@ -460,10 +479,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
                cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
                            IEEE80211_VHT_CAP_SHORT_GI_80 |
                            IEEE80211_VHT_CAP_SHORT_GI_160 |
-                           IEEE80211_VHT_CAP_RXSTBC_1 |
-                           IEEE80211_VHT_CAP_RXSTBC_2 |
-                           IEEE80211_VHT_CAP_RXSTBC_3 |
-                           IEEE80211_VHT_CAP_RXSTBC_4 |
+                           IEEE80211_VHT_CAP_RXSTBC_MASK |
                            IEEE80211_VHT_CAP_TXSTBC |
                            IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
                            IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
index f55cddc..466922f 100644 (file)
@@ -552,6 +552,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                forward = false;
                reply = true;
                target_metric = 0;
+
+               if (SN_GT(target_sn, ifmsh->sn))
+                       ifmsh->sn = target_sn;
+
                if (time_after(jiffies, ifmsh->last_sn_update +
                                        net_traversal_jiffies(sdata)) ||
                    time_before(jiffies, ifmsh->last_sn_update)) {
index c16b4da..000dd7e 100644 (file)
@@ -1021,6 +1021,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
         */
 
        if (sdata->reserved_chanctx) {
+               struct ieee80211_supported_band *sband = NULL;
+               struct sta_info *mgd_sta = NULL;
+               enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
                /*
                 * with multi-vif csa driver may call ieee80211_csa_finish()
                 * many times while waiting for other interfaces to use their
@@ -1029,6 +1033,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                if (sdata->reserved_ready)
                        goto out;
 
+               if (sdata->vif.bss_conf.chandef.width !=
+                   sdata->csa_chandef.width) {
+                       /*
+                        * For managed interface, we need to also update the AP
+                        * station bandwidth and align the rate scale algorithm
+                        * on the bandwidth change. Here we only consider the
+                        * bandwidth of the new channel definition (as channel
+                        * switch flow does not have the full HT/VHT/HE
+                        * information), assuming that if additional changes are
+                        * required they would be done as part of the processing
+                        * of the next beacon from the AP.
+                        */
+                       switch (sdata->csa_chandef.width) {
+                       case NL80211_CHAN_WIDTH_20_NOHT:
+                       case NL80211_CHAN_WIDTH_20:
+                       default:
+                               bw = IEEE80211_STA_RX_BW_20;
+                               break;
+                       case NL80211_CHAN_WIDTH_40:
+                               bw = IEEE80211_STA_RX_BW_40;
+                               break;
+                       case NL80211_CHAN_WIDTH_80:
+                               bw = IEEE80211_STA_RX_BW_80;
+                               break;
+                       case NL80211_CHAN_WIDTH_80P80:
+                       case NL80211_CHAN_WIDTH_160:
+                               bw = IEEE80211_STA_RX_BW_160;
+                               break;
+                       }
+
+                       mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+                       sband =
+                               local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+               }
+
+               if (sdata->vif.bss_conf.chandef.width >
+                   sdata->csa_chandef.width) {
+                       mgd_sta->sta.bandwidth = bw;
+                       rate_control_rate_update(local, sband, mgd_sta,
+                                                IEEE80211_RC_BW_CHANGED);
+               }
+
                ret = ieee80211_vif_use_reserved_context(sdata);
                if (ret) {
                        sdata_info(sdata,
@@ -1039,6 +1085,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                        goto out;
                }
 
+               if (sdata->vif.bss_conf.chandef.width <
+                   sdata->csa_chandef.width) {
+                       mgd_sta->sta.bandwidth = bw;
+                       rate_control_rate_update(local, sband, mgd_sta,
+                                                IEEE80211_RC_BW_CHANGED);
+               }
+
                goto out;
        }
 
index 3f33ec4..9f4ec16 100644 (file)
@@ -787,7 +787,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
 {
        u32 addr_len;
 
-       if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+       if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+           info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
                addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
                if (addr_len != sizeof(struct in_addr) &&
                    addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
index 0a13d55..e4b9dd9 100644 (file)
@@ -7687,6 +7687,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
                if (settings->n_ciphers_pairwise > cipher_limit)
                        return -EINVAL;
 
+               if (len > sizeof(u32) * NL80211_MAX_NR_CIPHER_SUITES)
+                       return -EINVAL;
+
                memcpy(settings->ciphers_pairwise, data, len);
 
                for (i = 0; i < settings->n_ciphers_pairwise; i++)
index 85c12c7..6ccaaa3 100644 (file)
@@ -498,7 +498,7 @@ static int cfg80211_sme_get_conn_ies(struct wireless_dev *wdev,
        if (!buf)
                return -ENOMEM;
 
-       if (ies_len) {
+       if (ies_len && ies) {
                static const u8 before_extcapa[] = {
                        /* not listing IEs expected to be created by driver */
                        WLAN_EID_RSN,
index afdbc12..4d3719d 100644 (file)
@@ -591,6 +591,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
        hdr.frame_control = fc;
        hdr.duration_id = 0;
        hdr.seq_ctrl = 0;
+       eth_zero_addr(hdr.addr4);
 
        skip_header_bytes = ETH_HLEN;
        if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
@@ -1355,7 +1356,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
                                          u8 *op_class)
 {
        u8 vht_opclass;
-       u16 freq = chandef->center_freq1;
+       u32 freq = chandef->center_freq1;
 
        if (freq >= 2412 && freq <= 2472) {
                if (chandef->width > NL80211_CHAN_WIDTH_40)
index ee6037d..14ced4e 100644 (file)
@@ -1404,6 +1404,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                    (ut[i].family != prev_family))
                        return -EINVAL;
 
+               if (ut[i].mode >= XFRM_MODE_MAX)
+                       return -EINVAL;
+
                prev_family = ut[i].family;
 
                switch (ut[i].family) {
index f341539..585b594 100644 (file)
@@ -88,8 +88,10 @@ static struct device_node *get_gpio(char *name,
        }
 
        reg = of_get_property(np, "reg", NULL);
-       if (!reg)
+       if (!reg) {
+               of_node_put(np);
                return NULL;
+       }
 
        *gpioptr = *reg;
 
index 07e5abd..0a576cc 100644 (file)
@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
        struct fw_device *device = fw_parent_device(unit);
        int err, rcode;
        u64 date;
-       __le32 cues[3] = {
-               cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
-               cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
-               cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
-       };
+       __le32 *cues;
 
        /* check date of software used to build */
        err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
                                   &date, sizeof(u64));
        if (err < 0)
-               goto end;
+               return err;
        /*
         * firmware version 5058 or later has date later than "20070401", but
         * 'date' is not null-terminated.
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
        if (date < 0x3230303730343031LL) {
                dev_err(&unit->device,
                        "Use firmware version 5058 or later\n");
-               err = -ENOSYS;
-               goto end;
+               return -ENXIO;
        }
 
+       cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+       if (!cues)
+               return -ENOMEM;
+
+       cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+       cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+       cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
        rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
                                   device->node_id, device->generation,
                                   device->max_speed, BEBOB_ADDR_REG_REQ,
-                                  cues, sizeof(cues));
+                                  cues, 3 * sizeof(*cues));
+       kfree(cues);
        if (rcode != RCODE_COMPLETE) {
                dev_err(&unit->device,
                        "Failed to send a cue to load firmware\n");
                err = -EIO;
        }
-end:
+
        return err;
 }
 
index b5a17cb..4727f5b 100644 (file)
@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
  */
 void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
 {
+       WARN_ON_ONCE(!bus->rb.area);
+
        spin_lock_irq(&bus->reg_lock);
        /* CORB set up */
        bus->corb.addr = bus->rb.addr;
@@ -377,13 +379,15 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
        /* reset controller */
        azx_reset(bus, full_reset);
 
-       /* initialize interrupts */
+       /* clear interrupts */
        azx_int_clear(bus);
-       azx_int_enable(bus);
 
        /* initialize the codec command I/O */
        snd_hdac_bus_init_cmd_io(bus);
 
+       /* enable interrupts after CORB/RIRB buffers are initialized above */
+       azx_int_enable(bus);
+
        /* program the position buffer */
        if (bus->use_posbuf && bus->posbuf.addr) {
                snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
index 56fc47b..50b216f 100644 (file)
@@ -2520,7 +2520,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
                emu->support_tlv = 1;
                return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
        case SNDRV_EMU10K1_IOCTL_INFO:
-               info = kmalloc(sizeof(*info), GFP_KERNEL);
+               info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;
                snd_emu10k1_fx8010_info(emu, info);
index cabccb1..95a82e4 100644 (file)
@@ -2360,7 +2360,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
        /* AMD Raven */
        { PCI_DEVICE(0x1022, 0x15e3),
-         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+                        AZX_DCAPS_PM_RUNTIME },
        /* ATI HDMI */
        { PCI_DEVICE(0x1002, 0x0002),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
index d706a41..0467e5b 100644 (file)
@@ -5642,6 +5642,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
        SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
index 55db19d..93b02be 100644 (file)
@@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
        SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
                                3, 1, 0),
        SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
-       SOC_SINGLE("MMTLR Data Switch", 0,
-                               1, 1, 0),
+       SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
+                               0, 1, 0),
        SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
        SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
 };
index d53680a..6df1586 100644 (file)
@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
        struct sigmadsp_control *ctrl, void *data)
 {
        /* safeload loads up to 20 bytes in a atomic operation */
-       if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
-           sigmadsp->ops->safeload)
+       if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
                return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
                        ctrl->num_bytes);
        else
index f27464c..7954196 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/i2c.h>
+#include <linux/acpi.h>
 
 #include "wm8804.h"
 
@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
 
+#if defined(CONFIG_OF)
 static const struct of_device_id wm8804_of_match[] = {
        { .compatible = "wlf,wm8804", },
        { }
 };
 MODULE_DEVICE_TABLE(of, wm8804_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id wm8804_acpi_match[] = {
+       { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
+       { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
+#endif
 
 static struct i2c_driver wm8804_i2c_driver = {
        .driver = {
                .name = "wm8804",
                .pm = &wm8804_pm,
-               .of_match_table = wm8804_of_match,
+               .of_match_table = of_match_ptr(wm8804_of_match),
+               .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
        },
        .probe = wm8804_i2c_probe,
        .remove = wm8804_i2c_remove,
index 69951e1..b2aa6bd 100644 (file)
@@ -1514,7 +1514,7 @@ static int msm_compr_configure_dsp_for_capture(struct snd_compr_stream *cstream)
 
 static int msm_compr_map_ion_fd(struct msm_compr_audio *prtd, int fd)
 {
-       ion_phys_addr_t paddr;
+       ion_phys_addr_t paddr = 0;
        size_t pa_len = 0;
        int ret = 0;
 
@@ -1544,7 +1544,7 @@ done:
 
 static int msm_compr_unmap_ion_fd(struct msm_compr_audio *prtd)
 {
-       ion_phys_addr_t paddr;
+       ion_phys_addr_t paddr = 0;
        size_t pa_len = 0;
        int ret = 0;
 
@@ -1759,7 +1759,7 @@ static int msm_compr_playback_free(struct snd_compr_stream *cstream)
        int dir = IN, ret = 0, stream_id;
        unsigned long flags;
        uint32_t stream_index;
-       ion_phys_addr_t paddr;
+       ion_phys_addr_t paddr = 0;
        size_t pa_len = 0;
 
        pr_debug("%s\n", __func__);
index 3b53614..ef9c652 100644 (file)
@@ -199,7 +199,7 @@ static void populate_codec_list(struct msm_transcode_loopback *trans,
 static int msm_transcode_map_ion_fd(struct msm_transcode_loopback *trans,
                                    int fd)
 {
-       ion_phys_addr_t paddr;
+       ion_phys_addr_t paddr = 0;
        size_t pa_len = 0;
        int ret = 0;
 
@@ -229,7 +229,7 @@ done:
 
 static int msm_transcode_unmap_ion_fd(struct msm_transcode_loopback *trans)
 {
-       ion_phys_addr_t paddr;
+       ion_phys_addr_t paddr = 0;
        size_t pa_len = 0;
        int ret = 0;
 
@@ -359,7 +359,7 @@ static int msm_transcode_loopback_free(struct snd_compr_stream *cstream)
        struct trans_loopback_pdata *pdata = snd_soc_platform_get_drvdata(
                                                                rtd->platform);
        int ret = 0;
-       ion_phys_addr_t paddr;
+       ion_phys_addr_t paddr = 0;
        size_t pa_len = 0;
 
        mutex_lock(&trans->lock);
index 3f90347..b7f85c5 100644 (file)
@@ -7742,7 +7742,7 @@ fail_cmd:
 int q6asm_audio_map_shm_fd(struct audio_client *ac, struct ion_client **client,
                        struct ion_handle **handle, int fd)
 {
-       ion_phys_addr_t paddr;
+       ion_phys_addr_t paddr = 0;
        size_t pa_len = 0;
        int ret;
        int sz = 0;
index 6278ca1..f7947f1 100644 (file)
@@ -3879,6 +3879,13 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
                        continue;
                }
 
+               /* let users know there is no DAI to link */
+               if (!dai_w->priv) {
+                       dev_dbg(card->dev, "dai widget %s has no DAI\n",
+                               dai_w->name);
+                       continue;
+               }
+
                dai = dai_w->priv;
 
                /* ...find all widgets with the same stream and link them */
index bbc1a50..873f19f 100644 (file)
@@ -27,15 +27,16 @@ void arch__elf_sym_adjust(GElf_Sym *sym)
 #endif
 #endif
 
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
 int arch__choose_best_symbol(struct symbol *syma,
                             struct symbol *symb __maybe_unused)
 {
        char *sym = syma->name;
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
        /* Skip over any initial dot */
        if (*sym == '.')
                sym++;
+#endif
 
        /* Avoid "SyS" kernel syscall aliases */
        if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -46,6 +47,7 @@ int arch__choose_best_symbol(struct symbol *syma,
        return SYMBOL_A;
 }
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 /* Allow matching against dot variants */
 int arch__compare_symbol_names(const char *namea, const char *nameb)
 {
index 1b02cdc..84cb591 100644 (file)
@@ -205,14 +205,23 @@ from ctypes import *
 libpq = CDLL("libpq.so.5")
 PQconnectdb = libpq.PQconnectdb
 PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
 PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
 PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
 PQexec = libpq.PQexec
 PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
 PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
 PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
 PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
 PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
 PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
 
 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
new file mode 100644 (file)
index 0000000..4e151f1
--- /dev/null
@@ -0,0 +1 @@
+CONFIG_EFIVAR_FS=y
index 5a60162..c7fcc84 100644 (file)
@@ -150,12 +150,6 @@ static const char * const page_flag_names[] = {
 };
 
 
-static const char * const debugfs_known_mountpoints[] = {
-       "/sys/kernel/debug",
-       "/debug",
-       0,
-};
-
 /*
  * data structures
  */
index 499b881..5173a19 100644 (file)
@@ -29,8 +29,8 @@ struct slabinfo {
        int alias;
        int refs;
        int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
-       int hwcache_align, object_size, objs_per_slab;
-       int sanity_checks, slab_size, store_user, trace;
+       unsigned int hwcache_align, object_size, objs_per_slab;
+       unsigned int sanity_checks, slab_size, store_user, trace;
        int order, poison, reclaim_account, red_zone;
        unsigned long partial, objects, slabs, objects_partial, objects_total;
        unsigned long alloc_fastpath, alloc_slowpath;