OSDN Git Service

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next...
author: David S. Miller <davem@davemloft.net>
Thu, 20 Dec 2018 23:34:30 +0000 (15:34 -0800)
committer: David S. Miller <davem@davemloft.net>
Thu, 20 Dec 2018 23:34:30 +0000 (15:34 -0800)
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2018-12-20

This series contains updates to e100, igb, ixgbe, i40e and ice drivers.

I replaced spinlocks with mutex locks to reduce the latency on CPU0 for
igb when updating the statistics.  This work was based off a patch
provided by Jan Jablonsky, which was against an older version of the igb
driver.

Jesus adjusts the receive packet buffer size from 32K to 30K when
running in QAV mode, to stay within 60K for total packet buffer size for
igb.

Vinicius adds igb kernel documentation regarding the CBS algorithm and
its implementation in the i210 family of NICs.

YueHaibing from Huawei fixed the e100 driver that was potentially
passing a NULL pointer, so use the kernel macro IS_ERR_OR_NULL()
instead.

Konstantin Khorenko fixes i40e where we were not setting up the
neigh_priv_len in our net_device, which caused the driver to read beyond
the neighbor entry allocated memory.

Miroslav Lichvar extends the PTP gettime() to read the system clock by
adding support for PTP_SYS_OFFSET_EXTENDED ioctl in i40e.

Young Xiao fixed the ice driver to only enable NAPI on q_vectors that
actually have transmit and receive rings.

Kai-Heng Feng fixes an igb issue where, when placed in suspend mode, the
NIC does not wake up when a cable is plugged in.  This was due to the
driver not setting PME during runtime suspend.

Stephen Douthit enables the ixgbe driver to allow DSA devices to use the
MII interface to talk to switches.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
314 files changed:
CREDITS
Documentation/core-api/xarray.rst
Documentation/media/uapi/v4l/extended-controls.rst
MAINTAINERS
Makefile
arch/alpha/kernel/setup.c
arch/alpha/mm/numa.c
arch/arm/boot/dts/arm-realview-pb1176.dts
arch/arm/boot/dts/arm-realview-pb11mp.dts
arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
arch/arm/boot/dts/bcm2837-rpi-3-b.dts
arch/arm/boot/dts/imx7d-nitrogen7.dts
arch/arm/boot/dts/imx7d-pico.dtsi
arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
arch/arm/mach-imx/cpuidle-imx6sx.c
arch/arm/mach-mmp/cputype.h
arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
arch/arm64/boot/dts/marvell/armada-ap806.dtsi
arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
arch/arm64/boot/dts/mediatek/mt7622.dtsi
arch/arm64/include/asm/memory.h
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/init.c
arch/m68k/kernel/setup_mm.c
arch/m68k/mm/motorola.c
arch/powerpc/boot/Makefile
arch/powerpc/boot/crt0.S
arch/powerpc/include/asm/perf_event.h
arch/powerpc/include/uapi/asm/Kbuild
arch/powerpc/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/msi.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/mm/dump_linuxpagetables.c
arch/powerpc/mm/init_64.c
arch/powerpc/platforms/pseries/Kconfig
arch/powerpc/platforms/pseries/papr_scm.c
arch/sh/include/asm/io.h
arch/x86/include/asm/msr-index.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
block/bio.c
block/blk-zoned.c
drivers/clk/qcom/gcc-qcs404.c
drivers/crypto/chelsio/chtls/chtls.h
drivers/crypto/chelsio/chtls/chtls_cm.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/crypto/chelsio/chtls/chtls_main.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_workarounds.c
drivers/gpu/drm/i915/intel_workarounds.h
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
drivers/hid/hid-ids.h
drivers/hid/hid-ite.c
drivers/hid/hid-quirks.c
drivers/hv/Kconfig
drivers/hv/vmbus_drv.c
drivers/infiniband/core/roce_gid_mgmt.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/odp.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/media/Kconfig
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/common/videobuf2/videobuf2-v4l2.c
drivers/media/media-device.c
drivers/media/platform/vicodec/vicodec-core.c
drivers/media/platform/vivid/vivid-sdr-cap.c
drivers/media/platform/vivid/vivid-vbi-cap.c
drivers/media/platform/vivid/vivid-vbi-out.c
drivers/media/platform/vivid/vivid-vid-cap.c
drivers/media/platform/vivid/vivid-vid-out.c
drivers/media/platform/vsp1/vsp1_lif.c
drivers/media/v4l2-core/v4l2-ctrls.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/host/omap.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cadence/macb_ptp.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
drivers/net/ethernet/mellanox/mlxsw/trap.h
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/neterion/vxge/vxge-config.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/nuvoton/w90p910_ether.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ieee802154/ca8210.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/phy/phy_device.c
drivers/net/usb/hso.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/thermal.c
drivers/net/wireless/ath/ath10k/wmi-tlv.h
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/marvell/mwifiex/11n.c
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/xen-netfront.c
drivers/pci/pcie/aer.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/qcom/pinctrl-sdm660.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/qla2xxx/qla_os.c
drivers/staging/media/sunxi/cedrus/Kconfig
drivers/staging/media/sunxi/cedrus/cedrus_hw.c
drivers/thermal/hisi_thermal.c
drivers/thermal/st/stm_thermal.c
drivers/tty/serial/8250/8250_port.c
drivers/uio/uio_hv_generic.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci.h
drivers/usb/serial/option.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/video/backlight/pwm_bl.c
fs/aio.c
fs/ceph/super.c
fs/ceph/super.h
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/overlayfs/dir.c
fs/overlayfs/export.c
fs/overlayfs/inode.c
fs/userfaultfd.c
include/asm-generic/fixmap.h
include/linux/filter.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/netfilter/nfnetlink.h
include/linux/t10-pi.h
include/linux/xarray.h
include/media/mpeg2-ctrls.h [new file with mode: 0644]
include/media/v4l2-ctrls.h
include/media/videobuf2-core.h
include/net/ip_tunnels.h
include/net/sock.h
include/net/tls.h
include/net/xfrm.h
include/uapi/asm-generic/Kbuild.asm
include/uapi/linux/blkzoned.h
include/uapi/linux/if_tunnel.h
include/uapi/linux/in.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/net_tstamp.h
include/uapi/linux/netlink.h
include/uapi/linux/v4l2-controls.h
include/uapi/linux/videodev2.h
init/Kconfig
kernel/bpf/core.c
kernel/bpf/verifier.c
kernel/dma/direct.c
kernel/trace/ftrace.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_events_trigger.c
lib/radix-tree.c
lib/test_xarray.c
lib/xarray.c
mm/hugetlb.c
mm/memblock.c
mm/shmem.c
mm/sparse.c
net/can/raw.c
net/core/flow_dissector.c
net/core/gro_cells.c
net/core/neighbour.c
net/core/sysctl_net_core.c
net/ipv4/devinet.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ipconfig.c
net/ipv4/ipmr.c
net/ipv4/raw.c
net/ipv6/ip6_output.c
net/ipv6/ip6_udp_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/raw.c
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/status.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/nf_conncount.c
net/netfilter/nf_conntrack_seqadj.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/rds/message.c
net/rds/rdma.c
net/rds/rds.h
net/rds/send.c
net/sched/cls_flower.c
net/sctp/ipv6.c
net/smc/af_smc.c
net/smc/smc.h
net/sunrpc/clnt.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
net/tipc/socket.c
net/tipc/udp_media.c
net/tls/tls_main.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/wireless/nl80211.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/checkstack.pl
scripts/spdxcheck.py
security/integrity/ima/ima_policy.c
security/keys/keyctl_pkey.c
security/keys/trusted.c
sound/firewire/fireface/ff-protocol-ff400.c
sound/pci/hda/patch_realtek.c
tools/include/uapi/linux/netlink.h
tools/testing/radix-tree/Makefile
tools/testing/radix-tree/main.c
tools/testing/radix-tree/regression.h
tools/testing/radix-tree/regression4.c [new file with mode: 0644]
tools/testing/selftests/bpf/bpf_flow.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/test_vxlan_fdb_changelink.sh [new file with mode: 0755]
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/virtio/linux/kernel.h
virt/kvm/coalesced_mmio.c

diff --git a/CREDITS b/CREDITS
index c927339..7d397ee 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2541,6 +2541,10 @@ S: Ormond
 S: Victoria 3163
 S: Australia
 
+N: Eric Miao
+E: eric.y.miao@gmail.com
+D: MMP support
+
 N: Pauline Middelink
 E: middelin@polyware.nl
 D: General low-level bug fixes, /proc fixes, identd support
@@ -4115,6 +4119,10 @@ S: 1507 145th Place SE #B5
 S: Bellevue, Washington 98007
 S: USA
 
+N: Haojian Zhuang
+E: haojian.zhuang@gmail.com
+D: MMP support
+
 N: Richard Zidlicky
 E: rz@linux-m68k.org, rdzidlic@geocities.com
 W: http://www.geocities.com/rdzidlic
index dbe96cb..6a6d67a 100644 (file)
@@ -187,6 +187,8 @@ Takes xa_lock internally:
  * :c:func:`xa_erase_bh`
  * :c:func:`xa_erase_irq`
  * :c:func:`xa_cmpxchg`
+ * :c:func:`xa_cmpxchg_bh`
+ * :c:func:`xa_cmpxchg_irq`
  * :c:func:`xa_store_range`
  * :c:func:`xa_alloc`
  * :c:func:`xa_alloc_bh`
@@ -263,7 +265,8 @@ using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
 context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
 in the interrupt handler.  Some of the more common patterns have helper
 functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
-:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
+:c:func:`xa_erase_bh`, :c:func:`xa_erase_irq`, :c:func:`xa_cmpxchg_bh`
+and :c:func:`xa_cmpxchg_irq`.
 
 Sometimes you need to protect access to the XArray with a mutex because
 that lock sits above another mutex in the locking hierarchy.  That does
index 65a1d87..027358b 100644 (file)
@@ -1505,6 +1505,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
     configuring a stateless hardware decoding pipeline for MPEG-2.
     The bitstream parameters are defined according to :ref:`mpeg2part2`.
 
+    .. note::
+
+       This compound control is not yet part of the public kernel API and
+       it is expected to change.
+
 .. c:type:: v4l2_ctrl_mpeg2_slice_params
 
 .. cssclass:: longtable
@@ -1625,6 +1630,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
     Specifies quantization matrices (as extracted from the bitstream) for the
     associated MPEG-2 slice data.
 
+    .. note::
+
+       This compound control is not yet part of the public kernel API and
+       it is expected to change.
+
 .. c:type:: v4l2_ctrl_mpeg2_quantization
 
 .. cssclass:: longtable
index 95baadb..6de660a 100644 (file)
@@ -1739,13 +1739,17 @@ ARM/Mediatek SoC support
 M:     Matthias Brugger <matthias.bgg@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+W:     https://mtk.bcnfs.org/
+C:     irc://chat.freenode.net/linux-mediatek
 S:     Maintained
 F:     arch/arm/boot/dts/mt6*
 F:     arch/arm/boot/dts/mt7*
 F:     arch/arm/boot/dts/mt8*
 F:     arch/arm/mach-mediatek/
 F:     arch/arm64/boot/dts/mediatek/
+F:     drivers/soc/mediatek/
 N:     mtk
+N:     mt[678]
 K:     mediatek
 
 ARM/Mediatek USB3 PHY DRIVER
@@ -4843,6 +4847,7 @@ F:        include/uapi/drm/vmwgfx_drm.h
 
 DRM DRIVERS
 M:     David Airlie <airlied@linux.ie>
+M:     Daniel Vetter <daniel@ffwll.ch>
 L:     dri-devel@lists.freedesktop.org
 T:     git git://anongit.freedesktop.org/drm/drm
 B:     https://bugs.freedesktop.org/
@@ -6902,8 +6907,10 @@ Hyper-V CORE AND DRIVERS
 M:     "K. Y. Srinivasan" <kys@microsoft.com>
 M:     Haiyang Zhang <haiyangz@microsoft.com>
 M:     Stephen Hemminger <sthemmin@microsoft.com>
+M:     Sasha Levin <sashal@kernel.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
 L:     devel@linuxdriverproject.org
-S:     Maintained
+S:     Supported
 F:     Documentation/networking/device_drivers/microsoft/netvsc.txt
 F:     arch/x86/include/asm/mshyperv.h
 F:     arch/x86/include/asm/trace/hyperv.h
@@ -8932,7 +8939,7 @@ F:        arch/mips/boot/dts/img/pistachio_marduk.dts
 
 MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
 M:     Andrew Lunn <andrew@lunn.ch>
-M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M:     Vivien Didelot <vivien.didelot@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/dsa/mv88e6xxx/
@@ -9437,6 +9444,13 @@ F:       drivers/media/platform/mtk-vpu/
 F:     Documentation/devicetree/bindings/media/mediatek-vcodec.txt
 F:     Documentation/devicetree/bindings/media/mediatek-vpu.txt
 
+MEDIATEK MT76 WIRELESS LAN DRIVER
+M:     Felix Fietkau <nbd@nbd.name>
+M:     Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+L:     linux-wireless@vger.kernel.org
+S:     Maintained
+F:     drivers/net/wireless/mediatek/mt76/
+
 MEDIATEK MT7601U WIRELESS LAN DRIVER
 M:     Jakub Kicinski <kubakici@wp.pl>
 L:     linux-wireless@vger.kernel.org
@@ -10000,12 +10014,9 @@ S:     Odd Fixes
 F:     drivers/media/radio/radio-miropcm20*
 
 MMP SUPPORT
-M:     Eric Miao <eric.y.miao@gmail.com>
-M:     Haojian Zhuang <haojian.zhuang@gmail.com>
+R:     Lubomir Rintel <lkundrak@v3.sk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T:     git git://github.com/hzhuang1/linux.git
-T:     git git://git.linaro.org/people/ycmiao/pxa-linux.git
-S:     Maintained
+S:     Odd Fixes
 F:     arch/arm/boot/dts/mmp*
 F:     arch/arm/mach-mmp/
 
@@ -10411,7 +10422,7 @@ F:      drivers/net/wireless/
 
 NETWORKING [DSA]
 M:     Andrew Lunn <andrew@lunn.ch>
-M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M:     Vivien Didelot <vivien.didelot@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/dsa/
index f2c3423..d45856f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 20
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -962,11 +962,6 @@ ifdef CONFIG_STACK_VALIDATION
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    ifdef CONFIG_UNWINDER_ORC
-      $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
-    else
-      $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
-    endif
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
@@ -1125,6 +1120,14 @@ uapi-asm-generic:
 
 PHONY += prepare-objtool
 prepare-objtool: $(objtool_target)
+ifeq ($(SKIP_STACK_VALIDATION),1)
+ifdef CONFIG_UNWINDER_ORC
+       @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+       @false
+else
+       @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+endif
+endif
 
 # Generate some files
 # ---------------------------------------------------------------------------
index a37fd99..4b5b1b2 100644 (file)
@@ -634,6 +634,7 @@ setup_arch(char **cmdline_p)
 
        /* Find our memory.  */
        setup_memory(kernel_end);
+       memblock_set_bottom_up(true);
 
        /* First guess at cpu cache sizes.  Do this before init_arch.  */
        determine_cpu_caches(cpu->type);
index 7484655..d0b7337 100644 (file)
@@ -144,14 +144,14 @@ setup_memory_node(int nid, void *kernel_end)
        if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
                panic("kernel loaded out of ram");
 
+       memblock_add(PFN_PHYS(node_min_pfn),
+                    (node_max_pfn - node_min_pfn) << PAGE_SHIFT);
+
        /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
           Note that we round this down, not up - node memory
           has much larger alignment than 8Mb, so it's safe. */
        node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
 
-       memblock_add(PFN_PHYS(node_min_pfn),
-                    (node_max_pfn - node_min_pfn) << PAGE_SHIFT);
-
        NODE_DATA(nid)->node_start_pfn = node_min_pfn;
        NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
 
index f2a1d25..83e0fbc 100644 (file)
@@ -45,7 +45,7 @@
        };
 
        /* The voltage to the MMC card is hardwired at 3.3V */
-       vmmc: fixedregulator@0 {
+       vmmc: regulator-vmmc {
                compatible = "regulator-fixed";
                regulator-name = "vmmc";
                regulator-min-microvolt = <3300000>;
@@ -53,7 +53,7 @@
                regulator-boot-on;
         };
 
-       veth: fixedregulator@0 {
+       veth: regulator-veth {
                compatible = "regulator-fixed";
                regulator-name = "veth";
                regulator-min-microvolt = <3300000>;
index 7f9cbdf..2f6aa24 100644 (file)
        };
 
        /* The voltage to the MMC card is hardwired at 3.3V */
-       vmmc: fixedregulator@0 {
+       vmmc: regulator-vmmc {
                compatible = "regulator-fixed";
                regulator-name = "vmmc";
                regulator-min-microvolt = <3300000>;
                regulator-boot-on;
         };
 
-       veth: fixedregulator@0 {
+       veth: regulator-veth {
                compatible = "regulator-fixed";
                regulator-name = "veth";
                regulator-min-microvolt = <3300000>;
index 4adb85e..9376224 100644 (file)
@@ -31,7 +31,7 @@
 
        wifi_pwrseq: wifi-pwrseq {
                compatible = "mmc-pwrseq-simple";
-               reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
        };
 };
 
index c318bcb..89e6fd5 100644 (file)
@@ -26,7 +26,7 @@
 
        wifi_pwrseq: wifi-pwrseq {
                compatible = "mmc-pwrseq-simple";
-               reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
        };
 };
 
index d8aac4a..177d21f 100644 (file)
                compatible = "regulator-fixed";
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
-               clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
-               clock-names = "slow";
                regulator-name = "reg_wlan";
                startup-delay-us = <70000>;
                gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
                enable-active-high;
        };
+
+       usdhc2_pwrseq: usdhc2_pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+               clock-names = "ext_clock";
+       };
 };
 
 &adc1 {
        bus-width = <4>;
        non-removable;
        vmmc-supply = <&reg_wlan>;
+       mmc-pwrseq = <&usdhc2_pwrseq>;
        cap-power-off-card;
        keep-power-in-suspend;
        status = "okay";
index 21973eb..f27b384 100644 (file)
                regulator-min-microvolt = <1800000>;
                regulator-max-microvolt = <1800000>;
        };
+
+       usdhc2_pwrseq: usdhc2_pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+               clock-names = "ext_clock";
+       };
+};
+
+&clks {
+       assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
+                         <&clks IMX7D_CLKO2_ROOT_DIV>;
+       assigned-clock-parents = <&clks IMX7D_CKIL>;
+       assigned-clock-rates = <0>, <32768>;
 };
 
 &i2c4 {
 
 &usdhc2 { /* Wifi SDIO */
        pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_usdhc2>;
+       pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>;
        no-1-8-v;
        non-removable;
        keep-power-in-suspend;
        wakeup-source;
        vmmc-supply = <&reg_ap6212>;
+       mmc-pwrseq = <&usdhc2_pwrseq>;
        status = "okay";
 };
 
 };
 
 &iomuxc_lpsr {
+       pinctrl_wifi_clk: wificlkgrp {
+               fsl,pins = <
+                       MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2     0x7d
+               >;
+       };
+
        pinctrl_wdog: wdoggrp {
                fsl,pins = <
                        MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B  0x74
index 742d294..583a5a0 100644 (file)
 
 &reg_dldo3 {
        regulator-always-on;
-       regulator-min-microvolt = <2500000>;
-       regulator-max-microvolt = <2500000>;
+       regulator-min-microvolt = <3300000>;
+       regulator-max-microvolt = <3300000>;
        regulator-name = "vcc-pd";
 };
 
index 243a108..fd0053e 100644 (file)
@@ -110,7 +110,7 @@ int __init imx6sx_cpuidle_init(void)
         * except for power up sw2iso which need to be
         * larger than LDO ramp up time.
         */
-       imx_gpc_set_arm_power_up_timing(2, 1);
+       imx_gpc_set_arm_power_up_timing(0xf, 1);
        imx_gpc_set_arm_power_down_timing(1, 1);
 
        return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
index 446edae..a96abcf 100644 (file)
@@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void)
 #define cpu_is_pxa910()        (0)
 #endif
 
-#ifdef CONFIG_CPU_MMP2
+#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
 static inline int cpu_is_mmp2(void)
 {
-       return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+       return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
+               (((mmp_chip_id & 0xfff) == 0x410) ||
+                ((mmp_chip_id & 0xfff) == 0x610));
 }
 #else
 #define cpu_is_mmp2()  (0)
index 64632c8..01ea662 100644 (file)
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x000>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
                cpu1: cpu@1 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x001>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
                cpu2: cpu@100 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x100>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
                cpu3: cpu@101 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x101>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
        };
 };
index 073610a..7d94c1f 100644 (file)
                method = "smc";
        };
 
-       cpus {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               idle_states {
-                       entry_method = "arm,pcsi";
-
-                       CPU_SLEEP_0: cpu-sleep-0 {
-                               compatible = "arm,idle-state";
-                               local-timer-stop;
-                               arm,psci-suspend-param = <0x0010000>;
-                               entry-latency-us = <80>;
-                               exit-latency-us  = <160>;
-                               min-residency-us = <320>;
-                       };
-
-                       CLUSTER_SLEEP_0: cluster-sleep-0 {
-                               compatible = "arm,idle-state";
-                               local-timer-stop;
-                               arm,psci-suspend-param = <0x1010000>;
-                               entry-latency-us = <500>;
-                               exit-latency-us = <1000>;
-                               min-residency-us = <2500>;
-                       };
-               };
-       };
-
        ap806 {
                #address-cells = <2>;
                #size-cells = <2>;
index 5d6005c..710c5c3 100644 (file)
        model = "Bananapi BPI-R64";
        compatible = "bananapi,bpi-r64", "mediatek,mt7622";
 
+       aliases {
+               serial0 = &uart0;
+       };
+
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+               stdout-path = "serial0:115200n8";
+               bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
        };
 
        cpus {
index dcad086..3f78334 100644 (file)
        model = "MediaTek MT7622 RFB1 board";
        compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
 
+       aliases {
+               serial0 = &uart0;
+       };
+
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+               stdout-path = "serial0:115200n8";
+               bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
        };
 
        cpus {
index fe0c875..14a1028 100644 (file)
                #reset-cells = <1>;
        };
 
-       timer: timer@10004000 {
-               compatible = "mediatek,mt7622-timer",
-                            "mediatek,mt6577-timer";
-               reg = <0 0x10004000 0 0x80>;
-               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_LOW>;
-               clocks = <&infracfg CLK_INFRA_APXGPT_PD>,
-                        <&topckgen CLK_TOP_RTC>;
-               clock-names = "system-clk", "rtc-clk";
-       };
-
        scpsys: scpsys@10006000 {
                compatible = "mediatek,mt7622-scpsys",
                             "syscon";
index ee20fc6..932c60e 100644 (file)
 #define PCI_IO_SIZE            SZ_16M
 
 /*
- * Log2 of the upper bound of the size of a struct page. Used for sizing
- * the vmemmap region only, does not affect actual memory footprint.
- * We don't use sizeof(struct page) directly since taking its size here
- * requires its definition to be available at this point in the inclusion
- * chain, and it may not be a power of 2 in the first place.
- */
-#define STRUCT_PAGE_MAX_SHIFT  6
-
-/*
  * VMEMMAP_SIZE - allows the whole linear region to be covered by
  *                a struct page array
  */
index a3ac262..a537044 100644 (file)
@@ -429,9 +429,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                                   prot,
                                                   __builtin_return_address(0));
                if (addr) {
-                       memset(addr, 0, size);
                        if (!coherent)
                                __dma_flush_area(page_to_virt(page), iosize);
+                       memset(addr, 0, size);
                } else {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
index 9b432d9..0340e45 100644 (file)
@@ -610,14 +610,6 @@ void __init mem_init(void)
        BUILD_BUG_ON(TASK_SIZE_32                       > TASK_SIZE_64);
 #endif
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-       /*
-        * Make sure we chose the upper bound of sizeof(struct page)
-        * correctly when sizing the VMEMMAP array.
-        */
-       BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
-#endif
-
        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
                /*
index a1a3eae..ad0195c 100644 (file)
@@ -164,8 +164,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
                                        be32_to_cpu(m->addr);
                                m68k_memory[m68k_num_memory].size =
                                        be32_to_cpu(m->size);
-                               memblock_add(m68k_memory[m68k_num_memory].addr,
-                                            m68k_memory[m68k_num_memory].size);
                                m68k_num_memory++;
                        } else
                                pr_warn("%s: too many memory chunks\n",
index 7497cf3..3f3d0bf 100644 (file)
@@ -228,6 +228,7 @@ void __init paging_init(void)
 
        min_addr = m68k_memory[0].addr;
        max_addr = min_addr + m68k_memory[0].size;
+       memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
        for (i = 1; i < m68k_num_memory;) {
                if (m68k_memory[i].addr < min_addr) {
                        printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
@@ -238,6 +239,7 @@ void __init paging_init(void)
                                (m68k_num_memory - i) * sizeof(struct m68k_mem_info));
                        continue;
                }
+               memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
                addr = m68k_memory[i].addr + m68k_memory[i].size;
                if (addr > max_addr)
                        max_addr = addr;
index 3935436..ed98831 100644 (file)
@@ -197,7 +197,7 @@ $(obj)/empty.c:
 $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
        $(Q)cp $< $@
 
-$(obj)/serial.c: $(obj)/autoconf.h
+$(srctree)/$(src)/serial.c: $(obj)/autoconf.h
 
 $(obj)/autoconf.h: $(obj)/%: $(objtree)/include/generated/%
        $(Q)cp $< $@
index 32dfe6d..9b9d174 100644 (file)
@@ -15,7 +15,7 @@
 RELA = 7
 RELACOUNT = 0x6ffffff9
 
-       .text
+       .data
        /* A procedure descriptor used when booting this as a COFF file.
         * When making COFF, this comes first in the link and we're
         * linked at 0x500000.
@@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9
        .globl  _zimage_start_opd
 _zimage_start_opd:
        .long   0x500000, 0, 0, 0
+       .text
+       b       _zimage_start
 
 #ifdef __powerpc64__
 .balign 8
index 8bf1b63..16a4981 100644 (file)
@@ -26,6 +26,8 @@
 #include <asm/ptrace.h>
 #include <asm/reg.h>
 
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
+
 /*
  * Overload regs->result to specify whether we should use the MSR (result
  * is zero) or the SIAR (result is non zero).
index a658091..3712152 100644 (file)
@@ -1,7 +1,6 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
-generic-y += bpf_perf_event.h
 generic-y += param.h
 generic-y += poll.h
 generic-y += resource.h
diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..b551b74
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 33b34a5..5b9dce1 100644 (file)
@@ -372,6 +372,8 @@ void __init find_legacy_serial_ports(void)
 
        /* Now find out if one of these is out firmware console */
        path = of_get_property(of_chosen, "linux,stdout-path", NULL);
+       if (path == NULL)
+               path = of_get_property(of_chosen, "stdout-path", NULL);
        if (path != NULL) {
                stdout = of_find_node_by_path(path);
                if (stdout)
@@ -595,8 +597,10 @@ static int __init check_legacy_serial_console(void)
        /* We are getting a weird phandle from OF ... */
        /* ... So use the full path instead */
        name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+       if (name == NULL)
+               name = of_get_property(of_chosen, "stdout-path", NULL);
        if (name == NULL) {
-               DBG(" no linux,stdout-path !\n");
+               DBG(" no stdout-path !\n");
                return -ENODEV;
        }
        prom_stdout = of_find_node_by_path(name);
index dab616a..f219765 100644 (file)
@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
 {
        struct pci_controller *phb = pci_bus_to_host(dev->bus);
 
-       phb->controller_ops.teardown_msi_irqs(dev);
+       /*
+        * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
+        * so check the pointer again.
+        */
+       if (phb->controller_ops.teardown_msi_irqs)
+               phb->controller_ops.teardown_msi_irqs(dev);
 }
index afb819f..714c348 100644 (file)
@@ -3266,12 +3266,17 @@ long do_syscall_trace_enter(struct pt_regs *regs)
        user_exit();
 
        if (test_thread_flag(TIF_SYSCALL_EMU)) {
-               ptrace_report_syscall(regs);
                /*
+                * A nonzero return code from tracehook_report_syscall_entry()
+                * tells us to prevent the syscall execution, but we are not
+                * going to execute it anyway.
+                *
                 * Returning -1 will skip the syscall execution. We want to
                 * avoid clobbering any register also, thus, not 'gotoing'
                 * skip label.
                 */
+               if (tracehook_report_syscall_entry(regs))
+                       ;
                return -1;
        }
 
index 2b74f8a..6aa4166 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <asm/fixmap.h>
index 7a9886f..a5091c0 100644 (file)
@@ -188,15 +188,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
 
        for (; start < end; start += page_size) {
-               void *p;
+               void *p = NULL;
                int rc;
 
                if (vmemmap_populated(start, page_size))
                        continue;
 
+               /*
+                * Allocate from the altmap first if we have one. This may
+                * fail due to alignment issues when using 16MB hugepages, so
+                * fall back to system memory if the altmap allocation fail.
+                */
                if (altmap)
                        p = altmap_alloc_block_buf(page_size, altmap);
-               else
+               if (!p)
                        p = vmemmap_alloc_block_buf(page_size, node);
                if (!p)
                        return -ENOMEM;
@@ -255,8 +260,15 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 {
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
        unsigned long page_order = get_order(page_size);
+       unsigned long alt_start = ~0, alt_end = ~0;
+       unsigned long base_pfn;
 
        start = _ALIGN_DOWN(start, page_size);
+       if (altmap) {
+               alt_start = altmap->base_pfn;
+               alt_end = altmap->base_pfn + altmap->reserve +
+                         altmap->free + altmap->alloc + altmap->align;
+       }
 
        pr_debug("vmemmap_free %lx...%lx\n", start, end);
 
@@ -280,8 +292,9 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
                page = pfn_to_page(addr >> PAGE_SHIFT);
                section_base = pfn_to_page(vmemmap_section_start(start));
                nr_pages = 1 << page_order;
+               base_pfn = PHYS_PFN(addr);
 
-               if (altmap) {
+               if (base_pfn >= alt_start && base_pfn < alt_end) {
                        vmem_altmap_free(altmap, nr_pages);
                } else if (PageReserved(page)) {
                        /* allocated from bootmem */
index 2e4bd32..472b784 100644 (file)
@@ -140,8 +140,7 @@ config IBMEBUS
          Bus device driver for GX bus based adapters.
 
 config PAPR_SCM
-       depends on PPC_PSERIES && MEMORY_HOTPLUG
-       select LIBNVDIMM
+       depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM
        tristate "Support for the PAPR Storage Class Memory interface"
        help
          Enable access to hypervisor provided storage class memory.
index ee9372b..7d6457a 100644 (file)
@@ -55,7 +55,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
        do {
                rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
                                p->blocks, BIND_ANY_ADDR, token);
-               token = be64_to_cpu(ret[0]);
+               token = ret[0];
                cond_resched();
        } while (rc == H_BUSY);
 
@@ -64,7 +64,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
                return -ENXIO;
        }
 
-       p->bound_addr = be64_to_cpu(ret[1]);
+       p->bound_addr = ret[1];
 
        dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
 
@@ -82,7 +82,7 @@ static int drc_pmem_unbind(struct papr_scm_priv *p)
        do {
                rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
                                p->bound_addr, p->blocks, token);
-               token = be64_to_cpu(ret);
+               token = ret[0];
                cond_resched();
        } while (rc == H_BUSY);
 
@@ -223,6 +223,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
                goto err;
        }
 
+       if (nvdimm_bus_check_dimm_count(p->bus, 1))
+               goto err;
+
        /* now add the region */
 
        memset(&mapping, 0, sizeof(mapping));
@@ -257,9 +260,12 @@ err:       nvdimm_bus_unregister(p->bus);
 
 static int papr_scm_probe(struct platform_device *pdev)
 {
-       uint32_t drc_index, metadata_size, unit_cap[2];
        struct device_node *dn = pdev->dev.of_node;
+       u32 drc_index, metadata_size;
+       u64 blocks, block_size;
        struct papr_scm_priv *p;
+       const char *uuid_str;
+       u64 uuid[2];
        int rc;
 
        /* check we have all the required DT properties */
@@ -268,8 +274,18 @@ static int papr_scm_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       if (of_property_read_u32_array(dn, "ibm,unit-capacity", unit_cap, 2)) {
-               dev_err(&pdev->dev, "%pOF: missing unit-capacity!\n", dn);
+       if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
+               dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
+               return -ENODEV;
+       }
+
+       if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
+               dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
+               return -ENODEV;
+       }
+
+       if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
+               dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
                return -ENODEV;
        }
 
@@ -282,8 +298,13 @@ static int papr_scm_probe(struct platform_device *pdev)
 
        p->dn = dn;
        p->drc_index = drc_index;
-       p->block_size = unit_cap[0];
-       p->blocks     = unit_cap[1];
+       p->block_size = block_size;
+       p->blocks = blocks;
+
+       /* We just need to ensure that set cookies are unique across */
+       uuid_parse(uuid_str, (uuid_t *) uuid);
+       p->nd_set.cookie1 = uuid[0];
+       p->nd_set.cookie2 = uuid[1];
 
        /* might be zero */
        p->metadata_size = metadata_size;
@@ -296,7 +317,7 @@ static int papr_scm_probe(struct platform_device *pdev)
 
        /* setup the resource for the newly bound range */
        p->res.start = p->bound_addr;
-       p->res.end   = p->bound_addr + p->blocks * p->block_size;
+       p->res.end   = p->bound_addr + p->blocks * p->block_size - 1;
        p->res.name  = pdev->name;
        p->res.flags = IORESOURCE_MEM;
 
index 98cb8c8..4f7f235 100644 (file)
@@ -24,6 +24,7 @@
 #define __IO_PREFIX     generic
 #include <asm/io_generic.h>
 #include <asm/io_trapped.h>
+#include <asm-generic/pci_iomap.h>
 #include <mach/mangle-port.h>
 
 #define __raw_writeb(v,a)      (__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
index c8f73ef..9e39cc8 100644 (file)
 #define MSR_F15H_NB_PERF_CTR           0xc0010241
 #define MSR_F15H_PTSC                  0xc0010280
 #define MSR_F15H_IC_CFG                        0xc0011021
+#define MSR_F15H_EX_CFG                        0xc001102c
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE      0xc0010058
index 02edd99..8d5d984 100644 (file)
@@ -11985,6 +11985,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                        kunmap(vmx->nested.pi_desc_page);
                        kvm_release_page_dirty(vmx->nested.pi_desc_page);
                        vmx->nested.pi_desc_page = NULL;
+                       vmx->nested.pi_desc = NULL;
+                       vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
                }
                page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
                if (is_error_page(page))
index d029377..f049ecf 100644 (file)
@@ -2426,6 +2426,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_PATCH_LOADER:
        case MSR_AMD64_BU_CFG2:
        case MSR_AMD64_DC_CFG:
+       case MSR_F15H_EX_CFG:
                break;
 
        case MSR_IA32_UCODE_REV:
@@ -2721,6 +2722,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_BU_CFG2:
        case MSR_IA32_PERF_CTL:
        case MSR_AMD64_DC_CFG:
+       case MSR_F15H_EX_CFG:
                msr_info->data = 0;
                break;
        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
@@ -7446,7 +7448,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
-       if (!kvm_apic_hw_enabled(vcpu->arch.apic))
+       if (!kvm_apic_present(vcpu))
                return;
 
        bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
index 4f4d988..4d86e90 100644 (file)
@@ -1261,7 +1261,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
                if (ret)
                        goto cleanup;
        } else {
-               zero_fill_bio(bio);
+               if (bmd->is_our_pages)
+                       zero_fill_bio(bio);
                iov_iter_advance(iter, bio->bi_iter.bi_size);
        }
 
index 13ba201..a327bef 100644 (file)
@@ -378,7 +378,7 @@ static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones)
        struct page *page;
        int order;
 
-       for (order = get_order(size); order > 0; order--) {
+       for (order = get_order(size); order >= 0; order--) {
                page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order);
                if (page) {
                        *nr_zones = min_t(unsigned int, *nr_zones,
index ef1b267..64da032 100644 (file)
@@ -297,7 +297,7 @@ static struct clk_alpha_pll gpll0_out_main = {
                .hw.init = &(struct clk_init_data){
                        .name = "gpll0_out_main",
                        .parent_names = (const char *[])
-                                       { "gpll0_sleep_clk_src" },
+                                       { "cxo" },
                        .num_parents = 1,
                        .ops = &clk_alpha_pll_ops,
                },
index 7725b6e..59bb67d 100644 (file)
@@ -153,6 +153,11 @@ struct chtls_dev {
        unsigned int cdev_state;
 };
 
+struct chtls_listen {
+       struct chtls_dev *cdev;
+       struct sock *sk;
+};
+
 struct chtls_hws {
        struct sk_buff_head sk_recv_queue;
        u8 txqid;
@@ -215,6 +220,8 @@ struct chtls_sock {
        u16 resv2;
        u32 delack_mode;
        u32 delack_seq;
+       u32 snd_win;
+       u32 rcv_win;
 
        void *passive_reap_next;        /* placeholder for passive */
        struct chtls_hws tlshws;
index 228b91b..59b7529 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/kallsyms.h>
 #include <linux/kprobes.h>
 #include <linux/if_vlan.h>
+#include <net/inet_common.h>
 #include <net/tcp.h>
 #include <net/dst.h>
 
@@ -887,24 +888,6 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
        return mtu_idx;
 }
 
-static unsigned int select_rcv_wnd(struct chtls_sock *csk)
-{
-       unsigned int rcvwnd;
-       unsigned int wnd;
-       struct sock *sk;
-
-       sk = csk->sk;
-       wnd = tcp_full_space(sk);
-
-       if (wnd < MIN_RCV_WND)
-               wnd = MIN_RCV_WND;
-
-       rcvwnd = MAX_RCV_WND;
-
-       csk_set_flag(csk, CSK_UPDATE_RCV_WND);
-       return min(wnd, rcvwnd);
-}
-
 static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
 {
        int wscale = 0;
@@ -951,7 +934,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
        csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
                                        req);
        opt0 = TCAM_BYPASS_F |
-              WND_SCALE_V((tp)->rx_opt.rcv_wscale) |
+              WND_SCALE_V(RCV_WSCALE(tp)) |
               MSS_IDX_V(csk->mtu_idx) |
               L2T_IDX_V(csk->l2t_entry->idx) |
               NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
@@ -1005,6 +988,25 @@ static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
        return 0;
 }
 
+static void chtls_set_tcp_window(struct chtls_sock *csk)
+{
+       struct net_device *ndev = csk->egress_dev;
+       struct port_info *pi = netdev_priv(ndev);
+       unsigned int linkspeed;
+       u8 scale;
+
+       linkspeed = pi->link_cfg.speed;
+       scale = linkspeed / SPEED_10000;
+#define CHTLS_10G_RCVWIN (256 * 1024)
+       csk->rcv_win = CHTLS_10G_RCVWIN;
+       if (scale)
+               csk->rcv_win *= scale;
+#define CHTLS_10G_SNDWIN (256 * 1024)
+       csk->snd_win = CHTLS_10G_SNDWIN;
+       if (scale)
+               csk->snd_win *= scale;
+}
+
 static struct sock *chtls_recv_sock(struct sock *lsk,
                                    struct request_sock *oreq,
                                    void *network_hdr,
@@ -1067,6 +1069,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
        csk->port_id = port_id;
        csk->egress_dev = ndev;
        csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+       chtls_set_tcp_window(csk);
+       tp->rcv_wnd = csk->rcv_win;
+       csk->sndbuf = csk->snd_win;
        csk->ulp_mode = ULP_MODE_TLS;
        step = cdev->lldi->nrxq / cdev->lldi->nchan;
        csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
@@ -1075,9 +1080,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
                        port_id * step;
        csk->sndbuf = newsk->sk_sndbuf;
        csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
-       tp->rcv_wnd = select_rcv_wnd(csk);
        RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
-                                          WSCALE_OK(tp),
+                                          sock_net(newsk)->
+                                               ipv4.sysctl_tcp_window_scaling,
                                           tp->window_clamp);
        neigh_release(n);
        inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1129,6 +1134,7 @@ static void chtls_pass_accept_request(struct sock *sk,
        struct cpl_t5_pass_accept_rpl *rpl;
        struct cpl_pass_accept_req *req;
        struct listen_ctx *listen_ctx;
+       struct vlan_ethhdr *vlan_eh;
        struct request_sock *oreq;
        struct sk_buff *reply_skb;
        struct chtls_sock *csk;
@@ -1141,6 +1147,10 @@ static void chtls_pass_accept_request(struct sock *sk,
        unsigned int stid;
        unsigned int len;
        unsigned int tid;
+       bool th_ecn, ect;
+       __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
+       u16 eth_hdr_len;
+       bool ecn_ok;
 
        req = cplhdr(skb) + RSS_HDR;
        tid = GET_TID(req);
@@ -1179,24 +1189,40 @@ static void chtls_pass_accept_request(struct sock *sk,
        oreq->mss = 0;
        oreq->ts_recent = 0;
 
-       eh = (struct ethhdr *)(req + 1);
-       iph = (struct iphdr *)(eh + 1);
+       eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
+       if (eth_hdr_len == ETH_HLEN) {
+               eh = (struct ethhdr *)(req + 1);
+               iph = (struct iphdr *)(eh + 1);
+               network_hdr = (void *)(eh + 1);
+       } else {
+               vlan_eh = (struct vlan_ethhdr *)(req + 1);
+               iph = (struct iphdr *)(vlan_eh + 1);
+               network_hdr = (void *)(vlan_eh + 1);
+       }
        if (iph->version != 0x4)
                goto free_oreq;
 
-       network_hdr = (void *)(eh + 1);
        tcph = (struct tcphdr *)(iph + 1);
+       skb_set_network_header(skb, (void *)iph - (void *)req);
 
        tcp_rsk(oreq)->tfo_listener = false;
        tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
        chtls_set_req_port(oreq, tcph->source, tcph->dest);
-       inet_rsk(oreq)->ecn_ok = 0;
        chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
-       if (req->tcpopt.wsf <= 14) {
+       ip_dsfield = ipv4_get_dsfield(iph);
+       if (req->tcpopt.wsf <= 14 &&
+           sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
                inet_rsk(oreq)->wscale_ok = 1;
                inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
        }
        inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
+       th_ecn = tcph->ece && tcph->cwr;
+       if (th_ecn) {
+               ect = !INET_ECN_is_not_ect(ip_dsfield);
+               ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+               if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
+                       inet_rsk(oreq)->ecn_ok = 1;
+       }
 
        newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
        if (!newsk)
index afebbd8..18f553f 100644 (file)
@@ -397,7 +397,7 @@ static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
 
        req_wr->lsodisable_to_flags =
                        htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
-                             FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
+                             TX_URG_V(skb_urgent(skb)) |
                              T6_TX_FORCE_F | wr_ulp_mode_force |
                              TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
                                         skb_queue_empty(&csk->txq)));
@@ -534,10 +534,9 @@ static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
                                FW_OFLD_TX_DATA_WR_SHOVE_F);
 
        req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
-                       FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
-                       FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
-                                       (sk, CSK_TX_MORE_DATA)) &&
-                                        skb_queue_empty(&csk->txq)));
+                       TX_URG_V(skb_urgent(skb)) |
+                       TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
+                                  skb_queue_empty(&csk->txq)));
        req->plen = htonl(len);
 }
 
@@ -995,7 +994,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        int mss, flags, err;
        int recordsz = 0;
        int copied = 0;
-       int hdrlen = 0;
        long timeo;
 
        lock_sock(sk);
@@ -1032,7 +1030,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
                        recordsz = tls_header_read(&hdr, &msg->msg_iter);
                        size -= TLS_HEADER_LENGTH;
-                       hdrlen += TLS_HEADER_LENGTH;
+                       copied += TLS_HEADER_LENGTH;
                        csk->tlshws.txleft = recordsz;
                        csk->tlshws.type = hdr.type;
                        if (skb)
@@ -1083,10 +1081,8 @@ new_buf:
                        int off = TCP_OFF(sk);
                        bool merge;
 
-                       if (!page)
-                               goto wait_for_memory;
-
-                       pg_size <<= compound_order(page);
+                       if (page)
+                               pg_size <<= compound_order(page);
                        if (off < pg_size &&
                            skb_can_coalesce(skb, i, page, off)) {
                                merge = 1;
@@ -1187,7 +1183,7 @@ out:
                chtls_tcp_push(sk, flags);
 done:
        release_sock(sk);
-       return copied + hdrlen;
+       return copied;
 do_fault:
        if (!skb->len) {
                __skb_unlink(skb, &csk->txq);
index f472c51..563f8fe 100644 (file)
@@ -55,24 +55,19 @@ static void unregister_listen_notifier(struct notifier_block *nb)
 static int listen_notify_handler(struct notifier_block *this,
                                 unsigned long event, void *data)
 {
-       struct chtls_dev *cdev;
-       struct sock *sk;
-       int ret;
+       struct chtls_listen *clisten;
+       int ret = NOTIFY_DONE;
 
-       sk = data;
-       ret =  NOTIFY_DONE;
+       clisten = (struct chtls_listen *)data;
 
        switch (event) {
        case CHTLS_LISTEN_START:
+               ret = chtls_listen_start(clisten->cdev, clisten->sk);
+               kfree(clisten);
+               break;
        case CHTLS_LISTEN_STOP:
-               mutex_lock(&cdev_list_lock);
-               list_for_each_entry(cdev, &cdev_list, list) {
-                       if (event == CHTLS_LISTEN_START)
-                               ret = chtls_listen_start(cdev, sk);
-                       else
-                               chtls_listen_stop(cdev, sk);
-               }
-               mutex_unlock(&cdev_list_lock);
+               chtls_listen_stop(clisten->cdev, clisten->sk);
+               kfree(clisten);
                break;
        }
        return ret;
@@ -90,8 +85,9 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
        return 0;
 }
 
-static int chtls_start_listen(struct sock *sk)
+static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
 {
+       struct chtls_listen *clisten;
        int err;
 
        if (sk->sk_protocol != IPPROTO_TCP)
@@ -102,21 +98,33 @@ static int chtls_start_listen(struct sock *sk)
                return -EADDRNOTAVAIL;
 
        sk->sk_backlog_rcv = listen_backlog_rcv;
+       clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+       if (!clisten)
+               return -ENOMEM;
+       clisten->cdev = cdev;
+       clisten->sk = sk;
        mutex_lock(&notify_mutex);
        err = raw_notifier_call_chain(&listen_notify_list,
-                                     CHTLS_LISTEN_START, sk);
+                                     CHTLS_LISTEN_START, clisten);
        mutex_unlock(&notify_mutex);
        return err;
 }
 
-static void chtls_stop_listen(struct sock *sk)
+static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
 {
+       struct chtls_listen *clisten;
+
        if (sk->sk_protocol != IPPROTO_TCP)
                return;
 
+       clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+       if (!clisten)
+               return;
+       clisten->cdev = cdev;
+       clisten->sk = sk;
        mutex_lock(&notify_mutex);
        raw_notifier_call_chain(&listen_notify_list,
-                               CHTLS_LISTEN_STOP, sk);
+                               CHTLS_LISTEN_STOP, clisten);
        mutex_unlock(&notify_mutex);
 }
 
@@ -138,15 +146,43 @@ static int chtls_inline_feature(struct tls_device *dev)
 
 static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
 {
+       struct chtls_dev *cdev = to_chtls_dev(dev);
+
        if (sk->sk_state == TCP_LISTEN)
-               return chtls_start_listen(sk);
+               return chtls_start_listen(cdev, sk);
        return 0;
 }
 
 static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
 {
+       struct chtls_dev *cdev = to_chtls_dev(dev);
+
        if (sk->sk_state == TCP_LISTEN)
-               chtls_stop_listen(sk);
+               chtls_stop_listen(cdev, sk);
+}
+
+static void chtls_free_uld(struct chtls_dev *cdev)
+{
+       int i;
+
+       tls_unregister_device(&cdev->tlsdev);
+       kvfree(cdev->kmap.addr);
+       idr_destroy(&cdev->hwtid_idr);
+       for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
+               kfree_skb(cdev->rspq_skb_cache[i]);
+       kfree(cdev->lldi);
+       kfree_skb(cdev->askb);
+       kfree(cdev);
+}
+
+static inline void chtls_dev_release(struct kref *kref)
+{
+       struct chtls_dev *cdev;
+       struct tls_device *dev;
+
+       dev = container_of(kref, struct tls_device, kref);
+       cdev = to_chtls_dev(dev);
+       chtls_free_uld(cdev);
 }
 
 static void chtls_register_dev(struct chtls_dev *cdev)
@@ -159,15 +195,12 @@ static void chtls_register_dev(struct chtls_dev *cdev)
        tlsdev->feature = chtls_inline_feature;
        tlsdev->hash = chtls_create_hash;
        tlsdev->unhash = chtls_destroy_hash;
-       tls_register_device(&cdev->tlsdev);
+       tlsdev->release = chtls_dev_release;
+       kref_init(&tlsdev->kref);
+       tls_register_device(tlsdev);
        cdev->cdev_state = CHTLS_CDEV_STATE_UP;
 }
 
-static void chtls_unregister_dev(struct chtls_dev *cdev)
-{
-       tls_unregister_device(&cdev->tlsdev);
-}
-
 static void process_deferq(struct work_struct *task_param)
 {
        struct chtls_dev *cdev = container_of(task_param,
@@ -262,28 +295,16 @@ out:
        return NULL;
 }
 
-static void chtls_free_uld(struct chtls_dev *cdev)
-{
-       int i;
-
-       chtls_unregister_dev(cdev);
-       kvfree(cdev->kmap.addr);
-       idr_destroy(&cdev->hwtid_idr);
-       for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
-               kfree_skb(cdev->rspq_skb_cache[i]);
-       kfree(cdev->lldi);
-       kfree_skb(cdev->askb);
-       kfree(cdev);
-}
-
 static void chtls_free_all_uld(void)
 {
        struct chtls_dev *cdev, *tmp;
 
        mutex_lock(&cdev_mutex);
        list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
-               if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
-                       chtls_free_uld(cdev);
+               if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
+                       list_del(&cdev->list);
+                       kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
+               }
        }
        mutex_unlock(&cdev_mutex);
 }
@@ -304,7 +325,7 @@ static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
                mutex_lock(&cdev_mutex);
                list_del(&cdev->list);
                mutex_unlock(&cdev_mutex);
-               chtls_free_uld(cdev);
+               kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
                break;
        default:
                break;
index 8816c69..387f1cf 100644 (file)
@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                        case CHIP_TOPAZ:
                                if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
                                    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
-                                   ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+                                   ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+                                   ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+                                   ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
                                        info->is_kicker = true;
                                        strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
                                } else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                if (type == CGS_UCODE_ID_SMU) {
                                        if (((adev->pdev->device == 0x67ef) &&
                                             ((adev->pdev->revision == 0xe0) ||
-                                             (adev->pdev->revision == 0xe2) ||
                                              (adev->pdev->revision == 0xe5))) ||
                                            ((adev->pdev->device == 0x67ff) &&
                                             ((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                              (adev->pdev->revision == 0xff)))) {
                                                info->is_kicker = true;
                                                strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
-                                       } else
+                                       } else if ((adev->pdev->device == 0x67ef) &&
+                                                  (adev->pdev->revision == 0xe2)) {
+                                               info->is_kicker = true;
+                                               strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+                                       } else {
                                                strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+                                       }
                                } else if (type == CGS_UCODE_ID_SMU_SK) {
                                        strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
                                }
@@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                              (adev->pdev->revision == 0xe7) ||
                                              (adev->pdev->revision == 0xef))) ||
                                            ((adev->pdev->device == 0x6fdf) &&
-                                            (adev->pdev->revision == 0xef))) {
+                                            ((adev->pdev->revision == 0xef) ||
+                                             (adev->pdev->revision == 0xff)))) {
                                                info->is_kicker = true;
                                                strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
-                                       } else
+                                       } else if ((adev->pdev->device == 0x67df) &&
+                                                  ((adev->pdev->revision == 0xe1) ||
+                                                   (adev->pdev->revision == 0xf7))) {
+                                               info->is_kicker = true;
+                                               strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+                                       } else {
                                                strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+                                       }
                                } else if (type == CGS_UCODE_ID_SMU_SK) {
                                        strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
                                }
                                break;
                        case CHIP_POLARIS12:
-                               strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+                               if (((adev->pdev->device == 0x6987) &&
+                                    ((adev->pdev->revision == 0xc0) ||
+                                     (adev->pdev->revision == 0xc3))) ||
+                                   ((adev->pdev->device == 0x6981) &&
+                                    ((adev->pdev->revision == 0x00) ||
+                                     (adev->pdev->revision == 0x01) ||
+                                     (adev->pdev->revision == 0x10)))) {
+                                       info->is_kicker = true;
+                                       strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+                               } else {
+                                       strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+                               }
                                break;
                        case CHIP_VEGAM:
                                strcpy(fw_name, "amdgpu/vegam_smc.bin");
index 663043c..0acc8de 100644 (file)
@@ -124,14 +124,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
                goto free_chunk;
        }
 
+       mutex_lock(&p->ctx->lock);
+
        /* skip guilty context job */
        if (atomic_read(&p->ctx->guilty) == 1) {
                ret = -ECANCELED;
                goto free_chunk;
        }
 
-       mutex_lock(&p->ctx->lock);
-
        /* get chunks */
        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
index 8de55f7..74b611e 100644 (file)
@@ -872,7 +872,13 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        /* Vega 12 */
        {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@@ -885,6 +891,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
        {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
        {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+       {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
        {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
        {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
        /* Raven */
index a9f18ea..e4ded89 100644 (file)
@@ -337,12 +337,19 @@ static const struct kfd_deviceid supported_devices[] = {
        { 0x6864, &vega10_device_info },        /* Vega10 */
        { 0x6867, &vega10_device_info },        /* Vega10 */
        { 0x6868, &vega10_device_info },        /* Vega10 */
+       { 0x6869, &vega10_device_info },        /* Vega10 */
+       { 0x686A, &vega10_device_info },        /* Vega10 */
+       { 0x686B, &vega10_device_info },        /* Vega10 */
        { 0x686C, &vega10_vf_device_info },     /* Vega10  vf*/
+       { 0x686D, &vega10_device_info },        /* Vega10 */
+       { 0x686E, &vega10_device_info },        /* Vega10 */
+       { 0x686F, &vega10_device_info },        /* Vega10 */
        { 0x687F, &vega10_device_info },        /* Vega10 */
        { 0x66a0, &vega20_device_info },        /* Vega20 */
        { 0x66a1, &vega20_device_info },        /* Vega20 */
        { 0x66a2, &vega20_device_info },        /* Vega20 */
        { 0x66a3, &vega20_device_info },        /* Vega20 */
+       { 0x66a4, &vega20_device_info },        /* Vega20 */
        { 0x66a7, &vega20_device_info },        /* Vega20 */
        { 0x66af, &vega20_device_info }         /* Vega20 */
 };
index 3367dd3..3b7fce5 100644 (file)
@@ -130,7 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
        data->registry_data.disable_auto_wattman = 1;
        data->registry_data.auto_wattman_debug = 0;
        data->registry_data.auto_wattman_sample_period = 100;
-       data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
+       data->registry_data.fclk_gfxclk_ratio = 0;
        data->registry_data.auto_wattman_threshold = 50;
        data->registry_data.gfxoff_controlled_by_driver = 1;
        data->gfxoff_allowed = false;
index 62f36ba..c1a99df 100644 (file)
@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_AgmResetPsm                 ((uint16_t) 0x403)
 #define PPSMC_MSG_ReadVftCell                 ((uint16_t) 0x404)
 
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage      ((uint16_t) 0x415)
+
 #define PPSMC_MSG_GFX_CU_PG_ENABLE            ((uint16_t) 0x280)
 #define PPSMC_MSG_GFX_CU_PG_DISABLE           ((uint16_t) 0x281)
 #define PPSMC_MSG_GetCurrPkgPwr               ((uint16_t) 0x282)
index 872d382..a1e0ac9 100644 (file)
@@ -1985,6 +1985,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
 
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
 
+       /* Apply avfs cks-off voltages to avoid the overshoot
+        * when switching to the highest sclk frequency
+        */
+       if (data->apply_avfs_cks_off_voltage)
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+
        return 0;
 }
 
index 99d5e4f..a6edd5d 100644 (file)
@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
index 481896f..85e6736 100644 (file)
@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
                plane->bpp = skl_pixel_formats[fmt].bpp;
                plane->drm_format = skl_pixel_formats[fmt].drm_format;
        } else {
-               plane->tiled = !!(val & DISPPLANE_TILED);
+               plane->tiled = val & DISPPLANE_TILED;
                fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
                plane->bpp = bdw_pixel_formats[fmt].bpp;
                plane->drm_format = bdw_pixel_formats[fmt].drm_format;
index ffdbbac..47062ee 100644 (file)
@@ -1444,6 +1444,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
        intel_uncore_sanitize(dev_priv);
 
+       intel_gt_init_workarounds(dev_priv);
        i915_gem_load_init_fences(dev_priv);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
index 9102571..872a2e1 100644 (file)
@@ -67,6 +67,7 @@
 #include "intel_ringbuffer.h"
 #include "intel_uncore.h"
 #include "intel_wopcm.h"
+#include "intel_workarounds.h"
 #include "intel_uc.h"
 
 #include "i915_gem.h"
@@ -1805,6 +1806,7 @@ struct drm_i915_private {
        int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
        struct i915_workarounds workarounds;
+       struct i915_wa_list gt_wa_list;
 
        struct i915_frontbuffer_tracking fb_tracking;
 
@@ -2148,6 +2150,8 @@ struct drm_i915_private {
                struct delayed_work idle_work;
 
                ktime_t last_init_time;
+
+               struct i915_vma *scratch;
        } gt;
 
        /* perform PHY state sanity checks? */
@@ -3870,4 +3874,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
                return I915_HWS_CSB_WRITE_INDEX;
 }
 
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+       return i915_ggtt_offset(i915->gt.scratch);
+}
+
 #endif
index 0c8aa57..6ae9a60 100644 (file)
@@ -5305,7 +5305,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
                }
        }
 
-       intel_gt_workarounds_apply(dev_priv);
+       intel_gt_apply_workarounds(dev_priv);
 
        i915_gem_init_swizzling(dev_priv);
 
@@ -5500,6 +5500,44 @@ err_active:
        goto out_ctx;
 }
 
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int ret;
+
+       obj = i915_gem_object_create_stolen(i915, size);
+       if (!obj)
+               obj = i915_gem_object_create_internal(i915, size);
+       if (IS_ERR(obj)) {
+               DRM_ERROR("Failed to allocate scratch page\n");
+               return PTR_ERR(obj);
+       }
+
+       vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_unref;
+       }
+
+       ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+       if (ret)
+               goto err_unref;
+
+       i915->gt.scratch = vma;
+       return 0;
+
+err_unref:
+       i915_gem_object_put(obj);
+       return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+       i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
        int ret;
@@ -5546,12 +5584,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
                goto err_unlock;
        }
 
-       ret = i915_gem_contexts_init(dev_priv);
+       ret = i915_gem_init_scratch(dev_priv,
+                                   IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_ggtt;
        }
 
+       ret = i915_gem_contexts_init(dev_priv);
+       if (ret) {
+               GEM_BUG_ON(ret == -EIO);
+               goto err_scratch;
+       }
+
        ret = intel_engines_init(dev_priv);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
@@ -5624,6 +5669,8 @@ err_pm:
 err_context:
        if (ret != -EIO)
                i915_gem_contexts_fini(dev_priv);
+err_scratch:
+       i915_gem_fini_scratch(dev_priv);
 err_ggtt:
 err_unlock:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5675,8 +5722,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
        intel_uc_fini(dev_priv);
        i915_gem_cleanup_engines(dev_priv);
        i915_gem_contexts_fini(dev_priv);
+       i915_gem_fini_scratch(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
+       intel_wa_list_free(&dev_priv->gt_wa_list);
+
        intel_cleanup_gt_powersave(dev_priv);
 
        intel_uc_fini_misc(dev_priv);
index d4fac09..1aaccbe 100644 (file)
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
                else if (gen >= 4)
                        len = 4;
                else
-                       len = 6;
+                       len = 3;
 
                batch = reloc_gpu(eb, vma, len);
                if (IS_ERR(batch))
@@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma,
                        *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                        *batch++ = addr;
                        *batch++ = target_offset;
-
-                       /* And again for good measure (blb/pnv) */
-                       *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-                       *batch++ = addr;
-                       *batch++ = target_offset;
                }
 
                goto out;
index 3eb33e0..db4128d 100644 (file)
@@ -1495,7 +1495,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
                        if (HAS_BROKEN_CS_TLB(i915))
                                ee->wa_batchbuffer =
                                        i915_error_object_create(i915,
-                                                                engine->scratch);
+                                                                i915->gt.scratch);
                        request_record_user_bo(request, ee);
 
                        ee->ctx =
index 217ed3e..76b5f94 100644 (file)
@@ -490,46 +490,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
        intel_engine_init_cmd_parser(engine);
 }
 
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
-                               unsigned int size)
-{
-       struct drm_i915_gem_object *obj;
-       struct i915_vma *vma;
-       int ret;
-
-       WARN_ON(engine->scratch);
-
-       obj = i915_gem_object_create_stolen(engine->i915, size);
-       if (!obj)
-               obj = i915_gem_object_create_internal(engine->i915, size);
-       if (IS_ERR(obj)) {
-               DRM_ERROR("Failed to allocate scratch page\n");
-               return PTR_ERR(obj);
-       }
-
-       vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
-       if (IS_ERR(vma)) {
-               ret = PTR_ERR(vma);
-               goto err_unref;
-       }
-
-       ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
-       if (ret)
-               goto err_unref;
-
-       engine->scratch = vma;
-       return 0;
-
-err_unref:
-       i915_gem_object_put(obj);
-       return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
-       i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
        if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -704,8 +664,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *i915 = engine->i915;
 
-       intel_engine_cleanup_scratch(engine);
-
        cleanup_status_page(engine);
 
        intel_engine_fini_breadcrumbs(engine);
@@ -720,6 +678,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
        __intel_context_unpin(i915->kernel_context, engine);
 
        i915_timeline_fini(&engine->timeline);
+
+       intel_wa_list_free(&engine->wa_list);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
index 37c94a5..58d1d3d 100644 (file)
@@ -442,8 +442,13 @@ static u64 execlists_update_context(struct i915_request *rq)
         * may not be visible to the HW prior to the completion of the UC
         * register write and that we may begin execution from the context
         * before its image is complete leading to invalid PD chasing.
+        *
+        * Furthermore, Braswell, at least, wants a full mb to be sure that
+        * the writes are coherent in memory (visible to the GPU) prior to
+        * execution, and not just visible to other CPUs (as is the result of
+        * wmb).
         */
-       wmb();
+       mb();
        return ce->lrc_desc;
 }
 
@@ -1443,9 +1448,10 @@ static int execlists_request_alloc(struct i915_request *request)
 static u32 *
 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 {
+       /* NB no one else is allowed to scribble over scratch + 256! */
        *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
        *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-       *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+       *batch++ = i915_scratch_offset(engine->i915) + 256;
        *batch++ = 0;
 
        *batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1459,7 +1465,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 
        *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
        *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-       *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+       *batch++ = i915_scratch_offset(engine->i915) + 256;
        *batch++ = 0;
 
        return batch;
@@ -1496,7 +1502,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
                                       PIPE_CONTROL_GLOBAL_GTT_IVB |
                                       PIPE_CONTROL_CS_STALL |
                                       PIPE_CONTROL_QW_WRITE,
-                                      i915_ggtt_offset(engine->scratch) +
+                                      i915_scratch_offset(engine->i915) +
                                       2 * CACHELINE_BYTES);
 
        *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1573,7 +1579,7 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
                                               PIPE_CONTROL_GLOBAL_GTT_IVB |
                                               PIPE_CONTROL_CS_STALL |
                                               PIPE_CONTROL_QW_WRITE,
-                                              i915_ggtt_offset(engine->scratch)
+                                              i915_scratch_offset(engine->i915)
                                               + 2 * CACHELINE_BYTES);
        }
 
@@ -1793,6 +1799,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
+       intel_engine_apply_workarounds(engine);
+
        intel_mocs_init_engine(engine);
 
        intel_engine_reset_breadcrumbs(engine);
@@ -2139,7 +2147,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
 {
        struct intel_engine_cs *engine = request->engine;
        u32 scratch_addr =
-               i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+               i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
        bool vf_flush_wa = false, dc_flush_wa = false;
        u32 *cs, flags = 0;
        int len;
@@ -2476,10 +2484,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
        if (ret)
                return ret;
 
-       ret = intel_engine_create_scratch(engine, PAGE_SIZE);
-       if (ret)
-               goto err_cleanup_common;
-
        ret = intel_init_workaround_bb(engine);
        if (ret) {
                /*
@@ -2491,11 +2495,9 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
                          ret);
        }
 
-       return 0;
+       intel_engine_init_workarounds(engine);
 
-err_cleanup_common:
-       intel_engine_cleanup_common(engine);
-       return ret;
+       return 0;
 }
 
 int logical_xcs_ring_init(struct intel_engine_cs *engine)
index 187bb0c..1f8d2a6 100644 (file)
@@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
 static int
 gen2_render_ring_flush(struct i915_request *rq, u32 mode)
 {
+       unsigned int num_store_dw;
        u32 cmd, *cs;
 
        cmd = MI_FLUSH;
-
+       num_store_dw = 0;
        if (mode & EMIT_INVALIDATE)
                cmd |= MI_READ_FLUSH;
+       if (mode & EMIT_FLUSH)
+               num_store_dw = 4;
 
-       cs = intel_ring_begin(rq, 2);
+       cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        *cs++ = cmd;
-       *cs++ = MI_NOOP;
+       while (num_store_dw--) {
+               *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+               *cs++ = i915_scratch_offset(rq->i915);
+               *cs++ = 0;
+       }
+       *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+
        intel_ring_advance(rq, cs);
 
        return 0;
@@ -150,8 +159,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
         */
        if (mode & EMIT_INVALIDATE) {
                *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-               *cs++ = i915_ggtt_offset(rq->engine->scratch) |
-                       PIPE_CONTROL_GLOBAL_GTT;
+               *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
                *cs++ = 0;
                *cs++ = 0;
 
@@ -159,8 +167,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
                        *cs++ = MI_FLUSH;
 
                *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-               *cs++ = i915_ggtt_offset(rq->engine->scratch) |
-                       PIPE_CONTROL_GLOBAL_GTT;
+               *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
                *cs++ = 0;
                *cs++ = 0;
        }
@@ -212,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
-       u32 scratch_addr =
-               i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
        u32 *cs;
 
        cs = intel_ring_begin(rq, 6);
@@ -246,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
 static int
 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-       u32 scratch_addr =
-               i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
        u32 *cs, flags = 0;
        int ret;
 
@@ -316,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 static int
 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-       u32 scratch_addr =
-               i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
        u32 *cs, flags = 0;
 
        /*
@@ -971,7 +975,7 @@ i965_emit_bb_start(struct i915_request *rq,
 }
 
 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
@@ -979,7 +983,9 @@ i830_emit_bb_start(struct i915_request *rq,
                   u64 offset, u32 len,
                   unsigned int dispatch_flags)
 {
-       u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+       u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+       GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
 
        cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
@@ -1437,7 +1443,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
        struct i915_timeline *timeline;
        struct intel_ring *ring;
-       unsigned int size;
        int err;
 
        intel_engine_setup_common(engine);
@@ -1462,21 +1467,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
        GEM_BUG_ON(engine->buffer);
        engine->buffer = ring;
 
-       size = PAGE_SIZE;
-       if (HAS_BROKEN_CS_TLB(engine->i915))
-               size = I830_WA_SIZE;
-       err = intel_engine_create_scratch(engine, size);
-       if (err)
-               goto err_unpin;
-
        err = intel_engine_init_common(engine);
        if (err)
-               goto err_scratch;
+               goto err_unpin;
 
        return 0;
 
-err_scratch:
-       intel_engine_cleanup_scratch(engine);
 err_unpin:
        intel_ring_unpin(ring);
 err_ring:
@@ -1550,7 +1546,7 @@ static int flush_pd_dir(struct i915_request *rq)
        /* Stall until the page table load is complete */
        *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
-       *cs++ = i915_ggtt_offset(engine->scratch);
+       *cs++ = i915_scratch_offset(rq->i915);
        *cs++ = MI_NOOP;
 
        intel_ring_advance(rq, cs);
@@ -1659,7 +1655,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                        /* Insert a delay before the next switch! */
                        *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
                        *cs++ = i915_mmio_reg_offset(last_reg);
-                       *cs++ = i915_ggtt_offset(engine->scratch);
+                       *cs++ = i915_scratch_offset(rq->i915);
                        *cs++ = MI_NOOP;
                }
                *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
index 2dfa585..767a719 100644 (file)
@@ -15,6 +15,7 @@
 #include "i915_selftest.h"
 #include "i915_timeline.h"
 #include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
 
 struct drm_printer;
 struct i915_sched_attr;
@@ -440,7 +441,7 @@ struct intel_engine_cs {
 
        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;
-       struct i915_vma *scratch;
+       struct i915_wa_list wa_list;
 
        u32             irq_keep_mask; /* always keep these interrupts */
        u32             irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -898,10 +899,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
-                               unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
index 4bcdeaf..6e58089 100644 (file)
  * - Public functions to init or apply the given workaround type.
  */
 
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
+{
+       wal->name = name;
+}
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+       if (!wal->count)
+               return;
+
+       DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+                        wal->count, wal->name);
+}
+
 static void wa_add(struct drm_i915_private *i915,
                   i915_reg_t reg, const u32 mask, const u32 val)
 {
@@ -580,160 +594,175 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
        return 0;
 }
 
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+       const unsigned int grow = 1 << 4;
+
+       GEM_BUG_ON(!is_power_of_2(grow));
+
+       if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+               struct i915_wa *list;
+
+               list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+                                    GFP_KERNEL);
+               if (!list) {
+                       DRM_ERROR("No space for workaround init!\n");
+                       return;
+               }
+
+               if (wal->list)
+                       memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+               wal->list = list;
+       }
+
+       wal->list[wal->count++] = *wa;
+}
+
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+       struct i915_wa wa = {
+               .reg = reg,
+               .mask = val,
+               .val = _MASKED_BIT_ENABLE(val)
+       };
+
+       wal_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+                  u32 val)
 {
+       struct i915_wa wa = {
+               .reg = reg,
+               .mask = mask,
+               .val = val
+       };
+
+       wal_add(wal, &wa);
 }
 
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
+       wa_write_masked_or(wal, reg, ~0, val);
 }
 
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
-       /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
-       I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
-                  _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+       wa_write_masked_or(wal, reg, val, val);
+}
 
-       /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
-       I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
-                  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
        /* WaDisableKillLogic:bxt,skl,kbl */
-       if (!IS_COFFEELAKE(dev_priv))
-               I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-                          ECOCHK_DIS_TLB);
+       if (!IS_COFFEELAKE(i915))
+               wa_write_or(wal,
+                           GAM_ECOCHK,
+                           ECOCHK_DIS_TLB);
 
-       if (HAS_LLC(dev_priv)) {
+       if (HAS_LLC(i915)) {
                /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
                 *
                 * Must match Display Engine. See
                 * WaCompressedResourceDisplayNewHashMode.
                 */
-               I915_WRITE(MMCD_MISC_CTRL,
-                          I915_READ(MMCD_MISC_CTRL) |
-                          MMCD_PCLA |
-                          MMCD_HOTSPOT_EN);
+               wa_write_or(wal,
+                           MMCD_MISC_CTRL,
+                           MMCD_PCLA | MMCD_HOTSPOT_EN);
        }
 
        /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
-       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-                  BDW_DISABLE_HDC_INVALIDATION);
-
-       /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
-       if (IS_GEN9_LP(dev_priv)) {
-               u32 val = I915_READ(GEN8_L3SQCREG1);
-
-               val &= ~L3_PRIO_CREDITS_MASK;
-               val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
-               I915_WRITE(GEN8_L3SQCREG1, val);
-       }
-
-       /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
-       I915_WRITE(GEN8_L3SQCREG4,
-                  I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
-       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
-       I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-                  _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+       wa_write_or(wal,
+                   GAM_ECOCHK,
+                   BDW_DISABLE_HDC_INVALIDATION);
 }
 
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       gen9_gt_workarounds_apply(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
-       /* WaEnableGapsTsvCreditFix:skl */
-       I915_WRITE(GEN8_GARBCNTL,
-                  I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+       gen9_gt_workarounds_init(i915);
 
        /* WaDisableGafsUnitClkGating:skl */
-       I915_WRITE(GEN7_UCGCTL4,
-                  I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+       wa_write_or(wal,
+                   GEN7_UCGCTL4,
+                   GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
        /* WaInPlaceDecompressionHang:skl */
-       if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
-               I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                          I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                          GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+       if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+               wa_write_or(wal,
+                           GEN9_GAMT_ECO_REG_RW_IA,
+                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       gen9_gt_workarounds_apply(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
-       /* WaDisablePooledEuLoadBalancingFix:bxt */
-       I915_WRITE(FF_SLICE_CS_CHICKEN2,
-                  _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+       gen9_gt_workarounds_init(i915);
 
        /* WaInPlaceDecompressionHang:bxt */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                  I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+       wa_write_or(wal,
+                   GEN9_GAMT_ECO_REG_RW_IA,
+                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       gen9_gt_workarounds_apply(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
-       /* WaEnableGapsTsvCreditFix:kbl */
-       I915_WRITE(GEN8_GARBCNTL,
-                  I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+       gen9_gt_workarounds_init(i915);
 
        /* WaDisableDynamicCreditSharing:kbl */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
-               I915_WRITE(GAMT_CHKN_BIT_REG,
-                          I915_READ(GAMT_CHKN_BIT_REG) |
-                          GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+       if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+               wa_write_or(wal,
+                           GAMT_CHKN_BIT_REG,
+                           GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
 
        /* WaDisableGafsUnitClkGating:kbl */
-       I915_WRITE(GEN7_UCGCTL4,
-                  I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+       wa_write_or(wal,
+                   GEN7_UCGCTL4,
+                   GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
        /* WaInPlaceDecompressionHang:kbl */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                  I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-       /* WaKBLVECSSemaphoreWaitPoll:kbl */
-       if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
-               struct intel_engine_cs *engine;
-               unsigned int tmp;
-
-               for_each_engine(engine, dev_priv, tmp) {
-                       if (engine->id == RCS)
-                               continue;
-
-                       I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
-               }
-       }
+       wa_write_or(wal,
+                   GEN9_GAMT_ECO_REG_RW_IA,
+                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       gen9_gt_workarounds_apply(dev_priv);
+       gen9_gt_workarounds_init(i915);
 }
 
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       gen9_gt_workarounds_apply(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
-       /* WaEnableGapsTsvCreditFix:cfl */
-       I915_WRITE(GEN8_GARBCNTL,
-                  I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+       gen9_gt_workarounds_init(i915);
 
        /* WaDisableGafsUnitClkGating:cfl */
-       I915_WRITE(GEN7_UCGCTL4,
-                  I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+       wa_write_or(wal,
+                   GEN7_UCGCTL4,
+                   GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
        /* WaInPlaceDecompressionHang:cfl */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                  I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+       wa_write_or(wal,
+                   GEN9_GAMT_ECO_REG_RW_IA,
+                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
 static void wa_init_mcr(struct drm_i915_private *dev_priv)
 {
        const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
-       u32 mcr;
+       struct i915_wa_list *wal = &dev_priv->gt_wa_list;
        u32 mcr_slice_subslice_mask;
 
        /*
@@ -770,8 +799,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
                WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
        }
 
-       mcr = I915_READ(GEN8_MCR_SELECTOR);
-
        if (INTEL_GEN(dev_priv) >= 11)
                mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
                                          GEN11_MCR_SUBSLICE_MASK;
@@ -789,148 +816,170 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
         * occasions, such as INSTDONE, where this value is dependent
         * on s/ss combo, the read should be done with read_subslice_reg.
         */
-       mcr &= ~mcr_slice_subslice_mask;
-       mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
-       I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+       wa_write_masked_or(wal,
+                          GEN8_MCR_SELECTOR,
+                          mcr_slice_subslice_mask,
+                          intel_calculate_mcr_s_ss_select(dev_priv));
 }
 
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       wa_init_mcr(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
+
+       wa_init_mcr(i915);
 
        /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
-       if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
-               I915_WRITE(GAMT_CHKN_BIT_REG,
-                          I915_READ(GAMT_CHKN_BIT_REG) |
-                          GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+       if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+               wa_write_or(wal,
+                           GAMT_CHKN_BIT_REG,
+                           GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
 
        /* WaInPlaceDecompressionHang:cnl */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                  I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-       /* WaEnablePreemptionGranularityControlByUMD:cnl */
-       I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-                  _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+       wa_write_or(wal,
+                   GEN9_GAMT_ECO_REG_RW_IA,
+                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       wa_init_mcr(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
-       /* This is not an Wa. Enable for better image quality */
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+       wa_init_mcr(i915);
 
        /* WaInPlaceDecompressionHang:icl */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-       /* WaPipelineFlushCoherentLines:icl */
-       I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                                  GEN8_LQSC_FLUSH_COHERENT_LINES);
-
-       /* Wa_1405543622:icl
-        * Formerly known as WaGAPZPriorityScheme
-        */
-       I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
-                                 GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
-       /* Wa_1604223664:icl
-        * Formerly known as WaL3BankAddressHashing
-        */
-       I915_WRITE(GEN8_GARBCNTL,
-                  (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
-                  GEN11_HASH_CTRL_EXCL_BIT0);
-       I915_WRITE(GEN11_GLBLINVL,
-                  (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
-                  GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+       wa_write_or(wal,
+                   GEN9_GAMT_ECO_REG_RW_IA,
+                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 
        /* WaModifyGamTlbPartitioning:icl */
-       I915_WRITE(GEN11_GACB_PERF_CTRL,
-                  (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
-                  GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
-       /* Wa_1405733216:icl
-        * Formerly known as WaDisableCleanEvicts
-        */
-       I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                                  GEN11_LQSC_CLEAN_EVICT_DISABLE);
+       wa_write_masked_or(wal,
+                          GEN11_GACB_PERF_CTRL,
+                          GEN11_HASH_CTRL_MASK,
+                          GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
 
        /* Wa_1405766107:icl
         * Formerly known as WaCL2SFHalfMaxAlloc
         */
-       I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
-                                     GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
-                                     GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+       wa_write_or(wal,
+                   GEN11_LSN_UNSLCVC,
+                   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+                   GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
 
        /* Wa_220166154:icl
         * Formerly known as WaDisCtxReload
         */
-       I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
-                                          GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+       wa_write_or(wal,
+                   GEN8_GAMW_ECO_DEV_RW_IA,
+                   GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
 
        /* Wa_1405779004:icl (pre-prod) */
-       if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
-               I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
-                          I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
-                          MSCUNIT_CLKGATE_DIS);
+       if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+               wa_write_or(wal,
+                           SLICE_UNIT_LEVEL_CLKGATE,
+                           MSCUNIT_CLKGATE_DIS);
 
        /* Wa_1406680159:icl */
-       I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
-                  I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
-                  GWUNIT_CLKGATE_DIS);
-
-       /* Wa_1604302699:icl */
-       I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
-                  I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
-                  GEN11_I2M_WRITE_DISABLE);
+       wa_write_or(wal,
+                   SUBSLICE_UNIT_LEVEL_CLKGATE,
+                   GWUNIT_CLKGATE_DIS);
 
        /* Wa_1406838659:icl (pre-prod) */
-       if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
-               I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
-                          I915_READ(INF_UNIT_LEVEL_CLKGATE) |
-                          CGPSF_CLKGATE_DIS);
-
-       /* WaForwardProgressSoftReset:icl */
-       I915_WRITE(GEN10_SCRATCH_LNCF2,
-                  I915_READ(GEN10_SCRATCH_LNCF2) |
-                  PMFLUSHDONE_LNICRSDROP |
-                  PMFLUSH_GAPL3UNBLOCK |
-                  PMFLUSHDONE_LNEBLK);
+       if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+               wa_write_or(wal,
+                           INF_UNIT_LEVEL_CLKGATE,
+                           CGPSF_CLKGATE_DIS);
 
        /* Wa_1406463099:icl
         * Formerly known as WaGamTlbPendError
         */
-       I915_WRITE(GAMT_CHKN_BIT_REG,
-                  I915_READ(GAMT_CHKN_BIT_REG) |
-                  GAMT_CHKN_DISABLE_L3_COH_PIPE);
+       wa_write_or(wal,
+                   GAMT_CHKN_BIT_REG,
+                   GAMT_CHKN_DISABLE_L3_COH_PIPE);
 }
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
 {
-       if (INTEL_GEN(dev_priv) < 8)
+       struct i915_wa_list *wal = &i915->gt_wa_list;
+
+       wa_init_start(wal, "GT");
+
+       if (INTEL_GEN(i915) < 8)
                return;
-       else if (IS_BROADWELL(dev_priv))
-               bdw_gt_workarounds_apply(dev_priv);
-       else if (IS_CHERRYVIEW(dev_priv))
-               chv_gt_workarounds_apply(dev_priv);
-       else if (IS_SKYLAKE(dev_priv))
-               skl_gt_workarounds_apply(dev_priv);
-       else if (IS_BROXTON(dev_priv))
-               bxt_gt_workarounds_apply(dev_priv);
-       else if (IS_KABYLAKE(dev_priv))
-               kbl_gt_workarounds_apply(dev_priv);
-       else if (IS_GEMINILAKE(dev_priv))
-               glk_gt_workarounds_apply(dev_priv);
-       else if (IS_COFFEELAKE(dev_priv))
-               cfl_gt_workarounds_apply(dev_priv);
-       else if (IS_CANNONLAKE(dev_priv))
-               cnl_gt_workarounds_apply(dev_priv);
-       else if (IS_ICELAKE(dev_priv))
-               icl_gt_workarounds_apply(dev_priv);
+       else if (IS_BROADWELL(i915))
+               return;
+       else if (IS_CHERRYVIEW(i915))
+               return;
+       else if (IS_SKYLAKE(i915))
+               skl_gt_workarounds_init(i915);
+       else if (IS_BROXTON(i915))
+               bxt_gt_workarounds_init(i915);
+       else if (IS_KABYLAKE(i915))
+               kbl_gt_workarounds_init(i915);
+       else if (IS_GEMINILAKE(i915))
+               glk_gt_workarounds_init(i915);
+       else if (IS_COFFEELAKE(i915))
+               cfl_gt_workarounds_init(i915);
+       else if (IS_CANNONLAKE(i915))
+               cnl_gt_workarounds_init(i915);
+       else if (IS_ICELAKE(i915))
+               icl_gt_workarounds_init(i915);
        else
-               MISSING_CASE(INTEL_GEN(dev_priv));
+               MISSING_CASE(INTEL_GEN(i915));
+
+       wa_init_finish(wal);
+}
+
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+                  const struct i915_wa_list *wal)
+{
+       enum forcewake_domains fw = 0;
+       struct i915_wa *wa;
+       unsigned int i;
+
+       for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+               fw |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    wa->reg,
+                                                    FW_REG_READ |
+                                                    FW_REG_WRITE);
+
+       return fw;
+}
+
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+{
+       enum forcewake_domains fw;
+       unsigned long flags;
+       struct i915_wa *wa;
+       unsigned int i;
+
+       if (!wal->count)
+               return;
+
+       fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+       intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+       for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+               u32 val = I915_READ_FW(wa->reg);
+
+               val &= ~wa->mask;
+               val |= wa->val;
+
+               I915_WRITE_FW(wa->reg, val);
+       }
+
+       intel_uncore_forcewake_put__locked(dev_priv, fw);
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+       DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+       wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
 }
 
 struct whitelist {
@@ -1077,6 +1126,146 @@ void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
        whitelist_apply(engine, whitelist_build(engine, &w));
 }
 
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *i915 = engine->i915;
+       struct i915_wa_list *wal = &engine->wa_list;
+
+       if (IS_ICELAKE(i915)) {
+               /* This is not an Wa. Enable for better image quality */
+               wa_masked_en(wal,
+                            _3D_CHICKEN3,
+                            _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+               /* WaPipelineFlushCoherentLines:icl */
+               wa_write_or(wal,
+                           GEN8_L3SQCREG4,
+                           GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+               /*
+                * Wa_1405543622:icl
+                * Formerly known as WaGAPZPriorityScheme
+                */
+               wa_write_or(wal,
+                           GEN8_GARBCNTL,
+                           GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+               /*
+                * Wa_1604223664:icl
+                * Formerly known as WaL3BankAddressHashing
+                */
+               wa_write_masked_or(wal,
+                                  GEN8_GARBCNTL,
+                                  GEN11_HASH_CTRL_EXCL_MASK,
+                                  GEN11_HASH_CTRL_EXCL_BIT0);
+               wa_write_masked_or(wal,
+                                  GEN11_GLBLINVL,
+                                  GEN11_BANK_HASH_ADDR_EXCL_MASK,
+                                  GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+
+               /*
+                * Wa_1405733216:icl
+                * Formerly known as WaDisableCleanEvicts
+                */
+               wa_write_or(wal,
+                           GEN8_L3SQCREG4,
+                           GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+               /* Wa_1604302699:icl */
+               wa_write_or(wal,
+                           GEN10_L3_CHICKEN_MODE_REGISTER,
+                           GEN11_I2M_WRITE_DISABLE);
+
+               /* WaForwardProgressSoftReset:icl */
+               wa_write_or(wal,
+                           GEN10_SCRATCH_LNCF2,
+                           PMFLUSHDONE_LNICRSDROP |
+                           PMFLUSH_GAPL3UNBLOCK |
+                           PMFLUSHDONE_LNEBLK);
+       }
+
+       if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+               /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+               wa_masked_en(wal,
+                            GEN7_FF_SLICE_CS_CHICKEN1,
+                            GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+       }
+
+       if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+               /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+               wa_write_or(wal,
+                           GEN8_GARBCNTL,
+                           GEN9_GAPS_TSV_CREDIT_DISABLE);
+       }
+
+       if (IS_BROXTON(i915)) {
+               /* WaDisablePooledEuLoadBalancingFix:bxt */
+               wa_masked_en(wal,
+                            FF_SLICE_CS_CHICKEN2,
+                            GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+       }
+
+       if (IS_GEN9(i915)) {
+               /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+               wa_masked_en(wal,
+                            GEN9_CSFE_CHICKEN1_RCS,
+                            GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+               /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+               wa_write_or(wal,
+                           BDW_SCRATCH1,
+                           GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+               /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+               if (IS_GEN9_LP(i915))
+                       wa_write_masked_or(wal,
+                                          GEN8_L3SQCREG1,
+                                          L3_PRIO_CREDITS_MASK,
+                                          L3_GENERAL_PRIO_CREDITS(62) |
+                                          L3_HIGH_PRIO_CREDITS(2));
+
+               /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+               wa_write_or(wal,
+                           GEN8_L3SQCREG4,
+                           GEN8_LQSC_FLUSH_COHERENT_LINES);
+       }
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *i915 = engine->i915;
+       struct i915_wa_list *wal = &engine->wa_list;
+
+       /* WaKBLVECSSemaphoreWaitPoll:kbl */
+       if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+               wa_write(wal,
+                        RING_SEMA_WAIT_POLL(engine->mmio_base),
+                        1);
+       }
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct i915_wa_list *wal = &engine->wa_list;
+
+       if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+               return;
+
+       wa_init_start(wal, engine->name);
+
+       if (engine->id == RCS)
+               rcs_engine_wa_init(engine);
+       else
+               xcs_engine_wa_init(engine);
+
+       wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+       wa_list_apply(engine->i915, &engine->wa_list);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/intel_workarounds.c"
 #endif
index b11d062..979695a 100644 (file)
@@ -7,11 +7,35 @@
 #ifndef _I915_WORKAROUNDS_H_
 #define _I915_WORKAROUNDS_H_
 
+#include <linux/slab.h>
+
+struct i915_wa {
+       i915_reg_t        reg;
+       u32               mask;
+       u32               val;
+};
+
+struct i915_wa_list {
+       const char      *name;
+       struct i915_wa  *list;
+       unsigned int    count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+       kfree(wal->list);
+       memset(wal, 0, sizeof(*wal));
+}
+
 int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
 int intel_ctx_workarounds_emit(struct i915_request *rq);
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
 
 void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
 
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+
 #endif
index 66df1b1..27b507e 100644 (file)
@@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
        dsi->encoder.possible_crtcs = 1;
 
        /* If there's a bridge, attach to it and let it create the connector */
-       ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
-       if (ret) {
-               DRM_ERROR("Failed to attach bridge to drm\n");
-
+       if (dsi->bridge) {
+               ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+               if (ret) {
+                       DRM_ERROR("Failed to attach bridge to drm\n");
+                       goto err_encoder_cleanup;
+               }
+       } else {
                /* Otherwise create our own connector and attach to a panel */
                ret = mtk_dsi_create_connector(drm, dsi);
                if (ret)
index 6cbbae3..db1bf7f 100644 (file)
@@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 /******************************************************************************
  * EVO channel helpers
  *****************************************************************************/
+static void
+evo_flush(struct nv50_dmac *dmac)
+{
+       /* Push buffer fetches are not coherent with BAR1, we need to ensure
+        * writes have been flushed right through to VRAM before writing PUT.
+        */
+       if (dmac->push.type & NVIF_MEM_VRAM) {
+               struct nvif_device *device = dmac->base.device;
+               nvif_wr32(&device->object, 0x070000, 0x00000001);
+               nvif_msec(device, 2000,
+                       if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+                               break;
+               );
+       }
+}
+
 u32 *
 evo_wait(struct nv50_dmac *evoc, int nr)
 {
@@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
        mutex_lock(&dmac->lock);
        if (put + nr >= (PAGE_SIZE / 4) - 8) {
                dmac->ptr[put] = 0x20000000;
+               evo_flush(dmac);
 
                nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
                if (nvif_msec(device, 2000,
@@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
 {
        struct nv50_dmac *dmac = evoc;
 
-       /* Push buffer fetches are not coherent with BAR1, we need to ensure
-        * writes have been flushed right through to VRAM before writing PUT.
-        */
-       if (dmac->push.type & NVIF_MEM_VRAM) {
-               struct nvif_device *device = dmac->base.device;
-               nvif_wr32(&device->object, 0x070000, 0x00000001);
-               nvif_msec(device, 2000,
-                       if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
-                               break;
-               );
-       }
+       evo_flush(dmac);
 
        nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
        mutex_unlock(&dmac->lock);
@@ -1264,6 +1271,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
 {
        struct nv50_mstm *mstm = *pmstm;
        if (mstm) {
+               drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
                kfree(*pmstm);
                *pmstm = NULL;
        }
index 2b2baf6..d2928d4 100644 (file)
@@ -1171,10 +1171,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
                goto err_free;
        }
 
+       err = nouveau_drm_device_init(drm);
+       if (err)
+               goto err_put;
+
        platform_set_drvdata(pdev, drm);
 
        return drm;
 
+err_put:
+       drm_dev_put(drm);
 err_free:
        nvkm_device_del(pdevice);
 
index 941f352..5864cb4 100644 (file)
@@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
        return 0;
 }
 
-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
-{
-       rockchip_drm_platform_remove(pdev);
-}
-
 static const struct of_device_id rockchip_drm_dt_ids[] = {
        { .compatible = "rockchip,display-subsystem", },
        { /* sentinel */ },
@@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
 static struct platform_driver rockchip_drm_platform_driver = {
        .probe = rockchip_drm_platform_probe,
        .remove = rockchip_drm_platform_remove,
-       .shutdown = rockchip_drm_platform_shutdown,
        .driver = {
                .name = "rockchip-drm",
                .of_match_table = rockchip_drm_dt_ids,
index 61a84b9..d7a2dfb 100644 (file)
@@ -49,6 +49,8 @@
 
 #define VMWGFX_REPO "In Tree"
 
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
 
 /**
  * Fully encoded drm commands. Might move to vmw_drm.h
@@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                spin_unlock(&dev_priv->cap_lock);
        }
 
-
+       vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
index 59f6142..aca974b 100644 (file)
@@ -606,6 +606,9 @@ struct vmw_private {
 
        struct vmw_cmdbuf_man *cman;
        DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+       /* Validation memory reservation */
+       struct vmw_validation_mem vvm;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
 extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+                                       size_t gran);
 /**
  * TTM buffer object driver - vmwgfx_ttm_buffer.c
  */
index 5a6b70b..f2d13a7 100644 (file)
@@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                                      void *buf)
 {
        struct vmw_buffer_object *vmw_bo;
-       int ret;
 
        struct {
                uint32_t header;
@@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
        return vmw_translate_guest_ptr(dev_priv, sw_context,
                                       &cmd->body.ptr,
                                       &vmw_bo);
-       return ret;
 }
 
 
@@ -3837,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        struct sync_file *sync_file = NULL;
        DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
 
+       vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
        if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0) {
index 7b1e5a5..f882470 100644 (file)
@@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
        drm_global_item_unref(&dev_priv->bo_global_ref.ref);
        drm_global_item_unref(&dev_priv->mem_global_ref);
 }
+
+/* struct vmw_validation_mem callback */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
+{
+       static struct ttm_operation_ctx ctx = {.interruptible = false,
+                                              .no_wait_gpu = false};
+       struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+       return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
+}
+
+/* struct vmw_validation_mem callback */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
+{
+       struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+       return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
+ * rather than a struct vmw_validation_mem is to make sure assumption in the
+ * callbacks that struct vmw_private derives from struct vmw_validation_mem
+ * holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
+{
+       struct vmw_validation_mem *vvm = &dev_priv->vvm;
+
+       vvm->reserve_mem = vmw_vmt_reserve;
+       vvm->unreserve_mem = vmw_vmt_unreserve;
+       vvm->gran = gran;
+}
index 184025f..f116f09 100644 (file)
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
                return NULL;
 
        if (ctx->mem_size_left < size) {
-               struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               struct page *page;
 
+               if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+                       int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+                       if (ret)
+                               return NULL;
+
+                       ctx->vm_size_left += ctx->vm->gran;
+                       ctx->total_mem += ctx->vm->gran;
+               }
+
+               page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        return NULL;
 
+               if (ctx->vm)
+                       ctx->vm_size_left -= PAGE_SIZE;
+
                list_add_tail(&page->lru, &ctx->page_list);
                ctx->page_address = page_address(page);
                ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
        }
 
        ctx->mem_size_left = 0;
+       if (ctx->vm && ctx->total_mem) {
+               ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+               ctx->total_mem = 0;
+               ctx->vm_size_left = 0;
+       }
 }
 
 /**
index b57e329..3b396fe 100644 (file)
 #include <drm/ttm/ttm_execbuf_util.h>
 
 /**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory
+ * @unreserve_mem: Callback to unreserve memory
+ * @gran: Reservation granularity. Contains a hint how much memory should
+ * be reserved in each call to @reserve_mem(). A slow implementation may want
+ * reservation to be done in large batches.
+ */
+struct vmw_validation_mem {
+       int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+       void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+       size_t gran;
+};
+
+/**
  * struct vmw_validation_context - Per command submission validation context
  * @ht: Hash table used to find resource- or buffer object duplicates
  * @resource_list: List head for resource validation metadata
  * buffer objects
  * @mem_size_left: Free memory left in the last page in @page_list
  * @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that so far has not been allocated.
+ * @total_mem: Amount of reserved memory.
  */
 struct vmw_validation_context {
        struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
        unsigned int merge_dups;
        unsigned int mem_size_left;
        u8 *page_address;
+       struct vmw_validation_mem *vm;
+       size_t vm_size_left;
+       size_t total_mem;
 };
 
 struct vmw_buffer_object;
@@ -102,6 +124,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
 }
 
 /**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+                          struct vmw_validation_mem *vm)
+{
+       ctx->vm = vm;
+}
+
+/**
  * vmw_validation_set_ht - Register a hash table for duplicate finding
  * @ctx: The validation context
  * @ht: Pointer to a hash table to use for duplicate finding
index ed35c9a..27519eb 100644 (file)
@@ -17,6 +17,9 @@
 #ifndef HID_IDS_H_FILE
 #define HID_IDS_H_FILE
 
+#define USB_VENDOR_ID_258A             0x258a
+#define USB_DEVICE_ID_258A_6A88                0x6a88
+
 #define USB_VENDOR_ID_3M               0x0596
 #define USB_DEVICE_ID_3M1968           0x0500
 #define USB_DEVICE_ID_3M2256           0x0502
 #define USB_VENDOR_ID_REALTEK          0x0bda
 #define USB_DEVICE_ID_REALTEK_READER   0x0152
 
+#define USB_VENDOR_ID_RETROUSB         0xf000
+#define USB_DEVICE_ID_RETROUSB_SNES_RETROPAD   0x0003
+#define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT  0x00f1
+
 #define USB_VENDOR_ID_ROCCAT           0x1e7d
 #define USB_DEVICE_ID_ROCCAT_ARVO      0x30d4
 #define USB_DEVICE_ID_ROCCAT_ISKU      0x319c
index 1882a4a..98b059d 100644 (file)
@@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
 
 static const struct hid_device_id ite_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ite_devices);
index c85a799..94088c0 100644 (file)
@@ -137,6 +137,8 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
index 97954f5..1c1a251 100644 (file)
@@ -4,7 +4,7 @@ menu "Microsoft Hyper-V guest support"
 
 config HYPERV
        tristate "Microsoft Hyper-V client drivers"
-       depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST
+       depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
        select PARAVIRT
        help
          Select this option to run Linux as a Hyper-V client operating
index 283d184..d0ff656 100644 (file)
@@ -316,6 +316,8 @@ static ssize_t out_intr_mask_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
 }
@@ -329,6 +331,8 @@ static ssize_t out_read_index_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_read_index);
 }
@@ -343,6 +347,8 @@ static ssize_t out_write_index_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_write_index);
 }
@@ -357,6 +363,8 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
 }
@@ -371,6 +379,8 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
 }
@@ -384,6 +394,8 @@ static ssize_t in_intr_mask_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
 }
@@ -397,6 +409,8 @@ static ssize_t in_read_index_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_read_index);
 }
@@ -410,6 +424,8 @@ static ssize_t in_write_index_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_write_index);
 }
@@ -424,6 +440,8 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
 }
@@ -438,6 +456,8 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
 }
index 25d43c8..558de0b 100644 (file)
@@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
        struct net_device *cookie_ndev = cookie;
        bool match = false;
 
+       if (!rdma_ndev)
+               return false;
+
        rcu_read_lock();
        if (netif_is_bond_master(cookie_ndev) &&
            rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
index 9b20479..7e6d709 100644 (file)
@@ -12500,7 +12500,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
        }
 
        /* allocate space for the counter values */
-       dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
+       dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
+                           GFP_KERNEL);
        if (!dd->cntrs)
                goto bail;
 
index 1401b6e..2b88234 100644 (file)
@@ -155,6 +155,8 @@ struct hfi1_ib_stats {
 extern struct hfi1_ib_stats hfi1_stats;
 extern const struct pci_error_handlers hfi1_pci_err_handler;
 
+extern int num_driver_cntrs;
+
 /*
  * First-cut criterion for "device is active" is
  * two thousand dwords combined Tx, Rx traffic per
index 6f3bc4d..1a01624 100644 (file)
@@ -340,6 +340,13 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
        default:
                break;
        }
+
+       /*
+        * System latency between send and schedule is large enough that
+        * forcing call_send to true for piothreshold packets is necessary.
+        */
+       if (wqe->length <= piothreshold)
+               *call_send = true;
        return 0;
 }
 
index 48e11e5..a365089 100644 (file)
@@ -1479,7 +1479,7 @@ static const char * const driver_cntr_names[] = {
 static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
 static const char **dev_cntr_names;
 static const char **port_cntr_names;
-static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
 static int num_dev_cntrs;
 static int num_port_cntrs;
 static int cntr_names_initialized;
index 61aab7c..45c421c 100644 (file)
@@ -1066,7 +1066,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
 
        err = uverbs_get_flags32(&access, attrs,
                                 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
-                                IB_ACCESS_SUPPORTED);
+                                IB_ACCESS_LOCAL_WRITE |
+                                IB_ACCESS_REMOTE_WRITE |
+                                IB_ACCESS_REMOTE_READ);
        if (err)
                return err;
 
index 4ead8c0..7309fb6 100644 (file)
@@ -552,14 +552,13 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                        u64 io_virt, size_t bcnt, u32 *bytes_mapped)
 {
+       int npages = 0, current_seq, page_shift, ret, np;
+       bool implicit = false;
        struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
        u64 access_mask = ODP_READ_ALLOWED_BIT;
-       int npages = 0, page_shift, np;
        u64 start_idx, page_mask;
        struct ib_umem_odp *odp;
-       int current_seq;
        size_t size;
-       int ret;
 
        if (!odp_mr->page_list) {
                odp = implicit_mr_get_data(mr, io_virt, bcnt);
@@ -567,7 +566,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                if (IS_ERR(odp))
                        return PTR_ERR(odp);
                mr = odp->private;
-
+               implicit = true;
        } else {
                odp = odp_mr;
        }
@@ -646,7 +645,7 @@ next_mr:
 
 out:
        if (ret == -EAGAIN) {
-               if (mr->parent || !odp->dying) {
+               if (implicit || !odp->dying) {
                        unsigned long timeout =
                                msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
 
index 5936de7..6fc9383 100644 (file)
@@ -930,6 +930,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
        bool dirty_flag;
        *result = true;
 
+       if (from_cblock(cmd->cache_blocks) == 0)
+               /* Nothing to do */
+               return 0;
+
        r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
                                   from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
        if (r) {
index 0bd8d49..dadd969 100644 (file)
@@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t)
 struct dm_thin_new_mapping;
 
 /*
- * The pool runs in 4 modes.  Ordered in degraded order for comparisons.
+ * The pool runs in various modes.  Ordered in degraded order for comparisons.
  */
 enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
@@ -282,9 +282,38 @@ struct pool {
        mempool_t mapping_pool;
 };
 
-static enum pool_mode get_pool_mode(struct pool *pool);
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
 
+static enum pool_mode get_pool_mode(struct pool *pool)
+{
+       return pool->pf.mode;
+}
+
+static void notify_of_pool_mode_change(struct pool *pool)
+{
+       const char *descs[] = {
+               "write",
+               "out-of-data-space",
+               "read-only",
+               "read-only",
+               "fail"
+       };
+       const char *extra_desc = NULL;
+       enum pool_mode mode = get_pool_mode(pool);
+
+       if (mode == PM_OUT_OF_DATA_SPACE) {
+               if (!pool->pf.error_if_no_space)
+                       extra_desc = " (queue IO)";
+               else
+                       extra_desc = " (error IO)";
+       }
+
+       dm_table_event(pool->ti->table);
+       DMINFO("%s: switching pool to %s%s mode",
+              dm_device_name(pool->pool_md),
+              descs[(int)mode], extra_desc ? : "");
+}
+
 /*
  * Target context for a pool.
  */
@@ -2351,8 +2380,6 @@ static void do_waker(struct work_struct *ws)
        queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
-static void notify_of_pool_mode_change_to_oods(struct pool *pool);
-
 /*
  * We're holding onto IO to allow userland time to react.  After the
  * timeout either the pool will have been resized (and thus back in
@@ -2365,7 +2392,7 @@ static void do_no_space_timeout(struct work_struct *ws)
 
        if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
                pool->pf.error_if_no_space = true;
-               notify_of_pool_mode_change_to_oods(pool);
+               notify_of_pool_mode_change(pool);
                error_retry_list_with_code(pool, BLK_STS_NOSPC);
        }
 }
@@ -2433,26 +2460,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 
 /*----------------------------------------------------------------*/
 
-static enum pool_mode get_pool_mode(struct pool *pool)
-{
-       return pool->pf.mode;
-}
-
-static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
-{
-       dm_table_event(pool->ti->table);
-       DMINFO("%s: switching pool to %s mode",
-              dm_device_name(pool->pool_md), new_mode);
-}
-
-static void notify_of_pool_mode_change_to_oods(struct pool *pool)
-{
-       if (!pool->pf.error_if_no_space)
-               notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
-       else
-               notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
-}
-
 static bool passdown_enabled(struct pool_c *pt)
 {
        return pt->adjusted_pf.discard_passdown;
@@ -2501,8 +2508,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 
        switch (new_mode) {
        case PM_FAIL:
-               if (old_mode != new_mode)
-                       notify_of_pool_mode_change(pool, "failure");
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_fail;
                pool->process_discard = process_bio_fail;
@@ -2516,8 +2521,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 
        case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
-               if (!is_read_only_pool_mode(old_mode))
-                       notify_of_pool_mode_change(pool, "read-only");
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_read_only;
                pool->process_discard = process_bio_success;
@@ -2538,8 +2541,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                 * alarming rate.  Adjust your low water mark if you're
                 * frequently seeing this mode.
                 */
-               if (old_mode != new_mode)
-                       notify_of_pool_mode_change_to_oods(pool);
                pool->out_of_data_space = true;
                pool->process_bio = process_bio_read_only;
                pool->process_discard = process_discard_bio;
@@ -2552,8 +2553,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                break;
 
        case PM_WRITE:
-               if (old_mode != new_mode)
-                       notify_of_pool_mode_change(pool, "write");
                if (old_mode == PM_OUT_OF_DATA_SPACE)
                        cancel_delayed_work_sync(&pool->no_space_timeout);
                pool->out_of_data_space = false;
@@ -2573,6 +2572,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
         * doesn't cause an unexpected mode transition on resume.
         */
        pt->adjusted_pf.mode = new_mode;
+
+       if (old_mode != new_mode)
+               notify_of_pool_mode_change(pool);
 }
 
 static void abort_transaction(struct pool *pool)
@@ -4023,7 +4025,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 20, 0},
+       .version = {1, 21, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -4397,7 +4399,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 20, 0},
+       .version = {1, 21, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
index 981154e..6af5bab 100644 (file)
@@ -20,7 +20,6 @@ struct dmz_bioctx {
        struct dm_zone          *zone;
        struct bio              *bio;
        refcount_t              ref;
-       blk_status_t            status;
 };
 
 /*
@@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
 {
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 
-       if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
-               bioctx->status = status;
-       bio_endio(bio);
+       if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
+               bio->bi_status = status;
+
+       if (refcount_dec_and_test(&bioctx->ref)) {
+               struct dm_zone *zone = bioctx->zone;
+
+               if (zone) {
+                       if (bio->bi_status != BLK_STS_OK &&
+                           bio_op(bio) == REQ_OP_WRITE &&
+                           dmz_is_seq(zone))
+                               set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+                       dmz_deactivate_zone(zone);
+               }
+               bio_endio(bio);
+       }
 }
 
 /*
- * Partial clone read BIO completion callback. This terminates the
+ * Completion callback for an internally cloned target BIO. This terminates the
  * target BIO when there are no more references to its context.
  */
-static void dmz_read_bio_end_io(struct bio *bio)
+static void dmz_clone_endio(struct bio *clone)
 {
-       struct dmz_bioctx *bioctx = bio->bi_private;
-       blk_status_t status = bio->bi_status;
+       struct dmz_bioctx *bioctx = clone->bi_private;
+       blk_status_t status = clone->bi_status;
 
-       bio_put(bio);
+       bio_put(clone);
        dmz_bio_endio(bioctx->bio, status);
 }
 
 /*
- * Issue a BIO to a zone. The BIO may only partially process the
+ * Issue a clone of a target BIO. The clone may only partially process the
  * original target BIO.
  */
-static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
-                              struct bio *bio, sector_t chunk_block,
-                              unsigned int nr_blocks)
+static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
+                         struct bio *bio, sector_t chunk_block,
+                         unsigned int nr_blocks)
 {
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-       sector_t sector;
        struct bio *clone;
 
-       /* BIO remap sector */
-       sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-
-       /* If the read is not partial, there is no need to clone the BIO */
-       if (nr_blocks == dmz_bio_blocks(bio)) {
-               /* Setup and submit the BIO */
-               bio->bi_iter.bi_sector = sector;
-               refcount_inc(&bioctx->ref);
-               generic_make_request(bio);
-               return 0;
-       }
-
-       /* Partial BIO: we need to clone the BIO */
        clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
        if (!clone)
                return -ENOMEM;
 
-       /* Setup the clone */
-       clone->bi_iter.bi_sector = sector;
+       bio_set_dev(clone, dmz->dev->bdev);
+       clone->bi_iter.bi_sector =
+               dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
        clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
-       clone->bi_end_io = dmz_read_bio_end_io;
+       clone->bi_end_io = dmz_clone_endio;
        clone->bi_private = bioctx;
 
        bio_advance(bio, clone->bi_iter.bi_size);
 
-       /* Submit the clone */
        refcount_inc(&bioctx->ref);
        generic_make_request(clone);
 
+       if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+               zone->wp_block += nr_blocks;
+
        return 0;
 }
 
@@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
                if (nr_blocks) {
                        /* Valid blocks found: read them */
                        nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
-                       ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+                       ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
                        if (ret)
                                return ret;
                        chunk_block += nr_blocks;
@@ -229,25 +229,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
 }
 
 /*
- * Issue a write BIO to a zone.
- */
-static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
-                                struct bio *bio, sector_t chunk_block,
-                                unsigned int nr_blocks)
-{
-       struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
-       /* Setup and submit the BIO */
-       bio_set_dev(bio, dmz->dev->bdev);
-       bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-       refcount_inc(&bioctx->ref);
-       generic_make_request(bio);
-
-       if (dmz_is_seq(zone))
-               zone->wp_block += nr_blocks;
-}
-
-/*
  * Write blocks directly in a data zone, at the write pointer.
  * If a buffer zone is assigned, invalidate the blocks written
  * in place.
@@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz,
                return -EROFS;
 
        /* Submit write */
-       dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
+       ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
+       if (ret)
+               return ret;
 
        /*
         * Validate the blocks in the data zone and invalidate
@@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
                return -EROFS;
 
        /* Submit write */
-       dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+       ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+       if (ret)
+               return ret;
 
        /*
         * Validate the blocks in the buffer zone
@@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
        bioctx->zone = NULL;
        bioctx->bio = bio;
        refcount_set(&bioctx->ref, 1);
-       bioctx->status = BLK_STS_OK;
 
        /* Set the BIO pending in the flush list */
        if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
@@ -624,35 +608,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 }
 
 /*
- * Completed target BIO processing.
- */
-static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
-{
-       struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
-       if (bioctx->status == BLK_STS_OK && *error)
-               bioctx->status = *error;
-
-       if (!refcount_dec_and_test(&bioctx->ref))
-               return DM_ENDIO_INCOMPLETE;
-
-       /* Done */
-       bio->bi_status = bioctx->status;
-
-       if (bioctx->zone) {
-               struct dm_zone *zone = bioctx->zone;
-
-               if (*error && bio_op(bio) == REQ_OP_WRITE) {
-                       if (dmz_is_seq(zone))
-                               set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
-               }
-               dmz_deactivate_zone(zone);
-       }
-
-       return DM_ENDIO_DONE;
-}
-
-/*
  * Get zoned device information.
  */
 static int dmz_get_zoned_device(struct dm_target *ti, char *path)
@@ -946,7 +901,6 @@ static struct target_type dmz_type = {
        .ctr             = dmz_ctr,
        .dtr             = dmz_dtr,
        .map             = dmz_map,
-       .end_io          = dmz_end_io,
        .io_hints        = dmz_io_hints,
        .prepare_ioctl   = dmz_prepare_ioctl,
        .postsuspend     = dmz_suspend,
index c510179..63a7c41 100644 (file)
@@ -1593,6 +1593,8 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                return ret;
        }
 
+       blk_queue_split(md->queue, &bio);
+
        init_clone_info(&ci, md, map, bio);
 
        if (bio->bi_opf & REQ_PREFLUSH) {
index 8add62a..102eb35 100644 (file)
@@ -110,6 +110,19 @@ config MEDIA_CONTROLLER_DVB
 
          This is currently experimental.
 
+config MEDIA_CONTROLLER_REQUEST_API
+       bool "Enable Media controller Request API (EXPERIMENTAL)"
+       depends on MEDIA_CONTROLLER && STAGING_MEDIA
+       default n
+       ---help---
+         DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+
+         This option enables the Request API for the Media controller and V4L2
+         interfaces. It is currently needed by a few stateless codec drivers.
+
+         There is currently no intention to provide API or ABI stability for
+         this new API as of yet.
+
 #
 # Video4Linux support
 #      Only enables if one of the V4L2 types (ATV, webcam, radio) is selected
index 975ff56..8ff8722 100644 (file)
@@ -947,7 +947,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
        }
        atomic_dec(&q->owned_by_drv_count);
 
-       if (vb->req_obj.req) {
+       if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
                /* This is not supported at the moment */
                WARN_ON(state == VB2_BUF_STATE_REQUEUEING);
                media_request_object_unbind(&vb->req_obj);
@@ -1359,8 +1359,12 @@ static void vb2_req_release(struct media_request_object *obj)
 {
        struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
 
-       if (vb->state == VB2_BUF_STATE_IN_REQUEST)
+       if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
                vb->state = VB2_BUF_STATE_DEQUEUED;
+               if (vb->request)
+                       media_request_put(vb->request);
+               vb->request = NULL;
+       }
 }
 
 static const struct media_request_object_ops vb2_core_req_ops = {
@@ -1528,6 +1532,18 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
                        return ret;
 
                vb->state = VB2_BUF_STATE_IN_REQUEST;
+
+               /*
+                * Increment the refcount and store the request.
+                * The request refcount is decremented again when the
+                * buffer is dequeued. This is to prevent vb2_buffer_done()
+                * from freeing the request from interrupt context, which can
+                * happen if the application closed the request fd after
+                * queueing the request.
+                */
+               media_request_get(req);
+               vb->request = req;
+
                /* Fill buffer information for the userspace */
                if (pb) {
                        call_void_bufop(q, copy_timestamp, vb, pb);
@@ -1749,10 +1765,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
                        call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
                        vb->planes[i].dbuf_mapped = 0;
                }
-       if (vb->req_obj.req) {
-               media_request_object_unbind(&vb->req_obj);
-               media_request_object_put(&vb->req_obj);
-       }
        call_void_bufop(q, init_buffer, vb);
 }
 
@@ -1797,6 +1809,14 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
        /* go back to dequeued state */
        __vb2_dqbuf(vb);
 
+       if (WARN_ON(vb->req_obj.req)) {
+               media_request_object_unbind(&vb->req_obj);
+               media_request_object_put(&vb->req_obj);
+       }
+       if (vb->request)
+               media_request_put(vb->request);
+       vb->request = NULL;
+
        dprintk(2, "dqbuf of buffer %d, with state %d\n",
                        vb->index, vb->state);
 
@@ -1903,6 +1923,14 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
                        vb->prepared = false;
                }
                __vb2_dqbuf(vb);
+
+               if (vb->req_obj.req) {
+                       media_request_object_unbind(&vb->req_obj);
+                       media_request_object_put(&vb->req_obj);
+               }
+               if (vb->request)
+                       media_request_put(vb->request);
+               vb->request = NULL;
        }
 }
 
@@ -1940,10 +1968,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
                if (ret)
                        return ret;
                ret = vb2_start_streaming(q);
-               if (ret) {
-                       __vb2_queue_cancel(q);
+               if (ret)
                        return ret;
-               }
        }
 
        q->streaming = 1;
index a17033a..1d35aea 100644 (file)
@@ -333,10 +333,10 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
 }
 
 static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-                                   struct v4l2_buffer *b,
-                                   const char *opname,
+                                   struct v4l2_buffer *b, bool is_prepare,
                                    struct media_request **p_req)
 {
+       const char *opname = is_prepare ? "prepare_buf" : "qbuf";
        struct media_request *req;
        struct vb2_v4l2_buffer *vbuf;
        struct vb2_buffer *vb;
@@ -378,6 +378,9 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
                        return ret;
        }
 
+       if (is_prepare)
+               return 0;
+
        if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
                if (q->uses_requests) {
                        dprintk(1, "%s: queue uses requests\n", opname);
@@ -631,8 +634,10 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
                *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
        if (q->io_modes & VB2_DMABUF)
                *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
        if (q->supports_requests)
                *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
+#endif
 }
 
 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
@@ -657,7 +662,7 @@ int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
        if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
                return -EINVAL;
 
-       ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL);
+       ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
 
        return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
 }
@@ -729,7 +734,7 @@ int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
                return -EBUSY;
        }
 
-       ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req);
+       ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
        if (ret)
                return ret;
        ret = vb2_core_qbuf(q, b->index, b, req);
index bed2437..b8ec886 100644 (file)
@@ -381,10 +381,14 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
 static long media_device_request_alloc(struct media_device *mdev,
                                       int *alloc_fd)
 {
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
        if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
                return -ENOTTY;
 
        return media_request_alloc(mdev, alloc_fd);
+#else
+       return -ENOTTY;
+#endif
 }
 
 static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
index 013cdeb..13fb69c 100644 (file)
@@ -997,11 +997,18 @@ static int vicodec_start_streaming(struct vb2_queue *q,
 
        q_data->sequence = 0;
 
-       if (!V4L2_TYPE_IS_OUTPUT(q->type))
+       if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
+               if (!ctx->is_enc) {
+                       state->width = q_data->width;
+                       state->height = q_data->height;
+               }
                return 0;
+       }
 
-       state->width = q_data->width;
-       state->height = q_data->height;
+       if (ctx->is_enc) {
+               state->width = q_data->width;
+               state->height = q_data->height;
+       }
        state->ref_frame.width = state->ref_frame.height = 0;
        state->ref_frame.luma = kvmalloc(size + 2 * size / chroma_div,
                                         GFP_KERNEL);
index dcdc80e..9acc709 100644 (file)
@@ -276,8 +276,6 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
 
                list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
                        list_del(&buf->list);
-                       v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-                                                  &dev->ctrl_hdl_sdr_cap);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
index 903cebe..d666271 100644 (file)
@@ -204,8 +204,6 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
 
                list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
                        list_del(&buf->list);
-                       v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-                                                  &dev->ctrl_hdl_vbi_cap);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
index 9357c07..cd56476 100644 (file)
@@ -96,8 +96,6 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
 
                list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
                        list_del(&buf->list);
-                       v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-                                                  &dev->ctrl_hdl_vbi_out);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
index 9c8e8be..673772c 100644 (file)
@@ -243,8 +243,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
 
                list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
                        list_del(&buf->list);
-                       v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-                                                  &dev->ctrl_hdl_vid_cap);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
index aaf13f0..628eae1 100644 (file)
@@ -162,8 +162,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
 
                list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
                        list_del(&buf->list);
-                       v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-                                                  &dev->ctrl_hdl_vid_out);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
index 0b18f0b..8b0a263 100644 (file)
@@ -95,7 +95,7 @@ static void lif_configure_stream(struct vsp1_entity *entity,
        format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
                                            LIF_PAD_SOURCE);
 
-       switch (entity->vsp1->version & VI6_IP_VERSION_SOC_MASK) {
+       switch (entity->vsp1->version & VI6_IP_VERSION_MODEL_MASK) {
        case VI6_IP_VERSION_MODEL_VSPD_GEN2:
        case VI6_IP_VERSION_MODEL_VSPD_V2H:
                hbth = 1536;
index 5f2b033..10b8d94 100644 (file)
@@ -1563,7 +1563,7 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
        u64 offset;
        s64 val;
 
-       switch (ctrl->type) {
+       switch ((u32)ctrl->type) {
        case V4L2_CTRL_TYPE_INTEGER:
                return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
        case V4L2_CTRL_TYPE_INTEGER64:
@@ -2232,7 +2232,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
        is_array = nr_of_dims > 0;
 
        /* Prefill elem_size for all types handled by std_type_ops */
-       switch (type) {
+       switch ((u32)type) {
        case V4L2_CTRL_TYPE_INTEGER64:
                elem_size = sizeof(s64);
                break;
index c35b5b0..1119348 100644 (file)
@@ -472,7 +472,7 @@ out:
 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
                               struct mmc_blk_ioc_data *idata)
 {
-       struct mmc_command cmd = {};
+       struct mmc_command cmd = {}, sbc = {};
        struct mmc_data data = {};
        struct mmc_request mrq = {};
        struct scatterlist sg;
@@ -550,10 +550,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
        }
 
        if (idata->rpmb) {
-               err = mmc_set_blockcount(card, data.blocks,
-                       idata->ic.write_flag & (1 << 31));
-               if (err)
-                       return err;
+               sbc.opcode = MMC_SET_BLOCK_COUNT;
+               /*
+                * We don't do any blockcount validation because the max size
+                * may be increased by a future standard. We just copy the
+                * 'Reliable Write' bit here.
+                */
+               sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+               sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+               mrq.sbc = &sbc;
        }
 
        if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
index bc1bd2c..55997cf 100644 (file)
@@ -30,6 +30,7 @@
 #include "pwrseq.h"
 
 #define DEFAULT_CMD6_TIMEOUT_MS        500
+#define MIN_CACHE_EN_TIMEOUT_MS 1600
 
 static const unsigned int tran_exp[] = {
        10000,          100000,         1000000,        10000000,
@@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
                        card->cid.year += 16;
 
                /* check whether the eMMC card supports BKOPS */
-               if (!mmc_card_broken_hpi(card) &&
-                   ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+               if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
                        card->ext_csd.bkops = 1;
                        card->ext_csd.man_bkops_en =
                                        (ext_csd[EXT_CSD_BKOPS_EN] &
@@ -1782,20 +1782,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                if (err) {
                        pr_warn("%s: Enabling HPI failed\n",
                                mmc_hostname(card->host));
+                       card->ext_csd.hpi_en = 0;
                        err = 0;
-               } else
+               } else {
                        card->ext_csd.hpi_en = 1;
+               }
        }
 
        /*
-        * If cache size is higher than 0, this indicates
-        * the existence of cache and it can be turned on.
+        * If cache size is higher than 0, this indicates the existence of cache
+        * and it can be turned on. Note that some eMMCs from Micron has been
+        * reported to need ~800 ms timeout, while enabling the cache after
+        * sudden power failure tests. Let's extend the timeout to a minimum of
+        * DEFAULT_CACHE_EN_TIMEOUT_MS and do it for all cards.
         */
-       if (!mmc_card_broken_hpi(card) &&
-           card->ext_csd.cache_size > 0) {
+       if (card->ext_csd.cache_size > 0) {
+               unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
+
+               timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                               EXT_CSD_CACHE_CTRL, 1,
-                               card->ext_csd.generic_cmd6_time);
+                               EXT_CSD_CACHE_CTRL, 1, timeout_ms);
                if (err && err != -EBADMSG)
                        goto free_card;
 
index adf3268..c60a762 100644 (file)
@@ -104,6 +104,7 @@ struct mmc_omap_slot {
        unsigned int            vdd;
        u16                     saved_con;
        u16                     bus_mode;
+       u16                     power_mode;
        unsigned int            fclk_freq;
 
        struct tasklet_struct   cover_tasklet;
@@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        struct mmc_omap_slot *slot = mmc_priv(mmc);
        struct mmc_omap_host *host = slot->host;
        int i, dsor;
-       int clk_enabled;
+       int clk_enabled, init_stream;
 
        mmc_omap_select_slot(slot, 0);
 
@@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                slot->vdd = ios->vdd;
 
        clk_enabled = 0;
+       init_stream = 0;
        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                mmc_omap_set_power(slot, 0, ios->vdd);
@@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        case MMC_POWER_UP:
                /* Cannot touch dsor yet, just power up MMC */
                mmc_omap_set_power(slot, 1, ios->vdd);
+               slot->power_mode = ios->power_mode;
                goto exit;
        case MMC_POWER_ON:
                mmc_omap_fclk_enable(host, 1);
                clk_enabled = 1;
                dsor |= 1 << 11;
+               if (slot->power_mode != MMC_POWER_ON)
+                       init_stream = 1;
                break;
        }
+       slot->power_mode = ios->power_mode;
 
        if (slot->bus_mode != ios->bus_mode) {
                if (slot->pdata->set_bus_mode != NULL)
@@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        for (i = 0; i < 2; i++)
                OMAP_MMC_WRITE(host, CON, dsor);
        slot->saved_con = dsor;
-       if (ios->power_mode == MMC_POWER_ON) {
+       if (init_stream) {
                /* worst case at 400kHz, 80 cycles makes 200 microsecs */
                int usecs = 250;
 
@@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
        slot->host = host;
        slot->mmc = mmc;
        slot->id = id;
+       slot->power_mode = MMC_POWER_UNDEFINED;
        slot->pdata = &host->pdata->slots[id];
 
        host->slots[id] = slot;
index 467d889..3f4ea8f 100644 (file)
@@ -1909,7 +1909,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
        mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
        mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
-       mmc->max_seg_size = mmc->max_req_size;
 
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
@@ -1939,6 +1938,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
+       /*
+        * Limit the maximum segment size to the lower of the request size
+        * and the DMA engine device segment size limits.  In reality, with
+        * 32-bit transfers, the DMA engine can do longer segments than this
+        * but there is no way to represent that in the DMA model - if we
+        * increase this figure here, we get warnings from the DMA API debug.
+        */
+       mmc->max_seg_size = min3(mmc->max_req_size,
+                       dma_get_max_seg_size(host->rx_chan->device->dev),
+                       dma_get_max_seg_size(host->tx_chan->device->dev));
+
        /* Request IRQ for MMC operations */
        ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
                        mmc_hostname(mmc), host);
index 88347ce..d264391 100644 (file)
@@ -288,9 +288,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
        struct device *dev = omap_host->dev;
        struct mmc_ios *ios = &mmc->ios;
        u32 start_window = 0, max_window = 0;
+       bool dcrc_was_enabled = false;
        u8 cur_match, prev_match = 0;
        u32 length = 0, max_len = 0;
-       u32 ier = host->ier;
        u32 phase_delay = 0;
        int ret = 0;
        u32 reg;
@@ -317,9 +317,10 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
         * during the tuning procedure. So disable it during the
         * tuning procedure.
         */
-       ier &= ~SDHCI_INT_DATA_CRC;
-       sdhci_writel(host, ier, SDHCI_INT_ENABLE);
-       sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+       if (host->ier & SDHCI_INT_DATA_CRC) {
+               host->ier &= ~SDHCI_INT_DATA_CRC;
+               dcrc_was_enabled = true;
+       }
 
        while (phase_delay <= MAX_PHASE_DELAY) {
                sdhci_omap_set_dll(omap_host, phase_delay);
@@ -366,6 +367,9 @@ tuning_error:
 
 ret:
        sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+       /* Reenable forbidden interrupt */
+       if (dcrc_was_enabled)
+               host->ier |= SDHCI_INT_DATA_CRC;
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
        return ret;
index 7b95d08..e6ace31 100644 (file)
@@ -510,25 +510,25 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
 
        err = device_property_read_u32(host->mmc->parent,
                        "nvidia,pad-autocal-pull-up-offset-3v3-timeout",
-                       &autocal->pull_up_3v3);
+                       &autocal->pull_up_3v3_timeout);
        if (err)
                autocal->pull_up_3v3_timeout = 0;
 
        err = device_property_read_u32(host->mmc->parent,
                        "nvidia,pad-autocal-pull-down-offset-3v3-timeout",
-                       &autocal->pull_down_3v3);
+                       &autocal->pull_down_3v3_timeout);
        if (err)
                autocal->pull_down_3v3_timeout = 0;
 
        err = device_property_read_u32(host->mmc->parent,
                        "nvidia,pad-autocal-pull-up-offset-1v8-timeout",
-                       &autocal->pull_up_1v8);
+                       &autocal->pull_up_1v8_timeout);
        if (err)
                autocal->pull_up_1v8_timeout = 0;
 
        err = device_property_read_u32(host->mmc->parent,
                        "nvidia,pad-autocal-pull-down-offset-1v8-timeout",
-                       &autocal->pull_down_1v8);
+                       &autocal->pull_down_1v8_timeout);
        if (err)
                autocal->pull_down_1v8_timeout = 0;
 
index 99bdae5..df05352 100644 (file)
@@ -127,12 +127,12 @@ static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
 {
        u16 ctrl2;
 
-       ctrl2 = sdhci_readb(host, SDHCI_HOST_CONTROL2);
+       ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        if (ctrl2 & SDHCI_CTRL_V4_MODE)
                return;
 
        ctrl2 |= SDHCI_CTRL_V4_MODE;
-       sdhci_writeb(host, ctrl2, SDHCI_HOST_CONTROL);
+       sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
 }
 
 /*
@@ -216,8 +216,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
        timeout = ktime_add_ms(ktime_get(), 100);
 
        /* hw clears the bit when it's done */
-       while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
-               if (ktime_after(ktime_get(), timeout)) {
+       while (1) {
+               bool timedout = ktime_after(ktime_get(), timeout);
+
+               if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
+                       break;
+               if (timedout) {
                        pr_err("%s: Reset 0x%x never completed.\n",
                                mmc_hostname(host->mmc), (int)mask);
                        sdhci_dumpregs(host);
@@ -1608,9 +1612,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
 
        /* Wait max 20 ms */
        timeout = ktime_add_ms(ktime_get(), 20);
-       while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
-               & SDHCI_CLOCK_INT_STABLE)) {
-               if (ktime_after(ktime_get(), timeout)) {
+       while (1) {
+               bool timedout = ktime_after(ktime_get(), timeout);
+
+               clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+               if (clk & SDHCI_CLOCK_INT_STABLE)
+                       break;
+               if (timedout) {
                        pr_err("%s: Internal clock never stabilised.\n",
                               mmc_hostname(host->mmc));
                        sdhci_dumpregs(host);
index 0f749d1..8a517d8 100644 (file)
@@ -1124,7 +1124,7 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
        u16 *p = _p;
        int i;
 
-       regs->version = 0;
+       regs->version = chip->info->prod_num;
 
        memset(p, 0xff, 32 * sizeof(u16));
 
index 3b889ef..50dd6bf 100644 (file)
@@ -29,9 +29,6 @@
 #define RES_RING_CSR   1
 #define RES_RING_CMD   2
 
-static const struct of_device_id xgene_enet_of_match[];
-static const struct acpi_device_id xgene_enet_acpi_match[];
-
 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
        struct xgene_enet_raw_desc16 *raw_desc;
index 0de487a..5cd3135 100644 (file)
@@ -1282,6 +1282,7 @@ enum sp_rtnl_flag {
        BNX2X_SP_RTNL_TX_STOP,
        BNX2X_SP_RTNL_GET_DRV_VERSION,
        BNX2X_SP_RTNL_CHANGE_UDP_PORT,
+       BNX2X_SP_RTNL_UPDATE_SVID,
 };
 
 enum bnx2x_iov_flag {
@@ -2520,6 +2521,7 @@ void bnx2x_update_mfw_dump(struct bnx2x *bp);
 void bnx2x_init_ptp(struct bnx2x *bp);
 int bnx2x_configure_ptp_filters(struct bnx2x *bp);
 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
+void bnx2x_register_phc(struct bnx2x *bp);
 
 #define BNX2X_MAX_PHC_DRIFT 31000000
 #define BNX2X_PTP_TX_TIMEOUT
index 686899d..ecb1bd7 100644 (file)
@@ -2842,6 +2842,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        bnx2x_set_rx_mode_inner(bp);
 
        if (bp->flags & PTP_SUPPORTED) {
+               bnx2x_register_phc(bp);
                bnx2x_init_ptp(bp);
                bnx2x_configure_ptp_filters(bp);
        }
index 95309b2..b164f70 100644 (file)
@@ -2925,6 +2925,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 
+       /* Prepare parameters for function state transitions */
+       __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+       __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
        if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
                int func = BP_ABS_FUNC(bp);
                u32 val;
@@ -4311,7 +4315,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                                bnx2x_handle_eee_event(bp);
 
                        if (val & DRV_STATUS_OEM_UPDATE_SVID)
-                               bnx2x_handle_update_svid_cmd(bp);
+                               bnx2x_schedule_sp_rtnl(bp,
+                                       BNX2X_SP_RTNL_UPDATE_SVID, 0);
 
                        if (bp->link_vars.periodic_flags &
                            PERIODIC_FLAGS_LINK_EVENT) {
@@ -7723,6 +7728,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
                REG_WR(bp, reg_addr, val);
        }
 
+       if (CHIP_IS_E3B0(bp))
+               bp->flags |= PTP_SUPPORTED;
+
        return 0;
 }
 
@@ -8472,6 +8480,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
        /* Fill a user request section if needed */
        if (!test_bit(RAMROD_CONT, ramrod_flags)) {
                ramrod_param.user_req.u.vlan.vlan = vlan;
+               __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
                /* Set the command: ADD or DEL */
                if (set)
                        ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
@@ -8492,6 +8501,27 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
        return rc;
 }
 
+static int bnx2x_del_all_vlans(struct bnx2x *bp)
+{
+       struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
+       unsigned long ramrod_flags = 0, vlan_flags = 0;
+       struct bnx2x_vlan_entry *vlan;
+       int rc;
+
+       __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+       __set_bit(BNX2X_VLAN, &vlan_flags);
+       rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
+       if (rc)
+               return rc;
+
+       /* Mark that hw forgot all entries */
+       list_for_each_entry(vlan, &bp->vlan_reg, link)
+               vlan->hw = false;
+       bp->vlan_cnt = 0;
+
+       return 0;
+}
+
 int bnx2x_del_all_macs(struct bnx2x *bp,
                       struct bnx2x_vlan_mac_obj *mac_obj,
                       int mac_type, bool wait_for_comp)
@@ -9330,6 +9360,11 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
                BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
                          rc);
 
+       /* Remove all currently configured VLANs */
+       rc = bnx2x_del_all_vlans(bp);
+       if (rc < 0)
+               BNX2X_ERR("Failed to delete all VLANs\n");
+
        /* Disable LLH */
        if (!CHIP_IS_E1(bp))
                REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
@@ -9417,8 +9452,13 @@ unload_error:
         * function stop ramrod is sent, since as part of this ramrod FW access
         * PTP registers.
         */
-       if (bp->flags & PTP_SUPPORTED)
+       if (bp->flags & PTP_SUPPORTED) {
                bnx2x_stop_ptp(bp);
+               if (bp->ptp_clock) {
+                       ptp_clock_unregister(bp->ptp_clock);
+                       bp->ptp_clock = NULL;
+               }
+       }
 
        /* Disable HW interrupts, NAPI */
        bnx2x_netif_stop(bp, 1);
@@ -10359,6 +10399,9 @@ sp_rtnl_not_reset:
                               &bp->sp_rtnl_state))
                bnx2x_update_mng_version(bp);
 
+       if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
+               bnx2x_handle_update_svid_cmd(bp);
+
        if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
                               &bp->sp_rtnl_state)) {
                if (bnx2x_udp_port_update(bp)) {
@@ -11750,8 +11793,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
         * If maximum allowed number of connections is zero -
         * disable the feature.
         */
-       if (!bp->cnic_eth_dev.max_fcoe_conn)
+       if (!bp->cnic_eth_dev.max_fcoe_conn) {
                bp->flags |= NO_FCOE_FLAG;
+               eth_zero_addr(bp->fip_mac);
+       }
 }
 
 static void bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -12494,9 +12539,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
        bp->dump_preset_idx = 1;
 
-       if (CHIP_IS_E3B0(bp))
-               bp->flags |= PTP_SUPPORTED;
-
        return rc;
 }
 
@@ -13024,13 +13066,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
 
 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
 {
-       struct bnx2x_vlan_entry *vlan;
-
-       /* The hw forgot all entries after reload */
-       list_for_each_entry(vlan, &bp->vlan_reg, link)
-               vlan->hw = false;
-       bp->vlan_cnt = 0;
-
        /* Don't set rx mode here. Our caller will do it. */
        bnx2x_vlan_configure(bp, false);
 
@@ -13895,7 +13930,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
        return -ENOTSUPP;
 }
 
-static void bnx2x_register_phc(struct bnx2x *bp)
+void bnx2x_register_phc(struct bnx2x *bp)
 {
        /* Fill the ptp_clock_info struct and register PTP clock*/
        bp->ptp_clock_info.owner = THIS_MODULE;
@@ -14097,8 +14132,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
               dev->base_addr, bp->pdev->irq, dev->dev_addr);
        pcie_print_link_status(bp->pdev);
 
-       bnx2x_register_phc(bp);
-
        if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
                bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
 
@@ -14131,11 +14164,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
                           struct bnx2x *bp,
                           bool remove_netdev)
 {
-       if (bp->ptp_clock) {
-               ptp_clock_unregister(bp->ptp_clock);
-               bp->ptp_clock = NULL;
-       }
-
        /* Delete storage MAC address */
        if (!NO_FCOE(bp)) {
                rtnl_lock();
index 0bf2fd4..7a6e82d 100644 (file)
@@ -265,6 +265,7 @@ enum {
        BNX2X_ETH_MAC,
        BNX2X_ISCSI_ETH_MAC,
        BNX2X_NETQ_ETH_MAC,
+       BNX2X_VLAN,
        BNX2X_DONT_CONSUME_CAM_CREDIT,
        BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
 };
@@ -272,7 +273,8 @@ enum {
 #define BNX2X_VLAN_MAC_CMP_MASK        (1 << BNX2X_UC_LIST_MAC | \
                                 1 << BNX2X_ETH_MAC | \
                                 1 << BNX2X_ISCSI_ETH_MAC | \
-                                1 << BNX2X_NETQ_ETH_MAC)
+                                1 << BNX2X_NETQ_ETH_MAC | \
+                                1 << BNX2X_VLAN)
 #define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
        ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
 
index 9977757..adabbe9 100644 (file)
@@ -2727,6 +2727,7 @@ static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 static int bnxt_run_loopback(struct bnxt *bp)
 {
        struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+       struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
        struct bnxt_cp_ring_info *cpr;
        int pkt_size, i = 0;
        struct sk_buff *skb;
@@ -2734,7 +2735,9 @@ static int bnxt_run_loopback(struct bnxt *bp)
        u8 *data;
        int rc;
 
-       cpr = &txr->bnapi->cp_ring;
+       cpr = &rxr->bnapi->cp_ring;
+       if (bp->flags & BNXT_FLAG_CHIP_P5)
+               cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
        pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
index d9a208f..b126926 100644 (file)
@@ -61,7 +61,8 @@
 #define MACB_TX_ERR_FLAGS      (MACB_BIT(ISR_TUND)                     \
                                        | MACB_BIT(ISR_RLE)             \
                                        | MACB_BIT(TXERR))
-#define MACB_TX_INT_FLAGS      (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+#define MACB_TX_INT_FLAGS      (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)    \
+                                       | MACB_BIT(TXUBR))
 
 /* Max length of transmit frame must be a multiple of 8 bytes */
 #define MACB_TX_LEN_ALIGN      8
@@ -680,6 +681,11 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
        if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
                desc_64 = macb_64b_desc(bp, desc);
                desc_64->addrh = upper_32_bits(addr);
+               /* The low bits of RX address contain the RX_USED bit, clearing
+                * of which allows packet RX. Make sure the high bits are also
+                * visible to HW at that point.
+                */
+               dma_wmb();
        }
 #endif
        desc->addr = lower_32_bits(addr);
@@ -928,14 +934,19 @@ static void gem_rx_refill(struct macb_queue *queue)
 
                        if (entry == bp->rx_ring_size - 1)
                                paddr |= MACB_BIT(RX_WRAP);
-                       macb_set_addr(bp, desc, paddr);
                        desc->ctrl = 0;
+                       /* Setting addr clears RX_USED and allows reception,
+                        * make sure ctrl is cleared first to avoid a race.
+                        */
+                       dma_wmb();
+                       macb_set_addr(bp, desc, paddr);
 
                        /* properly align Ethernet header */
                        skb_reserve(skb, NET_IP_ALIGN);
                } else {
-                       desc->addr &= ~MACB_BIT(RX_USED);
                        desc->ctrl = 0;
+                       dma_wmb();
+                       desc->addr &= ~MACB_BIT(RX_USED);
                }
        }
 
@@ -989,11 +1000,15 @@ static int gem_rx(struct macb_queue *queue, int budget)
 
                rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
                addr = macb_get_addr(bp, desc);
-               ctrl = desc->ctrl;
 
                if (!rxused)
                        break;
 
+               /* Ensure ctrl is at least as up-to-date as rxused */
+               dma_rmb();
+
+               ctrl = desc->ctrl;
+
                queue->rx_tail++;
                count++;
 
@@ -1168,11 +1183,14 @@ static int macb_rx(struct macb_queue *queue, int budget)
                /* Make hw descriptor updates visible to CPU */
                rmb();
 
-               ctrl = desc->ctrl;
-
                if (!(desc->addr & MACB_BIT(RX_USED)))
                        break;
 
+               /* Ensure ctrl is at least as up-to-date as addr */
+               dma_rmb();
+
+               ctrl = desc->ctrl;
+
                if (ctrl & MACB_BIT(RX_SOF)) {
                        if (first_frag != -1)
                                discard_partial_frame(queue, first_frag, tail);
@@ -1312,6 +1330,21 @@ static void macb_hresp_error_task(unsigned long data)
        netif_tx_start_all_queues(dev);
 }
 
+static void macb_tx_restart(struct macb_queue *queue)
+{
+       unsigned int head = queue->tx_head;
+       unsigned int tail = queue->tx_tail;
+       struct macb *bp = queue->bp;
+
+       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+               queue_writel(queue, ISR, MACB_BIT(TXUBR));
+
+       if (head == tail)
+               return;
+
+       macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+}
+
 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 {
        struct macb_queue *queue = dev_id;
@@ -1369,6 +1402,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                if (status & MACB_BIT(TCOMP))
                        macb_tx_interrupt(queue);
 
+               if (status & MACB_BIT(TXUBR))
+                       macb_tx_restart(queue);
+
                /* Link change detection isn't possible with RMII, so we'll
                 * add that if/when we get our hands on a full-blown MII PHY.
                 */
index cd5296b..a6dc47e 100644 (file)
@@ -319,6 +319,8 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
        desc_ptp = macb_ptp_desc(queue->bp, desc);
        tx_timestamp = &queue->tx_timestamps[head];
        tx_timestamp->skb = skb;
+       /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
+       dma_rmb();
        tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
        tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
        /* move head */
index f152da1..c62a0c8 100644 (file)
@@ -1453,6 +1453,9 @@ struct cpl_tx_data {
 #define T6_TX_FORCE_V(x)       ((x) << T6_TX_FORCE_S)
 #define T6_TX_FORCE_F          T6_TX_FORCE_V(1U)
 
+#define TX_URG_S    16
+#define TX_URG_V(x) ((x) << TX_URG_S)
+
 #define TX_SHOVE_S    14
 #define TX_SHOVE_V(x) ((x) << TX_SHOVE_S)
 
index b52029e..ad1779f 100644 (file)
@@ -379,6 +379,9 @@ static void hns_ae_stop(struct hnae_handle *handle)
 
        hns_ae_ring_enable_all(handle, 0);
 
+       /* clean rx fbd. */
+       hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
+
        (void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
 }
 
index aaf72c0..1790cda 100644 (file)
@@ -67,11 +67,14 @@ static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
        struct mac_driver *drv = (struct mac_driver *)mac_drv;
 
        /*enable GE rX/tX */
-       if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+       if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
                dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
 
-       if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+       if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+               /* enable rx pcs */
+               dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
                dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+       }
 }
 
 static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
@@ -79,11 +82,14 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
        struct mac_driver *drv = (struct mac_driver *)mac_drv;
 
        /*disable GE rX/tX */
-       if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+       if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
                dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
 
-       if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+       if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+               /* disable rx pcs */
+               dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
                dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+       }
 }
 
 /* hns_gmac_get_en - get port enable
index 3613e40..a97228c 100644 (file)
@@ -778,6 +778,17 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
        return rc;
 }
 
+static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
+{
+       if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
+               return;
+
+       phy_device_remove(mac_cb->phy_dev);
+       phy_device_free(mac_cb->phy_dev);
+
+       mac_cb->phy_dev = NULL;
+}
+
 #define MAC_MEDIA_TYPE_MAX_LEN         16
 
 static const struct {
@@ -1117,7 +1128,11 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
        int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
 
        for (i = 0; i < max_port_num; i++) {
+               if (!dsaf_dev->mac_cb[i])
+                       continue;
+
                dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
+               hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
                dsaf_dev->mac_cb[i] = NULL;
        }
 }
index e557a4e..3b9e74b 100644 (file)
@@ -935,6 +935,62 @@ static void hns_dsaf_tcam_mc_cfg(
 }
 
 /**
+ * hns_dsaf_tcam_uc_cfg_vague - INT
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address,
+ * @ptbl_tcam_data,
+ */
+static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
+                                      u32 address,
+                                      struct dsaf_tbl_tcam_data *tcam_data,
+                                      struct dsaf_tbl_tcam_data *tcam_mask,
+                                      struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
+{
+       spin_lock_bh(&dsaf_dev->tcam_lock);
+       hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+       hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+       hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
+       hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+       hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+       /*Restore Match Data*/
+       tcam_mask->tbl_tcam_data_high = 0xffffffff;
+       tcam_mask->tbl_tcam_data_low = 0xffffffff;
+       hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+       spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg_vague - INT
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address,
+ * @ptbl_tcam_data,
+ * @ptbl_tcam_mask
+ * @ptbl_tcam_mcast
+ */
+static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
+                                      u32 address,
+                                      struct dsaf_tbl_tcam_data *tcam_data,
+                                      struct dsaf_tbl_tcam_data *tcam_mask,
+                                      struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
+{
+       spin_lock_bh(&dsaf_dev->tcam_lock);
+       hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+       hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+       hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
+       hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+       hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+       /*Restore Match Data*/
+       tcam_mask->tbl_tcam_data_high = 0xffffffff;
+       tcam_mask->tbl_tcam_data_low = 0xffffffff;
+       hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+       spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
  * hns_dsaf_tcam_mc_invld - INT
  * @dsaf_id: dsa fabric id
  * @address
@@ -1493,6 +1549,27 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
 }
 
 /**
+ * hns_dsaf_find_empty_mac_entry_reverse
+ * search dsa fabric soft empty-entry from the end
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
+{
+       struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+       int i;
+
+       soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
+       for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
+               /* search all entry from end to start.*/
+               if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+                       return i;
+               soft_mac_entry--;
+       }
+       return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
  * hns_dsaf_set_mac_key - set mac key
  * @dsaf_dev: dsa fabric device struct pointer
  * @mac_key: tcam key pointer
@@ -2166,9 +2243,9 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
                DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
 
        hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
-               DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
+               DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
        hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
-               DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+               DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
 
        /* pfc pause frame statistics stored in dsaf inode*/
        if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
@@ -2285,237 +2362,237 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
                                DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
                p[223 + i] = dsaf_read_dev(ddev,
                                DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
-               p[224 + i] = dsaf_read_dev(ddev,
+               p[226 + i] = dsaf_read_dev(ddev,
                                DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
        }
 
-       p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+       p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
 
        for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
                j = i * DSAF_COMM_CHN + port;
-               p[228 + i] = dsaf_read_dev(ddev,
+               p[230 + i] = dsaf_read_dev(ddev,
                                DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
        }
 
-       p[231] = dsaf_read_dev(ddev,
-               DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
+       p[233] = dsaf_read_dev(ddev,
+               DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
 
        /* dsaf inode registers */
        for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
                j = i * DSAF_COMM_CHN + port;
-               p[232 + i] = dsaf_read_dev(ddev,
+               p[234 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_CFG_REG_0_REG + j * 0x80);
-               p[235 + i] = dsaf_read_dev(ddev,
+               p[237 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
-               p[238 + i] = dsaf_read_dev(ddev,
+               p[240 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
-               p[241 + i] = dsaf_read_dev(ddev,
+               p[243 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
-               p[244 + i] = dsaf_read_dev(ddev,
+               p[246 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
-               p[245 + i] = dsaf_read_dev(ddev,
+               p[249 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
-               p[248 + i] = dsaf_read_dev(ddev,
+               p[252 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
-               p[251 + i] = dsaf_read_dev(ddev,
+               p[255 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
-               p[254 + i] = dsaf_read_dev(ddev,
+               p[258 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
-               p[257 + i] = dsaf_read_dev(ddev,
+               p[261 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
-               p[260 + i] = dsaf_read_dev(ddev,
+               p[264 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_INER_ST_0_REG + j * 0x80);
-               p[263 + i] = dsaf_read_dev(ddev,
+               p[267 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
-               p[266 + i] = dsaf_read_dev(ddev,
+               p[270 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
-               p[269 + i] = dsaf_read_dev(ddev,
+               p[273 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
-               p[272 + i] = dsaf_read_dev(ddev,
+               p[276 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
-               p[275 + i] = dsaf_read_dev(ddev,
+               p[279 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
-               p[278 + i] = dsaf_read_dev(ddev,
+               p[282 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
-               p[281 + i] = dsaf_read_dev(ddev,
+               p[285 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
-               p[284 + i] = dsaf_read_dev(ddev,
+               p[288 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
-               p[287 + i] = dsaf_read_dev(ddev,
+               p[291 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
-               p[290 + i] = dsaf_read_dev(ddev,
+               p[294 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
-               p[293 + i] = dsaf_read_dev(ddev,
+               p[297 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
-               p[296 + i] = dsaf_read_dev(ddev,
+               p[300 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
-               p[299 + i] = dsaf_read_dev(ddev,
+               p[303 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
-               p[302 + i] = dsaf_read_dev(ddev,
+               p[306 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
-               p[305 + i] = dsaf_read_dev(ddev,
+               p[309 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
-               p[308 + i] = dsaf_read_dev(ddev,
+               p[312 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
        }
 
        /* dsaf onode registers */
        for (i = 0; i < DSAF_XOD_NUM; i++) {
-               p[311 + i] = dsaf_read_dev(ddev,
+               p[315 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
-               p[319 + i] = dsaf_read_dev(ddev,
+               p[323 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
-               p[327 + i] = dsaf_read_dev(ddev,
+               p[331 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
-               p[335 + i] = dsaf_read_dev(ddev,
+               p[339 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
-               p[343 + i] = dsaf_read_dev(ddev,
+               p[347 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
-               p[351 + i] = dsaf_read_dev(ddev,
+               p[355 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
        }
 
-       p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
-       p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
-       p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+       p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+       p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+       p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
 
        for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
                j = i * DSAF_COMM_CHN + port;
-               p[362 + i] = dsaf_read_dev(ddev,
+               p[366 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_GNT_L_0_REG + j * 0x90);
-               p[365 + i] = dsaf_read_dev(ddev,
+               p[369 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_GNT_H_0_REG + j * 0x90);
-               p[368 + i] = dsaf_read_dev(ddev,
+               p[372 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
-               p[371 + i] = dsaf_read_dev(ddev,
+               p[375 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
-               p[374 + i] = dsaf_read_dev(ddev,
+               p[378 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
-               p[377 + i] = dsaf_read_dev(ddev,
+               p[381 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
-               p[380 + i] = dsaf_read_dev(ddev,
+               p[384 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
-               p[383 + i] = dsaf_read_dev(ddev,
+               p[387 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
-               p[386 + i] = dsaf_read_dev(ddev,
+               p[390 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
-               p[389 + i] = dsaf_read_dev(ddev,
+               p[393 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
        }
 
-       p[392] = dsaf_read_dev(ddev,
+       p[396] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
-       p[393] = dsaf_read_dev(ddev,
+       p[397] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
-       p[394] = dsaf_read_dev(ddev,
+       p[398] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
-       p[395] = dsaf_read_dev(ddev,
+       p[399] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
-       p[396] = dsaf_read_dev(ddev,
+       p[400] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
-       p[397] = dsaf_read_dev(ddev,
+       p[401] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
-       p[398] = dsaf_read_dev(ddev,
+       p[402] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
-       p[399] = dsaf_read_dev(ddev,
+       p[403] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
-       p[400] = dsaf_read_dev(ddev,
+       p[404] = dsaf_read_dev(ddev,
                DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
-       p[401] = dsaf_read_dev(ddev,
+       p[405] = dsaf_read_dev(ddev,
                DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
-       p[402] = dsaf_read_dev(ddev,
+       p[406] = dsaf_read_dev(ddev,
                DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
-       p[403] = dsaf_read_dev(ddev,
+       p[407] = dsaf_read_dev(ddev,
                DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
-       p[404] = dsaf_read_dev(ddev,
+       p[408] = dsaf_read_dev(ddev,
                DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
 
        /* dsaf voq registers */
        for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
                j = (i * DSAF_COMM_CHN + port) * 0x90;
-               p[405 + i] = dsaf_read_dev(ddev,
+               p[409 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
-               p[408 + i] = dsaf_read_dev(ddev,
+               p[412 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
-               p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
-               p[414 + i] = dsaf_read_dev(ddev,
+               p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+               p[418 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
-               p[417 + i] = dsaf_read_dev(ddev,
+               p[421 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
-               p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
-               p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
-               p[426 + i] = dsaf_read_dev(ddev,
+               p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+               p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+               p[430 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
-               p[429 + i] = dsaf_read_dev(ddev,
+               p[433 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
-               p[432 + i] = dsaf_read_dev(ddev,
+               p[436 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
-               p[435 + i] = dsaf_read_dev(ddev,
+               p[439 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
-               p[438 + i] = dsaf_read_dev(ddev,
+               p[442 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_BP_ALL_THRD_0_REG + j);
        }
 
        /* dsaf tbl registers */
-       p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
-       p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
-       p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
-       p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
-       p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
-       p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
-       p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
-       p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
-       p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
-       p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
-       p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
-       p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
-       p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
-       p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
-       p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
-       p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
-       p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
-       p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
-       p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
-       p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
-       p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
-       p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
-       p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+       p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+       p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+       p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+       p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+       p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+       p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+       p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+       p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+       p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+       p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+       p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+       p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+       p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+       p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+       p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+       p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+       p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+       p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+       p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+       p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+       p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+       p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+       p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
 
        for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
                j = i * 0x8;
-               p[464 + 2 * i] = dsaf_read_dev(ddev,
+               p[468 + 2 * i] = dsaf_read_dev(ddev,
                        DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
-               p[465 + 2 * i] = dsaf_read_dev(ddev,
+               p[469 + 2 * i] = dsaf_read_dev(ddev,
                        DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
        }
 
-       p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
-       p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
-       p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
-       p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
-       p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
-       p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
-       p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
-       p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
-       p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
-       p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
-       p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
-       p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+       p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+       p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+       p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+       p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+       p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+       p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+       p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+       p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+       p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+       p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+       p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+       p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
 
        /* dsaf other registers */
-       p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
-       p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
-       p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
-       p[495] = dsaf_read_dev(ddev,
+       p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+       p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+       p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+       p[499] = dsaf_read_dev(ddev,
                DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
-       p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
-       p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+       p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+       p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
 
        if (!is_ver1)
-               p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+               p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
 
        /* mark end of dsaf regs */
-       for (i = 499; i < 504; i++)
+       for (i = 503; i < 504; i++)
                p[i] = 0xdddddddd;
 }
 
@@ -2673,58 +2750,156 @@ int hns_dsaf_get_regs_count(void)
        return DSAF_DUMP_REGS_NUM;
 }
 
-/* Reserve the last TCAM entry for promisc support */
-#define dsaf_promisc_tcam_entry(port) \
-       (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
-void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
-                              u32 port, bool enable)
+static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 {
+       struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
+       struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+       struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
+       struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
        struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
-       struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
-       u16 entry_index;
-       struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask;
-       struct dsaf_tbl_tcam_mcast_cfg mac_data = {0};
+       struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
+       struct dsaf_drv_mac_single_dest_entry mask_entry;
+       struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+       struct hns_mac_cb *mac_cb;
+       u8 addr[ETH_ALEN] = {0};
+       u8 port_num;
+       u16 mskid;
+
+       /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+       if (entry_index != DSAF_INVALID_ENTRY_IDX)
+               return;
+
+       /* put promisc tcam entry in the end. */
+       /* 1. set promisc unicast vague tcam entry. */
+       entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               dev_err(dsaf_dev->dev,
+                       "enable uc promisc failed (port:%#x)\n",
+                       port);
+               return;
+       }
+
+       mac_cb = dsaf_dev->mac_cb[port];
+       (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
+       tbl_tcam_ucast.tbl_ucast_out_port = port_num;
 
-       if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev))
+       /* config uc vague table */
+       hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+                                  &tbl_tcam_mask_uc, &tbl_tcam_ucast);
+
+       /* update software entry */
+       soft_mac_entry = priv->soft_mac_tbl;
+       soft_mac_entry += entry_index;
+       soft_mac_entry->index = entry_index;
+       soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+       soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+       /* step back to the START for mc. */
+       soft_mac_entry = priv->soft_mac_tbl;
+
+       /* 2. set promisc multicast vague tcam entry. */
+       entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               dev_err(dsaf_dev->dev,
+                       "enable mc promisc failed (port:%#x)\n",
+                       port);
                return;
+       }
+
+       memset(&mask_entry, 0x0, sizeof(mask_entry));
+       memset(&mask_key, 0x0, sizeof(mask_key));
+       memset(&temp_key, 0x0, sizeof(temp_key));
+       mask_entry.addr[0] = 0x01;
+       hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
+                            port, mask_entry.addr);
+       tbl_tcam_mcast.tbl_mcast_item_vld = 1;
+       tbl_tcam_mcast.tbl_mcast_old_en = 0;
 
-       /* find the tcam entry index for promisc */
-       entry_index = dsaf_promisc_tcam_entry(port);
-
-       memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
-       memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
-
-       /* config key mask */
-       if (enable) {
-               dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
-                              DSAF_TBL_TCAM_KEY_PORT_M,
-                              DSAF_TBL_TCAM_KEY_PORT_S, port);
-               dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
-                              DSAF_TBL_TCAM_KEY_PORT_M,
-                              DSAF_TBL_TCAM_KEY_PORT_S, 0xf);
-
-               /* SUB_QID */
-               dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
-                            DSAF_SERVICE_NW_NUM, true);
-               mac_data.tbl_mcast_item_vld = true;     /* item_vld bit */
+       if (port < DSAF_SERVICE_NW_NUM) {
+               mskid = port;
+       } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
+               mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
        } else {
-               mac_data.tbl_mcast_item_vld = false;    /* item_vld bit */
+               dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name, port,
+                       mask_key.high.val, mask_key.low.val);
+               return;
        }
 
-       dev_dbg(dsaf_dev->dev,
-               "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
-               dsaf_dev->ae_dev.name, tbl_tcam_data.high.val,
-               tbl_tcam_data.low.val, entry_index);
+       dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+                    mskid % 32, 1);
+       memcpy(&temp_key, &mask_key, sizeof(mask_key));
+       hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+                                  (struct dsaf_tbl_tcam_data *)(&mask_key),
+                                  &tbl_tcam_mcast);
+
+       /* update software entry */
+       soft_mac_entry += entry_index;
+       soft_mac_entry->index = entry_index;
+       soft_mac_entry->tcam_key.high.val = temp_key.high.val;
+       soft_mac_entry->tcam_key.low.val = temp_key.low.val;
+}
 
-       /* config promisc entry with mask */
-       hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
-                            (struct dsaf_tbl_tcam_data *)&tbl_tcam_data,
-                            (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask,
-                            &mac_data);
+static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
+{
+       struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+       struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
+       struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+       struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
+       struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+       u8 addr[ETH_ALEN] = {0};
 
-       /* config software entry */
+       /* 1. delete uc vague tcam entry. */
+       /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+       if (entry_index == DSAF_INVALID_ENTRY_IDX)
+               return;
+
+       /* config uc vague table */
+       hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+                                  &tbl_tcam_mask, &tbl_tcam_ucast);
+       /* update soft management table. */
+       soft_mac_entry = priv->soft_mac_tbl;
+       soft_mac_entry += entry_index;
+       soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+       /* step back to the START for mc. */
+       soft_mac_entry = priv->soft_mac_tbl;
+
+       /* 2. delete mc vague tcam entry. */
+       addr[0] = 0x01;
+       memset(&mac_key, 0x0, sizeof(mac_key));
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+       if (entry_index == DSAF_INVALID_ENTRY_IDX)
+               return;
+
+       /* config mc vague table */
+       hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+                                  &tbl_tcam_mask, &tbl_tcam_mcast);
+       /* update soft management table. */
        soft_mac_entry += entry_index;
-       soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
+       soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+}
+
+/* Reserve the last TCAM entry for promisc support */
+void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+                              u32 port, bool enable)
+{
+       if (enable)
+               set_promisc_tcam_enable(dsaf_dev, port);
+       else
+               set_promisc_tcam_disable(dsaf_dev, port);
 }
 
 int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
index 74d935d..b9733b0 100644 (file)
 #define DSAF_INODE_IN_DATA_STP_DISC_0_REG      0x1A50
 #define DSAF_INODE_GE_FC_EN_0_REG              0x1B00
 #define DSAF_INODE_VC0_IN_PKT_NUM_0_REG                0x1B50
-#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG                0x1C00
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG                0x103C
 #define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG      0x1C00
 #define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET   0x100
 #define DSAF_INODE_IN_PRIO_PAUSE_OFFSET                0x50
 #define RCB_ECC_ERR_ADDR4_REG                  0x460
 #define RCB_ECC_ERR_ADDR5_REG                  0x464
 
-#define RCB_COM_SF_CFG_INTMASK_RING            0x480
-#define RCB_COM_SF_CFG_RING_STS                        0x484
-#define RCB_COM_SF_CFG_RING                    0x488
-#define RCB_COM_SF_CFG_INTMASK_BD              0x48C
-#define RCB_COM_SF_CFG_BD_RINT_STS             0x470
+#define RCB_COM_SF_CFG_INTMASK_RING            0x470
+#define RCB_COM_SF_CFG_RING_STS                        0x474
+#define RCB_COM_SF_CFG_RING                    0x478
+#define RCB_COM_SF_CFG_INTMASK_BD              0x47C
+#define RCB_COM_SF_CFG_BD_RINT_STS             0x480
 #define RCB_COM_RCB_RD_BD_BUSY                 0x490
 #define RCB_COM_RCB_FBD_CRT_EN                 0x494
 #define RCB_COM_AXI_WR_ERR_INTMASK             0x498
 #define GMAC_LD_LINK_COUNTER_REG               0x01D0UL
 #define GMAC_LOOP_REG                          0x01DCUL
 #define GMAC_RECV_CONTROL_REG                  0x01E0UL
+#define GMAC_PCS_RX_EN_REG                     0x01E4UL
 #define GMAC_VLAN_CODE_REG                     0x01E8UL
 #define GMAC_RX_OVERRUN_CNT_REG                        0x01ECUL
 #define GMAC_RX_LENGTHFIELD_ERR_CNT_REG                0x01F4UL
index c62378c..5748d3f 100644 (file)
@@ -1188,6 +1188,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
        if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
                phy_dev->autoneg = false;
 
+       if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
+               phy_stop(phy_dev);
+
        return 0;
 }
 
@@ -1283,6 +1286,22 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
        return cpu;
 }
 
+static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < q_num * 2; i++) {
+               if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+                       irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+                                             NULL);
+                       free_irq(priv->ring_data[i].ring->irq,
+                                &priv->ring_data[i]);
+                       priv->ring_data[i].ring->irq_init_flag =
+                               RCB_IRQ_NOT_INITED;
+               }
+       }
+}
+
 static int hns_nic_init_irq(struct hns_nic_priv *priv)
 {
        struct hnae_handle *h = priv->ae_handle;
@@ -1308,7 +1327,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
                if (ret) {
                        netdev_err(priv->netdev, "request irq(%d) fail\n",
                                   rd->ring->irq);
-                       return ret;
+                       goto out_free_irq;
                }
                disable_irq(rd->ring->irq);
 
@@ -1323,6 +1342,10 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
        }
 
        return 0;
+
+out_free_irq:
+       hns_nic_free_irq(h->q_num, priv);
+       return ret;
 }
 
 static int hns_nic_net_up(struct net_device *ndev)
@@ -1332,6 +1355,9 @@ static int hns_nic_net_up(struct net_device *ndev)
        int i, j;
        int ret;
 
+       if (!test_bit(NIC_STATE_DOWN, &priv->state))
+               return 0;
+
        ret = hns_nic_init_irq(priv);
        if (ret != 0) {
                netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
@@ -1367,6 +1393,7 @@ out_has_some_queues:
        for (j = i - 1; j >= 0; j--)
                hns_nic_ring_close(ndev, j);
 
+       hns_nic_free_irq(h->q_num, priv);
        set_bit(NIC_STATE_DOWN, &priv->state);
 
        return ret;
@@ -1484,11 +1511,19 @@ static int hns_nic_net_stop(struct net_device *ndev)
 }
 
 static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+#define HNS_TX_TIMEO_LIMIT (40 * HZ)
 static void hns_nic_net_timeout(struct net_device *ndev)
 {
        struct hns_nic_priv *priv = netdev_priv(ndev);
 
-       hns_tx_timeout_reset(priv);
+       if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
+               ndev->watchdog_timeo *= 2;
+               netdev_info(ndev, "watchdog_timo changed to %d.\n",
+                           ndev->watchdog_timeo);
+       } else {
+               ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+               hns_tx_timeout_reset(priv);
+       }
 }
 
 static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
@@ -2051,11 +2086,11 @@ static void hns_nic_service_task(struct work_struct *work)
                = container_of(work, struct hns_nic_priv, service_task);
        struct hnae_handle *h = priv->ae_handle;
 
+       hns_nic_reset_subtask(priv);
        hns_nic_update_link_status(priv->netdev);
        h->dev->ops->update_led_status(h);
        hns_nic_update_stats(priv->netdev);
 
-       hns_nic_reset_subtask(priv);
        hns_nic_service_event_complete(priv);
 }
 
@@ -2341,7 +2376,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
        ndev->min_mtu = MAC_MIN_MTU;
        switch (priv->enet_ver) {
        case AE_VERSION_2:
-               ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+               ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
                ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                        NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
index 14d0098..5ecbb1a 100644 (file)
@@ -1936,8 +1936,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
 {
        struct ibmvnic_rwi *rwi;
+       unsigned long flags;
 
-       mutex_lock(&adapter->rwi_lock);
+       spin_lock_irqsave(&adapter->rwi_lock, flags);
 
        if (!list_empty(&adapter->rwi_list)) {
                rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
@@ -1947,7 +1948,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
                rwi = NULL;
        }
 
-       mutex_unlock(&adapter->rwi_lock);
+       spin_unlock_irqrestore(&adapter->rwi_lock, flags);
        return rwi;
 }
 
@@ -2022,6 +2023,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
        struct list_head *entry, *tmp_entry;
        struct ibmvnic_rwi *rwi, *tmp;
        struct net_device *netdev = adapter->netdev;
+       unsigned long flags;
        int ret;
 
        if (adapter->state == VNIC_REMOVING ||
@@ -2038,21 +2040,21 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
                goto err;
        }
 
-       mutex_lock(&adapter->rwi_lock);
+       spin_lock_irqsave(&adapter->rwi_lock, flags);
 
        list_for_each(entry, &adapter->rwi_list) {
                tmp = list_entry(entry, struct ibmvnic_rwi, list);
                if (tmp->reset_reason == reason) {
                        netdev_dbg(netdev, "Skipping matching reset\n");
-                       mutex_unlock(&adapter->rwi_lock);
+                       spin_unlock_irqrestore(&adapter->rwi_lock, flags);
                        ret = EBUSY;
                        goto err;
                }
        }
 
-       rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+       rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
        if (!rwi) {
-               mutex_unlock(&adapter->rwi_lock);
+               spin_unlock_irqrestore(&adapter->rwi_lock, flags);
                ibmvnic_close(netdev);
                ret = ENOMEM;
                goto err;
@@ -2066,7 +2068,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
        }
        rwi->reset_reason = reason;
        list_add_tail(&rwi->list, &adapter->rwi_list);
-       mutex_unlock(&adapter->rwi_lock);
+       spin_unlock_irqrestore(&adapter->rwi_lock, flags);
        adapter->resetting = true;
        netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
        schedule_work(&adapter->ibmvnic_reset);
@@ -4756,7 +4758,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
-       mutex_init(&adapter->rwi_lock);
+       spin_lock_init(&adapter->rwi_lock);
        adapter->resetting = false;
 
        adapter->mac_change_pending = false;
index 99c4f8d..f2018db 100644 (file)
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
        struct tasklet_struct tasklet;
        enum vnic_state state;
        enum ibmvnic_reset_reason reset_reason;
-       struct mutex rwi_lock;
+       spinlock_t rwi_lock;
        struct list_head rwi_list;
        struct work_struct ibmvnic_reset;
        bool resetting;
index 5824d74..4d40878 100644 (file)
@@ -1546,17 +1546,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
                netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
 
        /* Copy the address first, so that we avoid a possible race with
-        * .set_rx_mode(). If we copy after changing the address in the filter
-        * list, we might open ourselves to a narrow race window where
-        * .set_rx_mode could delete our dev_addr filter and prevent traffic
-        * from passing.
+        * .set_rx_mode().
+        * - Remove old address from MAC filter
+        * - Copy new address
+        * - Add new address to MAC filter
         */
-       ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
        spin_lock_bh(&vsi->mac_filter_hash_lock);
        i40e_del_mac_filter(vsi, netdev->dev_addr);
-       i40e_add_mac_filter(vsi, addr->sa_data);
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+       i40e_add_mac_filter(vsi, netdev->dev_addr);
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;
 
index a0b1575..a7e14e9 100644 (file)
@@ -1559,24 +1559,6 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 }
 
 /**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring:  rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-void i40e_receive_skb(struct i40e_ring *rx_ring,
-                     struct sk_buff *skb, u16 vlan_tag)
-{
-       struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
-       if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-           (vlan_tag & VLAN_VID_MASK))
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
-       napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
  * i40e_alloc_rx_buffers - Replace used receive buffers
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
@@ -1793,8 +1775,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
  * other fields within the skb.
  **/
 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
-                            union i40e_rx_desc *rx_desc, struct sk_buff *skb,
-                            u8 rx_ptype)
+                            union i40e_rx_desc *rx_desc, struct sk_buff *skb)
 {
        u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
        u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
@@ -1802,6 +1783,8 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
        u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
        u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
                   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
+       u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+                     I40E_RXD_QW1_PTYPE_SHIFT;
 
        if (unlikely(tsynvalid))
                i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
@@ -1812,6 +1795,13 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
 
        skb_record_rx_queue(skb, rx_ring->queue_index);
 
+       if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+               u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
+
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      le16_to_cpu(vlan_tag));
+       }
+
        /* modifies the skb - consumes the enet header */
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
@@ -2350,8 +2340,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                struct i40e_rx_buffer *rx_buffer;
                union i40e_rx_desc *rx_desc;
                unsigned int size;
-               u16 vlan_tag;
-               u8 rx_ptype;
                u64 qword;
 
                /* return some buffers to hardware, one at a time is too slow */
@@ -2444,18 +2432,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
 
-               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-                          I40E_RXD_QW1_PTYPE_SHIFT;
-
                /* populate checksum, VLAN, and protocol */
-               i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
-               vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
-                          le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+               i40e_process_skb_fields(rx_ring, rx_desc, skb);
 
                i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
-               i40e_receive_skb(rx_ring, skb, vlan_tag);
+               napi_gro_receive(&rx_ring->q_vector->napi, skb);
                skb = NULL;
 
                /* update budget accounting */
index 09809df..8af0e99 100644 (file)
@@ -12,10 +12,7 @@ struct i40e_rx_buffer *i40e_clean_programming_status(
        union i40e_rx_desc *rx_desc,
        u64 qw);
 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
-                            union i40e_rx_desc *rx_desc, struct sk_buff *skb,
-                            u8 rx_ptype);
-void i40e_receive_skb(struct i40e_ring *rx_ring,
-                     struct sk_buff *skb, u16 vlan_tag);
+                            union i40e_rx_desc *rx_desc, struct sk_buff *skb);
 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
                          unsigned int total_rx_bytes,
index 433c8e6..870cf65 100644 (file)
@@ -634,8 +634,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                struct i40e_rx_buffer *bi;
                union i40e_rx_desc *rx_desc;
                unsigned int size;
-               u16 vlan_tag;
-               u8 rx_ptype;
                u64 qword;
 
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
@@ -713,14 +711,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-                          I40E_RXD_QW1_PTYPE_SHIFT;
-               i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
-               vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
-                          le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
-               i40e_receive_skb(rx_ring, skb, vlan_tag);
+               i40e_process_skb_fields(rx_ring, rx_desc, skb);
+               napi_gro_receive(&rx_ring->q_vector->napi, skb);
        }
 
        i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
index 5dacfc8..345701a 100644 (file)
@@ -700,7 +700,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
        u8 num_tcs = adapter->hw_tcs;
        u32 reg_val;
        u32 queue;
-       u32 word;
 
        /* remove VLAN filters beloning to this VF */
        ixgbe_clear_vf_vlans(adapter, vf);
@@ -758,6 +757,14 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
                }
        }
 
+       IXGBE_WRITE_FLUSH(hw);
+}
+
+static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 word;
+
        /* Clear VF's mailbox memory */
        for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
@@ -831,6 +838,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
        /* reset the filters for the device */
        ixgbe_vf_reset_event(adapter, vf);
 
+       ixgbe_vf_clear_mbx(adapter, vf);
+
        /* set vf mac address */
        if (!is_zero_ether_addr(vf_mac))
                ixgbe_set_vf_mac(adapter, vf, vf_mac);
index 46a0f6b..9d4568e 100644 (file)
@@ -408,7 +408,6 @@ struct mvneta_port {
        struct mvneta_pcpu_stats __percpu       *stats;
 
        int pkt_size;
-       unsigned int frag_size;
        void __iomem *base;
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
@@ -2905,7 +2904,9 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
        if (!pp->bm_priv) {
                /* Set Offset */
                mvneta_rxq_offset_set(pp, rxq, 0);
-               mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
+               mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
+                                       PAGE_SIZE :
+                                       MVNETA_RX_BUF_SIZE(pp->pkt_size));
                mvneta_rxq_bm_disable(pp, rxq);
                mvneta_rxq_fill(pp, rxq, rxq->size);
        } else {
@@ -3760,7 +3761,6 @@ static int mvneta_open(struct net_device *dev)
        int ret;
 
        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
-       pp->frag_size = PAGE_SIZE;
 
        ret = mvneta_setup_rxqs(pp);
        if (ret)
index 125ea99..f1dab0b 100644 (file)
@@ -4405,12 +4405,15 @@ static void mvpp2_phylink_validate(struct net_device *dev,
        case PHY_INTERFACE_MODE_10GKR:
        case PHY_INTERFACE_MODE_XAUI:
        case PHY_INTERFACE_MODE_NA:
-               phylink_set(mask, 10000baseCR_Full);
-               phylink_set(mask, 10000baseSR_Full);
-               phylink_set(mask, 10000baseLR_Full);
-               phylink_set(mask, 10000baseLRM_Full);
-               phylink_set(mask, 10000baseER_Full);
-               phylink_set(mask, 10000baseKR_Full);
+               if (port->gop_id == 0) {
+                       phylink_set(mask, 10000baseT_Full);
+                       phylink_set(mask, 10000baseCR_Full);
+                       phylink_set(mask, 10000baseSR_Full);
+                       phylink_set(mask, 10000baseLR_Full);
+                       phylink_set(mask, 10000baseLRM_Full);
+                       phylink_set(mask, 10000baseER_Full);
+                       phylink_set(mask, 10000baseKR_Full);
+               }
                /* Fall-through */
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4421,7 +4424,6 @@ static void mvpp2_phylink_validate(struct net_device *dev,
                phylink_set(mask, 10baseT_Full);
                phylink_set(mask, 100baseT_Half);
                phylink_set(mask, 100baseT_Full);
-               phylink_set(mask, 10000baseT_Full);
                /* Fall-through */
        case PHY_INTERFACE_MODE_1000BASEX:
        case PHY_INTERFACE_MODE_2500BASEX:
index 6af587c..6e10120 100644 (file)
@@ -1195,11 +1195,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
                              struct ethtool_ts_info *info)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       int ret;
-
-       ret = ethtool_op_get_ts_info(priv->netdev, info);
-       if (ret)
-               return ret;
 
        info->phc_index = mlx5_clock_get_ptp_index(mdev);
 
@@ -1207,9 +1202,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
            info->phc_index == -1)
                return 0;
 
-       info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
-                                SOF_TIMESTAMPING_RX_HARDWARE |
-                                SOF_TIMESTAMPING_RAW_HARDWARE;
+       info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+                               SOF_TIMESTAMPING_RX_HARDWARE |
+                               SOF_TIMESTAMPING_RAW_HARDWARE;
 
        info->tx_types = BIT(HWTSTAMP_TX_OFF) |
                         BIT(HWTSTAMP_TX_ON);
index cd6872a..bc79140 100644 (file)
@@ -130,6 +130,8 @@ static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
        return !params->lro_en && frag_sz <= PAGE_SIZE;
 }
 
+#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
+                                         MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
 static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
                                         struct mlx5e_params *params)
 {
@@ -140,6 +142,9 @@ static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
        if (!mlx5e_rx_is_linear_skb(mdev, params))
                return false;
 
+       if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+               return false;
+
        if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
                return true;
 
@@ -1400,6 +1405,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
        struct mlx5_core_dev *mdev = c->mdev;
        struct mlx5_rate_limit rl = {0};
 
+       cancel_work_sync(&sq->dim.work);
        mlx5e_destroy_sq(mdev, sq->sqn);
        if (sq->rate_limit) {
                rl.rate = sq->rate_limit;
index ed1158b..3a17713 100644 (file)
@@ -47,6 +47,7 @@
 
 #define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
 
 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
 
@@ -586,8 +587,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
 
        ASSERT_RTNL();
 
-       if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
-           !ether_addr_equal(e->h_dest, ha))
+       if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
+           (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
                mlx5e_tc_encap_flows_del(priv, e);
 
        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
@@ -1395,30 +1396,19 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
        netdev->features |= netdev->hw_features;
 }
 
-static int mlx5e_rep_get_default_num_channels(struct mlx5_eswitch_rep *rep,
-                                             struct net_device *netdev)
-{
-       if (rep->vport == FDB_UPLINK_VPORT)
-               return mlx5e_get_netdev_max_channels(netdev);
-       else
-               return 1;
-}
-
 static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
                          struct net_device *netdev,
                          const struct mlx5e_profile *profile,
                          void *ppriv)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5e_rep_priv *rpriv = ppriv;
        int err;
 
        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
        if (err)
                return err;
 
-       priv->channels.params.num_channels =
-                       mlx5e_rep_get_default_num_channels(rpriv->rep, netdev);
+       priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
 
        mlx5e_build_rep_params(netdev);
        mlx5e_build_rep_netdev(netdev);
index a75aad0..cdce30a 100644 (file)
@@ -1190,7 +1190,7 @@ mpwrq_cqe_out:
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-       struct mlx5e_xdpsq *xdpsq;
+       struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
        struct mlx5_cqe64 *cqe;
        int work_done = 0;
 
@@ -1201,10 +1201,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
                work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
 
        cqe = mlx5_cqwq_get_cqe(&cq->wq);
-       if (!cqe)
+       if (!cqe) {
+               if (unlikely(work_done))
+                       goto out;
                return 0;
-
-       xdpsq = &rq->xdpsq;
+       }
 
        do {
                if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
@@ -1219,6 +1220,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
                rq->handle_rx_cqe(rq, cqe);
        } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
+out:
        if (xdpsq->doorbell) {
                mlx5e_xmit_xdp_doorbell(xdpsq);
                xdpsq->doorbell = false;
index 3071a44..d3fe48f 100644 (file)
@@ -75,7 +75,6 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
@@ -199,7 +198,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                        s->tx_nop               += sq_stats->nop;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
-                       s->tx_udp_seg_rem       += sq_stats->udp_seg_rem;
                        s->tx_queue_dropped     += sq_stats->dropped;
                        s->tx_cqe_err           += sq_stats->cqe_err;
                        s->tx_recover           += sq_stats->recover;
index 807e605..fe91ec0 100644 (file)
@@ -87,7 +87,6 @@ struct mlx5e_sw_stats {
        u64 tx_recover;
        u64 tx_cqes;
        u64 tx_queue_wake;
-       u64 tx_udp_seg_rem;
        u64 tx_cqe_err;
        u64 tx_xdp_xmit;
        u64 tx_xdp_full;
@@ -221,7 +220,6 @@ struct mlx5e_sq_stats {
        u64 csum_partial_inner;
        u64 added_vlan_packets;
        u64 nop;
-       u64 udp_seg_rem;
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_ooo;
        u64 tls_resync_bytes;
index c1a9120..59255ae 100644 (file)
@@ -903,9 +903,9 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
        struct mlx5_flow_handle *rule;
 
        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
-       slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-       slow_attr->split_count = 0,
-       slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN,
+       slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       slow_attr->split_count = 0;
+       slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
 
        rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
        if (!IS_ERR(rule))
@@ -920,6 +920,9 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
                                  struct mlx5_esw_flow_attr *slow_attr)
 {
        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
+       slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       slow_attr->split_count = 0;
+       slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
        mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
        flow->flags &= ~MLX5E_TC_FLOW_SLOW;
 }
@@ -941,11 +944,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
        int err = 0, encap_err = 0;
        int out_index;
 
-       /* if prios are not supported, keep the old behaviour of using same prio
-        * for all offloaded rules.
-        */
-       if (!mlx5_eswitch_prios_supported(esw))
-               attr->prio = 1;
+       if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+               NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
+               return -EOPNOTSUPP;
+       }
 
        if (attr->chain > max_chain) {
                NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
@@ -1163,10 +1165,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                flow->rule[0] = rule;
        }
 
-       if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
-               e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
-               mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
-       }
+       /* we know that the encap is valid */
+       e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+       mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
 }
 
 static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
@@ -2653,8 +2654,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
                                return -EOPNOTSUPP;
                        }
-                       action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-                                 MLX5_FLOW_CONTEXT_ACTION_COUNT;
+                       action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        attr->dest_chain = dest_chain;
 
                        continue;
@@ -2667,6 +2667,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
                return -EOPNOTSUPP;
 
+       if (attr->dest_chain) {
+               if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+                       NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+                       return -EOPNOTSUPP;
+               }
+               attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       }
+
        if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "current firmware doesn't support split rule for port mirroring");
index f21277e..79f122b 100644 (file)
@@ -452,7 +452,7 @@ static void del_sw_hw_rule(struct fs_node *node)
 
        if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
            --fte->dests_size) {
-               modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
+               modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
                update_fte = true;
        }
 out:
index 281aeb1..ddedf8a 100644 (file)
@@ -81,6 +81,7 @@ struct mlxsw_core {
        struct mlxsw_core_port *ports;
        unsigned int max_ports;
        bool reload_fail;
+       bool fw_flash_in_progress;
        unsigned long driver_priv[0];
        /* driver_priv has to be always the last item */
 };
@@ -428,12 +429,16 @@ struct mlxsw_reg_trans {
        struct rcu_head rcu;
 };
 
-#define MLXSW_EMAD_TIMEOUT_MS 200
+#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS  3000
+#define MLXSW_EMAD_TIMEOUT_MS                  200
 
 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
 {
        unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
 
+       if (trans->core->fw_flash_in_progress)
+               timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
+
        queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
 }
 
@@ -1891,6 +1896,18 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
 }
 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
 
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
+{
+       mlxsw_core->fw_flash_in_progress = true;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
+
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
+{
+       mlxsw_core->fw_flash_in_progress = false;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
+
 static int __init mlxsw_core_module_init(void)
 {
        int err;
index d811be8..4e114f3 100644 (file)
@@ -294,6 +294,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
                             u64 *p_single_size, u64 *p_double_size,
                             u64 *p_linear_size);
 
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
+
 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
                          enum mlxsw_res_id res_id);
 
index 4197b29..9bfcb9c 100644 (file)
@@ -316,8 +316,13 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
                },
                .mlxsw_sp = mlxsw_sp
        };
+       int err;
+
+       mlxsw_core_fw_flash_start(mlxsw_sp->core);
+       err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+       mlxsw_core_fw_flash_end(mlxsw_sp->core);
 
-       return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+       return err;
 }
 
 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
@@ -3671,6 +3676,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
        MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
        /* NVE traps */
        MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
+       MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
 };
 
 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
index 3b1e826..0a31fff 100644 (file)
@@ -1033,6 +1033,6 @@ void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
 {
        WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
        rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
-       mlxsw_sp->nve = NULL;
        kfree(mlxsw_sp->nve);
+       mlxsw_sp->nve = NULL;
 }
index 6f18f4d..451216d 100644 (file)
@@ -60,6 +60,7 @@ enum {
        MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
        MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
        MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
+       MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
        MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
        MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
        MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
index e8ca98c..20c9377 100644 (file)
@@ -802,14 +802,8 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
        u32 mac_addr_hi = 0;
        u32 mac_addr_lo = 0;
        u32 data;
-       int ret;
 
        netdev = adapter->netdev;
-       lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
-       ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
-                                      0, 1000, 20000, 100);
-       if (ret)
-               return ret;
 
        /* setup auto duplex, and speed detection */
        data = lan743x_csr_read(adapter, MAC_CR);
@@ -2719,8 +2713,9 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
        snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
                 "pci-%s", pci_name(adapter->pdev));
 
-       /* set to internal PHY id */
-       adapter->mdiobus->phy_mask = ~(u32)BIT(1);
+       if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
+               /* LAN7430 uses internal phy at address 1 */
+               adapter->mdiobus->phy_mask = ~(u32)BIT(1);
 
        /* register mdiobus */
        ret = mdiobus_register(adapter->mdiobus);
index 4c1fb7e..7cde387 100644 (file)
@@ -808,7 +808,7 @@ __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
        struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
        struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
        struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
-       u64 data0, data1 = 0, steer_ctrl = 0;
+       u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        enum vxge_hw_status status;
 
        status = vxge_hw_vpath_fw_api(vpath,
index c642fd8..2cdbf29 100644 (file)
@@ -343,13 +343,29 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
                        return -EOPNOTSUPP;
 
-               /* We need to store TCP flags in the IPv4 key space, thus
-                * we need to ensure we include a IPv4 key layer if we have
-                * not done so already.
+               /* We need to store TCP flags in the either the IPv4 or IPv6 key
+                * space, thus we need to ensure we include a IPv4/IPv6 key
+                * layer if we have not done so already.
                 */
-               if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
-                       key_layer |= NFP_FLOWER_LAYER_IPV4;
-                       key_size += sizeof(struct nfp_flower_ipv4);
+               if (!key_basic)
+                       return -EOPNOTSUPP;
+
+               if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
+                   !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
+                       switch (key_basic->n_proto) {
+                       case cpu_to_be16(ETH_P_IP):
+                               key_layer |= NFP_FLOWER_LAYER_IPV4;
+                               key_size += sizeof(struct nfp_flower_ipv4);
+                               break;
+
+                       case cpu_to_be16(ETH_P_IPV6):
+                               key_layer |= NFP_FLOWER_LAYER_IPV6;
+                               key_size += sizeof(struct nfp_flower_ipv6);
+                               break;
+
+                       default:
+                               return -EOPNOTSUPP;
+                       }
                }
        }
 
index 052b3d2..c662c6f 100644 (file)
@@ -912,7 +912,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-static void __init get_mac_address(struct net_device *dev)
+static void get_mac_address(struct net_device *dev)
 {
        struct w90p910_ether *ether = netdev_priv(dev);
        struct platform_device *pdev;
index 7e120b5..b13cfb4 100644 (file)
@@ -12837,8 +12837,9 @@ enum MFW_DRV_MSG_TYPE {
        MFW_DRV_MSG_BW_UPDATE10,
        MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
        MFW_DRV_MSG_BW_UPDATE11,
-       MFW_DRV_MSG_OEM_CFG_UPDATE,
+       MFW_DRV_MSG_RESERVED,
        MFW_DRV_MSG_GET_TLV_REQ,
+       MFW_DRV_MSG_OEM_CFG_UPDATE,
        MFW_DRV_MSG_MAX
 };
 
index 504c8f7..90afd51 100644 (file)
@@ -2506,6 +2506,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
                if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
                        DP_NOTICE(cdev,
                                  "Unable to map frag - dropping packet\n");
+                       rc = -ENOMEM;
                        goto err;
                }
 
index 18e39e5..5b0c32b 100644 (file)
@@ -6415,7 +6415,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                goto out;
        }
 
-       if (status & LinkChg)
+       if (status & LinkChg && tp->dev->phydev)
                phy_mac_interrupt(tp->dev->phydev);
 
        if (unlikely(status & RxFIFOOver &&
index e821ccc..0e0a078 100644 (file)
@@ -4224,6 +4224,7 @@ int stmmac_dvr_probe(struct device *device,
        priv->wq = create_singlethread_workqueue("stmmac_wq");
        if (!priv->wq) {
                dev_err(priv->device, "failed to create workqueue\n");
+               ret = -ENOMEM;
                goto error_wq;
        }
 
index 0ff5a40..b2ff903 100644 (file)
@@ -721,7 +721,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
 static void ca8210_rx_done(struct cas_control *cas_ctl)
 {
        u8 *buf;
-       u8 len;
+       unsigned int len;
        struct work_priv_container *mlme_reset_wpc;
        struct ca8210_priv *priv = cas_ctl->priv;
 
@@ -730,7 +730,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
        if (len > CA8210_SPI_BUF_SIZE) {
                dev_crit(
                        &priv->spi->dev,
-                       "Received packet len (%d) erroneously long\n",
+                       "Received packet len (%u) erroneously long\n",
                        len
                );
                goto finish;
index 51b5198..b6743f0 100644 (file)
@@ -492,7 +492,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
            !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
                return -EINVAL;
 
-       if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+       if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
                             info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
                             hwsim_edge_policy, NULL))
                return -EINVAL;
@@ -542,7 +542,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
            !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
                return -EINVAL;
 
-       if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+       if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
                             info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
                             hwsim_edge_policy, NULL))
                return -EINVAL;
index 54af2bd..5199000 100644 (file)
@@ -315,11 +315,8 @@ static int mdio_bus_phy_restore(struct device *dev)
        if (ret < 0)
                return ret;
 
-       /* The PHY needs to renegotiate. */
-       phydev->link = 0;
-       phydev->state = PHY_UP;
-
-       phy_start_machine(phydev);
+       if (phydev->attached_dev && phydev->adjust_link)
+               phy_start_machine(phydev);
 
        return 0;
 }
index 184c24b..d6916f7 100644 (file)
@@ -2807,6 +2807,12 @@ static int hso_get_config_data(struct usb_interface *interface)
                return -EIO;
        }
 
+       /* check if we have a valid interface */
+       if (if_num > 16) {
+               kfree(config_data);
+               return -EINVAL;
+       }
+
        switch (config_data[if_num]) {
        case 0x0:
                result = 0;
@@ -2877,10 +2883,18 @@ static int hso_probe(struct usb_interface *interface,
 
        /* Get the interface/port specification from either driver_info or from
         * the device itself */
-       if (id->driver_info)
+       if (id->driver_info) {
+               /* if_num is controlled by the device, driver_info is a 0 terminated
+                * array. Make sure, the access is in bounds! */
+               for (i = 0; i <= if_num; ++i)
+                       if (((u32 *)(id->driver_info))[i] == 0)
+                               goto exit;
                port_spec = ((u32 *)(id->driver_info))[if_num];
-       else
+       } else {
                port_spec = hso_get_config_data(interface);
+               if (port_spec < 0)
+                       goto exit;
+       }
 
        /* Check if we need to switch to alt interfaces prior to port
         * configuration */
index 3c8bdac..e96bc0c 100644 (file)
@@ -2325,6 +2325,10 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
        ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
        ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
 
+       /* Added to support MAC address changes */
+       ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+       ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
        return 0;
 }
 
index 72a55b6..c8872dd 100644 (file)
@@ -1117,6 +1117,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1435, 0xd181, 4)},    /* Wistron NeWeb D18Q1 */
        {QMI_FIXED_INTF(0x1435, 0xd181, 5)},    /* Wistron NeWeb D18Q1 */
        {QMI_FIXED_INTF(0x1435, 0xd191, 4)},    /* Wistron NeWeb D19Q1 */
+       {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
        {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
        {QMI_FIXED_INTF(0x16d8, 0x6007, 0)},    /* CMOTech CHE-628S */
        {QMI_FIXED_INTF(0x16d8, 0x6008, 0)},    /* CMOTech CMU-301 */
@@ -1229,6 +1230,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
        {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},    /* Telewell TW-3G HSPA+ */
        {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},    /* Telewell TW-3G HSPA+ */
        {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},    /* XS Stick W100-2 from 4G Systems */
index f1b5201..60dd1ec 100644 (file)
 #define USB_UPS_CTRL           0xd800
 #define USB_POWER_CUT          0xd80a
 #define USB_MISC_0             0xd81a
+#define USB_MISC_1             0xd81f
 #define USB_AFE_CTRL2          0xd824
 #define USB_UPS_CFG            0xd842
 #define USB_UPS_FLAGS          0xd848
@@ -555,6 +556,7 @@ enum spd_duplex {
 
 /* MAC PASSTHRU */
 #define AD_MASK                        0xfee0
+#define BND_MASK               0x0004
 #define EFUSE                  0xcfdb
 #define PASS_THRU_MASK         0x1
 
@@ -1150,7 +1152,7 @@ out1:
        return ret;
 }
 
-/* Devices containing RTL8153-AD can support a persistent
+/* Devices containing proper chips can support a persistent
  * host system provided MAC address.
  * Examples of this are Dell TB15 and Dell WD15 docks
  */
@@ -1165,13 +1167,23 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
 
        /* test for -AD variant of RTL8153 */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
-       if ((ocp_data & AD_MASK) != 0x1000)
-               return -ENODEV;
-
-       /* test for MAC address pass-through bit */
-       ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
-       if ((ocp_data & PASS_THRU_MASK) != 1)
-               return -ENODEV;
+       if ((ocp_data & AD_MASK) == 0x1000) {
+               /* test for MAC address pass-through bit */
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+               if ((ocp_data & PASS_THRU_MASK) != 1) {
+                       netif_dbg(tp, probe, tp->netdev,
+                                 "No efuse for RTL8153-AD MAC pass through\n");
+                       return -ENODEV;
+               }
+       } else {
+               /* test for RTL8153-BND */
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+               if ((ocp_data & BND_MASK) == 0) {
+                       netif_dbg(tp, probe, tp->netdev,
+                                 "Invalid variant for MAC pass through\n");
+                       return -ENODEV;
+               }
+       }
 
        /* returns _AUXMAC_#AABBCCDDEEFF# */
        status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
@@ -1217,9 +1229,8 @@ static int set_ethernet_addr(struct r8152 *tp)
        if (tp->version == RTL_VER_01) {
                ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
        } else {
-               /* if this is not an RTL8153-AD, no eFuse mac pass thru set,
-                * or system doesn't provide valid _SB.AMAC this will be
-                * be expected to non-zero
+               /* if device doesn't support MAC pass through this will
+                * be expected to be non-zero
                 */
                ret = vendor_mac_passthru_addr_read(tp, &sa);
                if (ret < 0)
index 71c3b7b..3d77363 100644 (file)
@@ -642,6 +642,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
        rd->remote_port = port;
        rd->remote_vni = vni;
        rd->remote_ifindex = ifindex;
+       rd->offloaded = false;
        return 1;
 }
 
@@ -3444,6 +3445,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb *f = NULL;
+       bool unregister = false;
        int err;
 
        err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3469,12 +3471,11 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
        err = register_netdevice(dev);
        if (err)
                goto errout;
+       unregister = true;
 
        err = rtnl_configure_link(dev, NULL);
-       if (err) {
-               unregister_netdevice(dev);
+       if (err)
                goto errout;
-       }
 
        /* notify default fdb entry */
        if (f)
@@ -3483,9 +3484,16 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 
        list_add(&vxlan->next, &vn->vxlan_list);
        return 0;
+
 errout:
+       /* unregister_netdevice() destroys the default FDB entry with deletion
+        * notification. But the addition notification was not sent yet, so
+        * destroy the entry by hand here.
+        */
        if (f)
                vxlan_fdb_destroy(vxlan, f, false, false);
+       if (unregister)
+               unregister_netdevice(dev);
        return err;
 }
 
@@ -3722,7 +3730,6 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        unsigned long old_age_interval;
        struct vxlan_rdst old_dst;
        struct vxlan_config conf;
-       struct vxlan_fdb *f = NULL;
        int err;
 
        err = vxlan_nl2conf(tb, data,
@@ -3753,20 +3760,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                                           true);
 
                if (!vxlan_addr_any(&dst->remote_ip)) {
-                       err = vxlan_fdb_create(vxlan, all_zeros_mac,
+                       err = vxlan_fdb_update(vxlan, all_zeros_mac,
                                               &dst->remote_ip,
                                               NUD_REACHABLE | NUD_PERMANENT,
+                                              NLM_F_APPEND | NLM_F_CREATE,
                                               vxlan->cfg.dst_port,
                                               dst->remote_vni,
                                               dst->remote_vni,
                                               dst->remote_ifindex,
-                                              NTF_SELF, &f);
+                                              NTF_SELF, false);
                        if (err) {
                                spin_unlock_bh(&vxlan->hash_lock);
                                return err;
                        }
-                       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
-                                        RTM_NEWNEIGH, true);
                }
                spin_unlock_bh(&vxlan->hash_lock);
        }
index 22cbe9a..399b501 100644 (file)
@@ -2422,6 +2422,28 @@ static int ath10k_core_reset_rx_filter(struct ath10k *ar)
        return 0;
 }
 
+static int ath10k_core_compat_services(struct ath10k *ar)
+{
+       struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+       /* all 10.x firmware versions support thermal throttling but don't
+        * advertise the support via service flags so we have to hardcode
+        * it here
+        */
+       switch (fw_file->wmi_op_version) {
+       case ATH10K_FW_WMI_OP_VERSION_10_1:
+       case ATH10K_FW_WMI_OP_VERSION_10_2:
+       case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+       case ATH10K_FW_WMI_OP_VERSION_10_4:
+               set_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
                      const struct ath10k_fw_components *fw)
 {
@@ -2621,6 +2643,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
                goto err_hif_stop;
        }
 
+       status = ath10k_core_compat_services(ar);
+       if (status) {
+               ath10k_err(ar, "compat services failed: %d\n", status);
+               goto err_hif_stop;
+       }
+
        /* Some firmware revisions do not properly set up hardware rx filter
         * registers.
         *
index 15964b3..02988fc 100644 (file)
@@ -2578,8 +2578,9 @@ int ath10k_debug_register(struct ath10k *ar)
        debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
                            &fops_pktlog_filter);
 
-       debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
-                           &fops_quiet_period);
+       if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+               debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
+                                   &fops_quiet_period);
 
        debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
                            &fops_tpc_stats);
index aa8978a..fe35edc 100644 (file)
@@ -140,6 +140,9 @@ void ath10k_thermal_set_throttling(struct ath10k *ar)
 
        lockdep_assert_held(&ar->conf_mutex);
 
+       if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+               return;
+
        if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
                return;
 
@@ -165,6 +168,9 @@ int ath10k_thermal_register(struct ath10k *ar)
        struct device *hwmon_dev;
        int ret;
 
+       if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+               return 0;
+
        cdev = thermal_cooling_device_register("ath10k_thermal", ar,
                                               &ath10k_thermal_ops);
 
@@ -216,6 +222,9 @@ err_cooling_destroy:
 
 void ath10k_thermal_unregister(struct ath10k *ar)
 {
+       if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+               return;
+
        sysfs_remove_link(&ar->dev->kobj, "cooling_device");
        thermal_cooling_device_unregister(ar->thermal.cdev);
 }
index bf8a432..e07e990 100644 (file)
@@ -1564,6 +1564,9 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
        SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
               WMI_SERVICE_SPOOF_MAC_SUPPORT,
               WMI_TLV_MAX_SERVICE);
+       SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
+              WMI_SERVICE_THERM_THROT,
+              WMI_TLV_MAX_SERVICE);
 }
 
 #undef SVCMAP
index 58e33ab..66222ee 100644 (file)
@@ -205,6 +205,7 @@ enum wmi_service {
        WMI_SERVICE_SPOOF_MAC_SUPPORT,
        WMI_SERVICE_TX_DATA_ACK_RSSI,
        WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+       WMI_SERVICE_THERM_THROT,
 
        /* keep last */
        WMI_SERVICE_MAX,
index c5168ab..1a9edd8 100644 (file)
@@ -885,6 +885,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
        int ret, i, j;
        u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 
+       /*
+        * This command is not supported on earlier firmware versions.
+        * Unfortunately, we don't have a TLV API flag to rely on, so
+        * rely on the major version which is in the first byte of
+        * ucode_ver.
+        */
+       if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+               return 0;
+
        ret = iwl_mvm_sar_get_wgds_table(mvm);
        if (ret < 0) {
                IWL_DEBUG_RADIO(mvm,
index e2addd8..5d75c97 100644 (file)
@@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
                                "Send delba to tid=%d, %pM\n",
                                tid, rx_reor_tbl_ptr->ta);
                        mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
-                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-                                              flags);
-                       return;
+                       goto exit;
                }
        }
+exit:
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
index 8e63d14..5380fba 100644 (file)
@@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
  * There could be holes in the buffer, which are skipped by the function.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 {
        int pkt_to_send, i;
        void *rx_tmp_ptr;
+       unsigned long flags;
 
        pkt_to_send = (start_win > tbl->start_win) ?
                      min((start_win - tbl->start_win), tbl->win_size) :
                      tbl->win_size;
 
        for (i = 0; i < pkt_to_send; ++i) {
+               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                rx_tmp_ptr = NULL;
                if (tbl->rx_reorder_ptr[i]) {
                        rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                        tbl->rx_reorder_ptr[i] = NULL;
                }
+               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                if (rx_tmp_ptr)
                        mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
        }
 
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        /*
         * We don't have a circular buffer, hence use rotation to simulate
         * circular buffer
@@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
        }
 
        tbl->start_win = start_win;
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
  * The start window is adjusted automatically when a hole is located.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 {
        int i, j, xchg;
        void *rx_tmp_ptr;
+       unsigned long flags;
 
        for (i = 0; i < tbl->win_size; ++i) {
-               if (!tbl->rx_reorder_ptr[i])
+               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+               if (!tbl->rx_reorder_ptr[i]) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
                        break;
+               }
                rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                tbl->rx_reorder_ptr[i] = NULL;
+               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
        }
 
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        /*
         * We don't have a circular buffer, hence use rotation to simulate
         * circular buffer
@@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
                }
        }
        tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
  *
  * The function stops the associated timer and dispatches all the
  * pending packets in the Rx reorder table before deletion.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 
        del_timer_sync(&tbl->timer_context.timer);
        tbl->timer_context.timer_is_set = false;
+
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        list_del(&tbl->list);
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
        kfree(tbl->rx_reorder_ptr);
        kfree(tbl);
 
@@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 /*
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
        struct mwifiex_rx_reorder_tbl *tbl;
+       unsigned long flags;
 
-       list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
-               if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+       list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
+               if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
                        return tbl;
+               }
+       }
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        return NULL;
 }
@@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
                return;
 
        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-       list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
-               if (!memcmp(tbl->ta, ta, ETH_ALEN))
+       list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+               if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
                        mwifiex_del_rx_reorder_entry(priv, tbl);
+                       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+               }
+       }
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        return;
@@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static int
 mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
 {
        struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
+       struct mwifiex_private *priv = ctx->priv;
+       unsigned long flags;
        int i;
 
-       for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
-               if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+       for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
+               if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
                        return i;
+               }
+       }
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        return -1;
 }
@@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t)
        struct reorder_tmr_cnxt *ctx =
                from_timer(ctx, t, timer);
        int start_win, seq_num;
-       unsigned long flags;
 
        ctx->timer_is_set = false;
-       spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
        seq_num = mwifiex_11n_find_last_seq_num(ctx);
 
-       if (seq_num < 0) {
-               spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
+       if (seq_num < 0)
                return;
-       }
 
        mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
        start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
        mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
                                                 start_win);
-       spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
         * If we get a TID, ta pair which is already present dispatch all the
         * the packets and move the window size until the ssn
         */
-       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
        if (tbl) {
                mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                return;
        }
-       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
        /* if !tbl then create one */
        new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
        if (!new_node)
@@ -551,20 +570,16 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
        int prev_start_win, start_win, end_win, win_size;
        u16 pkt_index;
        bool init_window_shift = false;
-       unsigned long flags;
        int ret = 0;
 
-       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
        if (!tbl) {
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                if (pkt_type != PKT_TYPE_BAR)
                        mwifiex_11n_dispatch_pkt(priv, payload);
                return ret;
        }
 
        if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                mwifiex_11n_dispatch_pkt(priv, payload);
                return ret;
        }
@@ -651,8 +666,6 @@ done:
        if (!tbl->timer_context.timer_is_set ||
            prev_start_win != tbl->start_win)
                mwifiex_11n_rxreorder_timer_restart(tbl);
-
-       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
        return ret;
 }
 
@@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
                    peer_mac, tid, initiator);
 
        if (cleanup_rx_reorder_tbl) {
-               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                                                 peer_mac);
                if (!tbl) {
-                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-                                              flags);
                        mwifiex_dbg(priv->adapter, EVENT,
                                    "event: TID, TA not found in table\n");
                        return;
                }
                mwifiex_del_rx_reorder_entry(priv, tbl);
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
        } else {
                ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
                if (!ptx_tbl) {
@@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
        int tid, win_size;
        struct mwifiex_rx_reorder_tbl *tbl;
        uint16_t block_ack_param_set;
-       unsigned long flags;
 
        block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
 
@@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
                mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
                            add_ba_rsp->peer_mac_addr, tid);
 
-               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                                     add_ba_rsp->peer_mac_addr);
                if (tbl)
                        mwifiex_del_rx_reorder_entry(priv, tbl);
 
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                return 0;
        }
 
        win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
                    >> BLOCKACKPARAM_WINSIZE_POS;
 
-       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                             add_ba_rsp->peer_mac_addr);
        if (tbl) {
@@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
                else
                        tbl->amsdu = false;
        }
-       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        mwifiex_dbg(priv->adapter, CMD,
                    "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
 
        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        list_for_each_entry_safe(del_tbl_ptr, tmp_node,
-                                &priv->rx_reorder_tbl_ptr, list)
+                                &priv->rx_reorder_tbl_ptr, list) {
+               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
+               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+       }
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
@@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
        int tlv_buf_left = len;
        int ret;
        u8 *tmp;
-       unsigned long flags;
 
        mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
                         event_buf, len);
@@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
                            tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
                            tlv_bitmap_len);
 
-               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                rx_reor_tbl_ptr =
                        mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
                                                       tlv_rxba->mac);
                if (!rx_reor_tbl_ptr) {
-                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-                                              flags);
                        mwifiex_dbg(priv->adapter, ERROR,
                                    "Can not find rx_reorder_tbl!");
                        return;
                }
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
                for (i = 0; i < tlv_bitmap_len; i++) {
                        for (j = 0 ; j < 8; j++) {
index a83c5af..5ce85d5 100644 (file)
@@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
                spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
        }
 
-       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        if (!priv->ap_11n_enabled ||
            (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
            (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
                ret = mwifiex_handle_uap_rx_forward(priv, skb);
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                return ret;
        }
-       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        /* Reorder and send to kernel */
        pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
index 7cbce03..aa426b8 100644 (file)
@@ -400,7 +400,12 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
 
        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
-               struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+               struct mt76_txq *mtxq;
+
+               if (!txq)
+                       continue;
+
+               mtxq = (struct mt76_txq *)txq->drv_priv;
 
                spin_lock_bh(&mtxq->hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
index f4122c8..ef9b502 100644 (file)
@@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        if (rtl_c2h_fast_cmd(hw, skb)) {
                rtl_c2h_content_parsing(hw, skb);
+               kfree_skb(skb);
                return;
        }
 
index a8303af..c914c24 100644 (file)
@@ -903,7 +903,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-                       BUG_ON(pull_to <= skb_headlen(skb));
+                       BUG_ON(pull_to < skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
index a90a919..fed29de 100644 (file)
@@ -1064,7 +1064,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
                .regs           = aer_regs,
        };
 
-       if (kfifo_in_spinlocked(&aer_recover_ring, &entry, sizeof(entry),
+       if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
                                 &aer_recover_ring_lock))
                schedule_work(&aer_recover_work);
        else
index 53d4490..ea87d73 100644 (file)
@@ -191,7 +191,8 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
                case PIN_CONFIG_BIAS_DISABLE:
                        dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
 
-                       meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+                       meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg,
+                                              &bit);
                        ret = regmap_update_bits(pc->reg_pullen, reg,
                                                 BIT(bit), 0);
                        if (ret)
index 6838b38..1bfb0ae 100644 (file)
@@ -33,7 +33,7 @@ enum {
        }
 
 
-#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9)        \
        {                                               \
                .name = "gpio" #id,                     \
                .pins = gpio##id##_pins,                \
@@ -51,11 +51,12 @@ enum {
                        msm_mux_##f9                    \
                },                                      \
                .nfuncs = 10,                           \
-               .ctl_reg = base + REG_SIZE * id,        \
-               .io_reg = base + 0x4 + REG_SIZE * id,           \
-               .intr_cfg_reg = base + 0x8 + REG_SIZE * id,             \
-               .intr_status_reg = base + 0xc + REG_SIZE * id,  \
-               .intr_target_reg = base + 0x8 + REG_SIZE * id,  \
+               .ctl_reg = REG_SIZE * id,               \
+               .io_reg = 0x4 + REG_SIZE * id,          \
+               .intr_cfg_reg = 0x8 + REG_SIZE * id,    \
+               .intr_status_reg = 0xc + REG_SIZE * id, \
+               .intr_target_reg = 0x8 + REG_SIZE * id, \
+               .tile = _tile,                  \
                .mux_bit = 2,                   \
                .pull_bit = 0,                  \
                .drv_bit = 6,                   \
@@ -82,6 +83,7 @@ enum {
                .intr_cfg_reg = 0,                      \
                .intr_status_reg = 0,                   \
                .intr_target_reg = 0,                   \
+               .tile = NORTH,                          \
                .mux_bit = -1,                          \
                .pull_bit = pull,                       \
                .drv_bit = drv,                         \
@@ -1397,13 +1399,13 @@ static const struct msm_pingroup sdm660_groups[] = {
        PINGROUP(111, SOUTH, _, _, _, _, _, _, _, _, _),
        PINGROUP(112, SOUTH, _, _, _, _, _, _, _, _, _),
        PINGROUP(113, SOUTH, _, _, _, _, _, _, _, _, _),
-       SDC_QDSD_PINGROUP(sdc1_clk, 0x99a000, 13, 6),
-       SDC_QDSD_PINGROUP(sdc1_cmd, 0x99a000, 11, 3),
-       SDC_QDSD_PINGROUP(sdc1_data, 0x99a000, 9, 0),
-       SDC_QDSD_PINGROUP(sdc2_clk, 0x99b000, 14, 6),
-       SDC_QDSD_PINGROUP(sdc2_cmd, 0x99b000, 11, 3),
-       SDC_QDSD_PINGROUP(sdc2_data, 0x99b000, 9, 0),
-       SDC_QDSD_PINGROUP(sdc1_rclk, 0x99a000, 15, 0),
+       SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+       SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+       SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+       SDC_QDSD_PINGROUP(sdc2_clk, 0x9b000, 14, 6),
+       SDC_QDSD_PINGROUP(sdc2_cmd, 0x9b000, 11, 3),
+       SDC_QDSD_PINGROUP(sdc2_data, 0x9b000, 9, 0),
+       SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
 };
 
 static const struct msm_pinctrl_soc_data sdm660_pinctrl = {
index 6624499..4ada803 100644 (file)
@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),  /* PH_EINT11 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
 };
 
 static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
index cd160f2..bcd30e2 100644 (file)
@@ -2364,7 +2364,7 @@ static int _bnx2fc_create(struct net_device *netdev,
        if (!interface) {
                printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
                rc = -ENOMEM;
-               goto ifput_err;
+               goto netdev_err;
        }
 
        if (is_vlan_dev(netdev)) {
index b658b9a..d0ecc72 100644 (file)
@@ -4886,10 +4886,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
                        fcport->d_id = e->u.new_sess.id;
                        fcport->flags |= FCF_FABRIC_DEVICE;
                        fcport->fw_login_state = DSC_LS_PLOGI_PEND;
-                       if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
+                       if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
                                fcport->fc4_type = FC4_TYPE_FCP_SCSI;
 
-                       if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
+                       if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
                                fcport->fc4_type = FC4_TYPE_OTHER;
                                fcport->fc4f_nvme = FC4_TYPE_NVME;
                        }
index a7a34e8..3252efa 100644 (file)
@@ -3,6 +3,7 @@ config VIDEO_SUNXI_CEDRUS
        depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
        depends on HAS_DMA
        depends on OF
+       depends on MEDIA_CONTROLLER_REQUEST_API
        select SUNXI_SRAM
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
index 32adbcb..07520a2 100644 (file)
@@ -255,10 +255,10 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
 
        res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
        dev->base = devm_ioremap_resource(dev->dev, res);
-       if (!dev->base) {
+       if (IS_ERR(dev->base)) {
                v4l2_err(&dev->v4l2_dev, "Failed to map registers\n");
 
-               ret = -ENOMEM;
+               ret = PTR_ERR(dev->base);
                goto err_sram;
        }
 
index c4111a9..2d26ae8 100644 (file)
@@ -424,7 +424,7 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
        struct platform_device *pdev = data->pdev;
        struct device *dev = &pdev->dev;
 
-       data->nr_sensors = 2;
+       data->nr_sensors = 1;
 
        data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
                                    data->nr_sensors, GFP_KERNEL);
@@ -589,7 +589,7 @@ static int hisi_thermal_probe(struct platform_device *pdev)
                        return ret;
                }
 
-               ret = platform_get_irq_byname(pdev, sensor->irq_name);
+               ret = platform_get_irq(pdev, 0);
                if (ret < 0)
                        return ret;
 
index 47623da..bbd73c5 100644 (file)
@@ -241,8 +241,8 @@ static int stm_thermal_read_factory_settings(struct stm_thermal_sensor *sensor)
                sensor->t0 = TS1_T0_VAL1;
 
        /* Retrieve fmt0 and put it on Hz */
-       sensor->fmt0 = ADJUST * readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET)
-                                             & TS1_FMT0_MASK;
+       sensor->fmt0 = ADJUST * (readl_relaxed(sensor->base +
+                                DTS_T0VALR1_OFFSET) & TS1_FMT0_MASK);
 
        /* Retrieve ramp coefficient */
        sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) &
@@ -532,6 +532,10 @@ static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
        if (ret)
                return ret;
 
+       ret = stm_thermal_read_factory_settings(sensor);
+       if (ret)
+               goto thermal_unprepare;
+
        ret = stm_thermal_calibration(sensor);
        if (ret)
                goto thermal_unprepare;
@@ -636,10 +640,6 @@ static int stm_thermal_probe(struct platform_device *pdev)
        /* Populate sensor */
        sensor->base = base;
 
-       ret = stm_thermal_read_factory_settings(sensor);
-       if (ret)
-               return ret;
-
        sensor->clk = devm_clk_get(&pdev->dev, "pclk");
        if (IS_ERR(sensor->clk)) {
                dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n",
index f776b3e..3f779d2 100644 (file)
@@ -552,30 +552,11 @@ static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
  */
 static void serial8250_clear_fifos(struct uart_8250_port *p)
 {
-       unsigned char fcr;
-       unsigned char clr_mask = UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT;
-
        if (p->capabilities & UART_CAP_FIFO) {
-               /*
-                * Make sure to avoid changing FCR[7:3] and ENABLE_FIFO bits.
-                * In case ENABLE_FIFO is not set, there is nothing to flush
-                * so just return. Furthermore, on certain implementations of
-                * the 8250 core, the FCR[7:3] bits may only be changed under
-                * specific conditions and changing them if those conditions
-                * are not met can have nasty side effects. One such core is
-                * the 8250-omap present in TI AM335x.
-                */
-               fcr = serial_in(p, UART_FCR);
-
-               /* FIFO is not enabled, there's nothing to clear. */
-               if (!(fcr & UART_FCR_ENABLE_FIFO))
-                       return;
-
-               fcr |= clr_mask;
-               serial_out(p, UART_FCR, fcr);
-
-               fcr &= ~clr_mask;
-               serial_out(p, UART_FCR, fcr);
+               serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
+               serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
+                              UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+               serial_out(p, UART_FCR, 0);
        }
 }
 
@@ -1467,7 +1448,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
         * Enable previously disabled RX interrupts.
         */
        if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
-               serial8250_clear_fifos(p);
+               serial8250_clear_and_reinit_fifos(p);
 
                p->ier |= UART_IER_RLSI | UART_IER_RDI;
                serial_port_out(&p->port, UART_IER, p->ier);
index c2493d0..3c5169e 100644 (file)
@@ -204,9 +204,11 @@ hv_uio_open(struct uio_info *info, struct inode *inode)
        if (atomic_inc_return(&pdata->refcnt) != 1)
                return 0;
 
+       vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
+       vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
+
        ret = vmbus_connect_ring(dev->channel,
                                 hv_uio_channel_cb, dev->channel);
-
        if (ret == 0)
                dev->channel->inbound.ring_buffer->interrupt_mask = 1;
        else
@@ -334,9 +336,6 @@ hv_uio_probe(struct hv_device *dev,
                goto fail_close;
        }
 
-       vmbus_set_chn_rescind_callback(channel, hv_uio_rescind);
-       vmbus_set_sc_create_callback(channel, hv_uio_new_channel);
-
        ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
        if (ret)
                dev_notice(&dev->device,
index 94aca1b..01b5818 100644 (file)
@@ -1507,7 +1507,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                portsc_buf[port_index] = 0;
 
                /* Bail out if a USB3 port has a new device in link training */
-               if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+               if ((hcd->speed >= HCD_USB3) &&
+                   (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
                        bus_state->bus_suspended = 0;
                        spin_unlock_irqrestore(&xhci->lock, flags);
                        xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
index c3515ba..011dd45 100644 (file)
@@ -1863,6 +1863,8 @@ struct xhci_hcd {
        unsigned                sw_lpm_support:1;
        /* support xHCI 1.0 spec USB2 hardware LPM */
        unsigned                hw_lpm_support:1;
+       /* Broken Suspend flag for SNPS Suspend resume issue */
+       unsigned                broken_suspend:1;
        /* cached usb2 extened protocol capabilites */
        u32                     *ext_caps;
        unsigned int            num_ext_caps;
@@ -1880,8 +1882,6 @@ struct xhci_hcd {
        void                    *dbc;
        /* platform-specific data -- must come last */
        unsigned long           priv[0] __aligned(sizeof(s64));
-       /* Broken Suspend flag for SNPS Suspend resume issue */
-       u8                      broken_suspend;
 };
 
 /* Platform specific overrides to generic XHCI hc_driver ops */
index e24ff16..1ce27f3 100644 (file)
@@ -1164,6 +1164,10 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x1900),                          /* Telit LN940 (QMI) */
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),    /* Telit LN940 (MBIM) */
+         .driver_info = NCTRL(0) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) },
@@ -1328,6 +1332,7 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) },    /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
          .driver_info = RSVD(4) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
@@ -1531,6 +1536,7 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(2) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
          .driver_info = RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },    /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1758,6 +1764,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
        { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
          .driver_info = RSVD(5) | RSVD(6) },
+       { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) },   /* Simcom SIM7500/SIM7600 MBIM mode */
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
          .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1940,7 +1947,14 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) },    /* HP lt2523 (Novatel E371) */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) },    /* HP lt4132 (Huawei ME906s-158) */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+       { USB_DEVICE(0x1508, 0x1001),                                           /* Fibocom NL668 */
+         .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index d919284..02b699a 100644 (file)
@@ -517,7 +517,13 @@ static void vhost_net_busy_poll(struct vhost_net *net,
        struct socket *sock;
        struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
 
-       mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX);
+       /* Try to hold the vq mutex of the paired virtqueue. We can't
+        * use mutex_lock() here since we could not guarantee a
+        * consistent lock ordering.
+        */
+       if (!mutex_trylock(&vq->mutex))
+               return;
+
        vhost_disable_notify(&net->dev, vq);
        sock = rvq->private_data;
 
index 6b98d8e..55e5aa6 100644 (file)
@@ -295,11 +295,8 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
 {
        int i;
 
-       for (i = 0; i < d->nvqs; ++i) {
-               mutex_lock(&d->vqs[i]->mutex);
+       for (i = 0; i < d->nvqs; ++i)
                __vhost_vq_meta_reset(d->vqs[i]);
-               mutex_unlock(&d->vqs[i]->mutex);
-       }
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -895,6 +892,20 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 #define vhost_get_used(vq, x, ptr) \
        vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
 
+static void vhost_dev_lock_vqs(struct vhost_dev *d)
+{
+       int i = 0;
+       for (i = 0; i < d->nvqs; ++i)
+               mutex_lock_nested(&d->vqs[i]->mutex, i);
+}
+
+static void vhost_dev_unlock_vqs(struct vhost_dev *d)
+{
+       int i = 0;
+       for (i = 0; i < d->nvqs; ++i)
+               mutex_unlock(&d->vqs[i]->mutex);
+}
+
 static int vhost_new_umem_range(struct vhost_umem *umem,
                                u64 start, u64 size, u64 end,
                                u64 userspace_addr, int perm)
@@ -976,6 +987,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
        int ret = 0;
 
        mutex_lock(&dev->mutex);
+       vhost_dev_lock_vqs(dev);
        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
                if (!dev->iotlb) {
@@ -1009,6 +1021,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
                break;
        }
 
+       vhost_dev_unlock_vqs(dev);
        mutex_unlock(&dev->mutex);
 
        return ret;
@@ -2220,6 +2233,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
                return -EFAULT;
        }
        if (unlikely(vq->log_used)) {
+               /* Make sure used idx is seen before log. */
+               smp_wmb();
                /* Log used index update. */
                log_write(vq->log_base,
                          vq->log_addr + offsetof(struct vring_used, idx),
index 678b270..f9ef067 100644 (file)
@@ -562,7 +562,30 @@ static int pwm_backlight_probe(struct platform_device *pdev)
                goto err_alloc;
        }
 
-       if (!data->levels) {
+       if (data->levels) {
+               /*
+                * For the DT case, only when brightness levels are defined
+                * data->levels is filled. For the non-DT case, data->levels
+                * can come from platform data, however is not usual.
+                */
+               for (i = 0; i <= data->max_brightness; i++) {
+                       if (data->levels[i] > pb->scale)
+                               pb->scale = data->levels[i];
+
+                       pb->levels = data->levels;
+               }
+       } else if (!data->max_brightness) {
+               /*
+                * If no brightness levels are provided and max_brightness is
+                * not set, use the default brightness table. For the DT case,
+                * max_brightness is set to 0 when brightness levels is not
+                * specified. For the non-DT case, max_brightness is usually
+                * set to some value.
+                */
+
+               /* Get the PWM period (in nanoseconds) */
+               pwm_get_state(pb->pwm, &state);
+
                ret = pwm_backlight_brightness_default(&pdev->dev, data,
                                                       state.period);
                if (ret < 0) {
@@ -570,13 +593,19 @@ static int pwm_backlight_probe(struct platform_device *pdev)
                                "failed to setup default brightness table\n");
                        goto err_alloc;
                }
-       }
 
-       for (i = 0; i <= data->max_brightness; i++) {
-               if (data->levels[i] > pb->scale)
-                       pb->scale = data->levels[i];
+               for (i = 0; i <= data->max_brightness; i++) {
+                       if (data->levels[i] > pb->scale)
+                               pb->scale = data->levels[i];
 
-               pb->levels = data->levels;
+                       pb->levels = data->levels;
+               }
+       } else {
+               /*
+                * That only happens for the non-DT case, where platform data
+                * sets the max_brightness value.
+                */
+               pb->scale = data->max_brightness;
        }
 
        pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
index 97f9835..aac9659 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -45,6 +45,7 @@
 
 #include <asm/kmap_types.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 
 #include "internal.h"
 
@@ -1038,6 +1039,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
        if (!table || id >= table->nr)
                goto out;
 
+       id = array_index_nospec(id, table->nr);
        ctx = rcu_dereference(table->table[id]);
        if (ctx && ctx->user_id == ctx_id) {
                if (percpu_ref_tryget_live(&ctx->users))
index b5ecd6f..4e9a7cc 100644 (file)
@@ -563,8 +563,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",noacl");
 #endif
 
-       if (fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM)
-               seq_puts(m, ",nocopyfrom");
+       if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
+               seq_puts(m, ",copyfrom");
 
        if (fsopt->mds_namespace)
                seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
index c005a54..79a265b 100644 (file)
@@ -42,7 +42,9 @@
 #define CEPH_MOUNT_OPT_NOQUOTADF       (1<<13) /* no root dir quota in statfs */
 #define CEPH_MOUNT_OPT_NOCOPYFROM      (1<<14) /* don't use RADOS 'copy-from' op */
 
-#define CEPH_MOUNT_OPT_DEFAULT    CEPH_MOUNT_OPT_DCACHE
+#define CEPH_MOUNT_OPT_DEFAULT                 \
+       (CEPH_MOUNT_OPT_DCACHE |                \
+        CEPH_MOUNT_OPT_NOCOPYFROM)
 
 #define ceph_set_mount_opt(fsc, opt) \
        (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt;
index 47395b0..e909678 100644 (file)
@@ -1119,8 +1119,10 @@ static int fuse_permission(struct inode *inode, int mask)
        if (fc->default_permissions ||
            ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
                struct fuse_inode *fi = get_fuse_inode(inode);
+               u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;
 
-               if (time_before64(fi->i_time, get_jiffies_64())) {
+               if (perm_mask & READ_ONCE(fi->inval_mask) ||
+                   time_before64(fi->i_time, get_jiffies_64())) {
                        refreshed = true;
 
                        err = fuse_perm_getattr(inode, mask);
@@ -1241,7 +1243,7 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
 
 static int fuse_dir_release(struct inode *inode, struct file *file)
 {
-       fuse_release_common(file, FUSE_RELEASEDIR);
+       fuse_release_common(file, true);
 
        return 0;
 }
@@ -1249,7 +1251,25 @@ static int fuse_dir_release(struct inode *inode, struct file *file)
 static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
                          int datasync)
 {
-       return fuse_fsync_common(file, start, end, datasync, 1);
+       struct inode *inode = file->f_mapping->host;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       int err;
+
+       if (is_bad_inode(inode))
+               return -EIO;
+
+       if (fc->no_fsyncdir)
+               return 0;
+
+       inode_lock(inode);
+       err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR);
+       if (err == -ENOSYS) {
+               fc->no_fsyncdir = 1;
+               err = 0;
+       }
+       inode_unlock(inode);
+
+       return err;
 }
 
 static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
index b52f9ba..ffaffe1 100644 (file)
@@ -89,12 +89,12 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
        iput(req->misc.release.inode);
 }
 
-static void fuse_file_put(struct fuse_file *ff, bool sync)
+static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
 {
        if (refcount_dec_and_test(&ff->count)) {
                struct fuse_req *req = ff->reserved_req;
 
-               if (ff->fc->no_open) {
+               if (ff->fc->no_open && !isdir) {
                        /*
                         * Drop the release request when client does not
                         * implement 'open'
@@ -247,10 +247,11 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
        req->in.args[0].value = inarg;
 }
 
-void fuse_release_common(struct file *file, int opcode)
+void fuse_release_common(struct file *file, bool isdir)
 {
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req = ff->reserved_req;
+       int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
 
        fuse_prepare_release(ff, file->f_flags, opcode);
 
@@ -272,7 +273,7 @@ void fuse_release_common(struct file *file, int opcode)
         * synchronous RELEASE is allowed (and desirable) in this case
         * because the server can be trusted not to screw up.
         */
-       fuse_file_put(ff, ff->fc->destroy_req != NULL);
+       fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir);
 }
 
 static int fuse_open(struct inode *inode, struct file *file)
@@ -288,7 +289,7 @@ static int fuse_release(struct inode *inode, struct file *file)
        if (fc->writeback_cache)
                write_inode_now(inode, 1);
 
-       fuse_release_common(file, FUSE_RELEASE);
+       fuse_release_common(file, false);
 
        /* return value is ignored by VFS */
        return 0;
@@ -302,7 +303,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
         * iput(NULL) is a no-op and since the refcount is 1 and everything's
         * synchronous, we are fine with not doing igrab() here"
         */
-       fuse_file_put(ff, true);
+       fuse_file_put(ff, true, false);
 }
 EXPORT_SYMBOL_GPL(fuse_sync_release);
 
@@ -441,13 +442,30 @@ static int fuse_flush(struct file *file, fl_owner_t id)
 }
 
 int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
-                     int datasync, int isdir)
+                     int datasync, int opcode)
 {
        struct inode *inode = file->f_mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        FUSE_ARGS(args);
        struct fuse_fsync_in inarg;
+
+       memset(&inarg, 0, sizeof(inarg));
+       inarg.fh = ff->fh;
+       inarg.fsync_flags = datasync ? 1 : 0;
+       args.in.h.opcode = opcode;
+       args.in.h.nodeid = get_node_id(inode);
+       args.in.numargs = 1;
+       args.in.args[0].size = sizeof(inarg);
+       args.in.args[0].value = &inarg;
+       return fuse_simple_request(fc, &args);
+}
+
+static int fuse_fsync(struct file *file, loff_t start, loff_t end,
+                     int datasync)
+{
+       struct inode *inode = file->f_mapping->host;
+       struct fuse_conn *fc = get_fuse_conn(inode);
        int err;
 
        if (is_bad_inode(inode))
@@ -479,34 +497,18 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
        if (err)
                goto out;
 
-       if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
+       if (fc->no_fsync)
                goto out;
 
-       memset(&inarg, 0, sizeof(inarg));
-       inarg.fh = ff->fh;
-       inarg.fsync_flags = datasync ? 1 : 0;
-       args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
-       args.in.h.nodeid = get_node_id(inode);
-       args.in.numargs = 1;
-       args.in.args[0].size = sizeof(inarg);
-       args.in.args[0].value = &inarg;
-       err = fuse_simple_request(fc, &args);
+       err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
        if (err == -ENOSYS) {
-               if (isdir)
-                       fc->no_fsyncdir = 1;
-               else
-                       fc->no_fsync = 1;
+               fc->no_fsync = 1;
                err = 0;
        }
 out:
        inode_unlock(inode);
-       return err;
-}
 
-static int fuse_fsync(struct file *file, loff_t start, loff_t end,
-                     int datasync)
-{
-       return fuse_fsync_common(file, start, end, datasync, 0);
+       return err;
 }
 
 void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
@@ -807,7 +809,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
                put_page(page);
        }
        if (req->ff)
-               fuse_file_put(req->ff, false);
+               fuse_file_put(req->ff, false, false);
 }
 
 static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1460,7 +1462,7 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
                __free_page(req->pages[i]);
 
        if (req->ff)
-               fuse_file_put(req->ff, false);
+               fuse_file_put(req->ff, false, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@@ -1619,7 +1621,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
        ff = __fuse_write_file_get(fc, fi);
        err = fuse_flush_times(inode, ff);
        if (ff)
-               fuse_file_put(ff, 0);
+               fuse_file_put(ff, false, false);
 
        return err;
 }
@@ -1940,7 +1942,7 @@ static int fuse_writepages(struct address_space *mapping,
                err = 0;
        }
        if (data.ff)
-               fuse_file_put(data.ff, false);
+               fuse_file_put(data.ff, false, false);
 
        kfree(data.orig_pages);
 out:
index e9f712e..2f2c92e 100644 (file)
@@ -822,13 +822,13 @@ void fuse_sync_release(struct fuse_file *ff, int flags);
 /**
  * Send RELEASE or RELEASEDIR request
  */
-void fuse_release_common(struct file *file, int opcode);
+void fuse_release_common(struct file *file, bool isdir);
 
 /**
  * Send FSYNC or FSYNCDIR request
  */
 int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
-                     int datasync, int isdir);
+                     int datasync, int opcode);
 
 /**
  * Notify poll wakeup
index 0b94b23..568abed 100644 (file)
@@ -115,7 +115,7 @@ static void fuse_i_callback(struct rcu_head *head)
 static void fuse_destroy_inode(struct inode *inode)
 {
        struct fuse_inode *fi = get_fuse_inode(inode);
-       if (S_ISREG(inode->i_mode)) {
+       if (S_ISREG(inode->i_mode) && !is_bad_inode(inode)) {
                WARN_ON(!list_empty(&fi->write_files));
                WARN_ON(!list_empty(&fi->queued_writes));
        }
@@ -1068,6 +1068,7 @@ void fuse_dev_free(struct fuse_dev *fud)
 
                fuse_conn_put(fc);
        }
+       kfree(fud->pq.processing);
        kfree(fud);
 }
 EXPORT_SYMBOL_GPL(fuse_dev_free);
index c628914..82c129b 100644 (file)
@@ -651,6 +651,18 @@ static int ovl_symlink(struct inode *dir, struct dentry *dentry,
        return ovl_create_object(dentry, S_IFLNK, 0, link);
 }
 
+static int ovl_set_link_redirect(struct dentry *dentry)
+{
+       const struct cred *old_cred;
+       int err;
+
+       old_cred = ovl_override_creds(dentry->d_sb);
+       err = ovl_set_redirect(dentry, false);
+       revert_creds(old_cred);
+
+       return err;
+}
+
 static int ovl_link(struct dentry *old, struct inode *newdir,
                    struct dentry *new)
 {
@@ -670,7 +682,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
                goto out_drop_write;
 
        if (ovl_is_metacopy_dentry(old)) {
-               err = ovl_set_redirect(old, false);
+               err = ovl_set_link_redirect(old);
                if (err)
                        goto out_drop_write;
        }
index 8fa37cd..54e5d17 100644 (file)
@@ -754,9 +754,8 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
                goto out;
        }
 
-       /* Otherwise, get a connected non-upper dir or disconnected non-dir */
-       if (d_is_dir(origin.dentry) &&
-           (origin.dentry->d_flags & DCACHE_DISCONNECTED)) {
+       /* Find origin.dentry again with ovl_acceptable() layer check */
+       if (d_is_dir(origin.dentry)) {
                dput(origin.dentry);
                origin.dentry = NULL;
                err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack);
@@ -769,6 +768,7 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
                        goto out_err;
        }
 
+       /* Get a connected non-upper dir or disconnected non-dir */
        dentry = ovl_get_dentry(sb, NULL, &origin, index);
 
 out:
index 6bcc9de..3b7ed5d 100644 (file)
@@ -286,22 +286,13 @@ int ovl_permission(struct inode *inode, int mask)
        if (err)
                return err;
 
-       /* No need to do any access on underlying for special files */
-       if (special_file(realinode->i_mode))
-               return 0;
-
-       /* No need to access underlying for execute */
-       mask &= ~MAY_EXEC;
-       if ((mask & (MAY_READ | MAY_WRITE)) == 0)
-               return 0;
-
-       /* Lower files get copied up, so turn write access into read */
-       if (!upperinode && mask & MAY_WRITE) {
+       old_cred = ovl_override_creds(inode->i_sb);
+       if (!upperinode &&
+           !special_file(realinode->i_mode) && mask & MAY_WRITE) {
                mask &= ~(MAY_WRITE | MAY_APPEND);
+               /* Make sure mounter can read file for copy up later */
                mask |= MAY_READ;
        }
-
-       old_cred = ovl_override_creds(inode->i_sb);
        err = inode_permission(realinode, mask);
        revert_creds(old_cred);
 
index cd58939..7a85e60 100644 (file)
@@ -1566,7 +1566,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                cond_resched();
 
                BUG_ON(!vma_can_userfault(vma));
-               WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
 
                /*
                 * Nothing to do: this vma is already registered into this
@@ -1575,6 +1574,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                if (!vma->vm_userfaultfd_ctx.ctx)
                        goto skip;
 
+               WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+
                if (vma->vm_start > start)
                        start = vma->vm_start;
                vma_end = min(end, vma->vm_end);
index 827e4d3..8cc7b09 100644 (file)
@@ -16,6 +16,7 @@
 #define __ASM_GENERIC_FIXMAP_H
 
 #include <linux/bug.h>
+#include <linux/mm_types.h>
 
 #define __fix_to_virt(x)       (FIXADDR_TOP - ((x) << PAGE_SHIFT))
 #define __virt_to_fix(x)       ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
index 537e9e7..8c8544b 100644 (file)
@@ -854,7 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
 extern int bpf_jit_enable;
 extern int bpf_jit_harden;
 extern int bpf_jit_kallsyms;
-extern int bpf_jit_limit;
+extern long bpf_jit_limit;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
index 57fd376..821b751 100644 (file)
@@ -593,11 +593,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
 };
 
 struct mlx5_ifc_flow_table_eswitch_cap_bits {
-       u8      reserved_at_0[0x1c];
-       u8      fdb_multi_path_to_table[0x1];
-       u8      reserved_at_1d[0x1];
+       u8      reserved_at_0[0x1a];
        u8      multi_fdb_encap[0x1];
-       u8      reserved_at_1f[0x1e1];
+       u8      reserved_at_1b[0x1];
+       u8      fdb_multi_path_to_table[0x1];
+       u8      reserved_at_1d[0x3];
+
+       u8      reserved_at_20[0x1e0];
 
        struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
 
index 5ed8f62..2c471a2 100644 (file)
@@ -206,6 +206,11 @@ struct page {
 #endif
 } _struct_page_alignment;
 
+/*
+ * Used for sizing the vmemmap region on some architectures
+ */
+#define STRUCT_PAGE_MAX_SHIFT  (order_base_2(sizeof(struct page)))
+
 #define PAGE_FRAG_CACHE_MAX_SIZE       __ALIGN_MASK(32768, ~PAGE_MASK)
 #define PAGE_FRAG_CACHE_MAX_ORDER      get_order(PAGE_FRAG_CACHE_MAX_SIZE)
 
index 847705a..db023a9 100644 (file)
@@ -783,6 +783,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
 static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
 #endif
 
+#if defined(CONFIG_SPARSEMEM)
+void memblocks_present(void);
+#else
+static inline void memblocks_present(void) {}
+#endif
+
 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 int local_memory_node(int node_id);
 #else
index 01797cb..a0dcc9b 100644 (file)
@@ -565,7 +565,7 @@ struct platform_device_id {
 /**
  * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
  * @phy_id: The result of
- *     (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask
+ *     (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask
  *     for this PHY type
  * @phy_id_mask: Defines the significant bits of @phy_id.  A value of 0
  *     is used to terminate an array of struct mdio_device_id.
index 4a520d3..cf09ab3 100644 (file)
@@ -62,18 +62,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
 }
 #endif /* CONFIG_PROVE_LOCKING */
 
-/*
- * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
- *
- * @p: The pointer to read, prior to dereferencing
- * @ss: The nfnetlink subsystem ID
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
- */
-#define nfnl_dereference(p, ss)                                        \
-       rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
-
 #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
        MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
 
index b9626aa..3e2a80c 100644 (file)
@@ -39,12 +39,13 @@ struct t10_pi_tuple {
 
 static inline u32 t10_pi_ref_tag(struct request *rq)
 {
+       unsigned int shift = ilog2(queue_logical_block_size(rq->q));
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-       return blk_rq_pos(rq) >>
-               (rq->q->integrity.interval_exp - 9) & 0xffffffff;
-#else
-       return -1U;
+       if (rq->q->integrity.interval_exp)
+               shift = rq->q->integrity.interval_exp;
 #endif
+       return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
 }
 
 extern const struct blk_integrity_profile t10_pi_type1_crc;
index 564892e..f492e21 100644 (file)
@@ -554,6 +554,60 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
 }
 
 /**
+ * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_cmpxchg() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context.  Takes and releases the xa_lock while
+ * disabling softirqs.  May sleep if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
+                       void *old, void *entry, gfp_t gfp)
+{
+       void *curr;
+
+       xa_lock_bh(xa);
+       curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+       xa_unlock_bh(xa);
+
+       return curr;
+}
+
+/**
+ * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_cmpxchg() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context.  Takes and releases the xa_lock while
+ * disabling interrupts.  May sleep if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
+                       void *old, void *entry, gfp_t gfp)
+{
+       void *curr;
+
+       xa_lock_irq(xa);
+       curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+       xa_unlock_irq(xa);
+
+       return curr;
+}
+
+/**
  * xa_insert() - Store this entry in the XArray unless another entry is
  *                     already present.
  * @xa: XArray.
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h
new file mode 100644 (file)
index 0000000..d21f40e
--- /dev/null
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * These are the MPEG2 state controls for use with stateless MPEG-2
+ * codec drivers.
+ *
+ * It turns out that these structs are not stable yet and will undergo
+ * more changes. So keep them private until they are stable and ready to
+ * become part of the official public API.
+ */
+
+#ifndef _MPEG2_CTRLS_H_
+#define _MPEG2_CTRLS_H_
+
+#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS         (V4L2_CID_MPEG_BASE+250)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION         (V4L2_CID_MPEG_BASE+251)
+
+/* enum v4l2_ctrl_type type values */
+#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103
+#define        V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104
+
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_I       1
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_P       2
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_B       3
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_D       4
+
+struct v4l2_mpeg2_sequence {
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
+       __u16   horizontal_size;
+       __u16   vertical_size;
+       __u32   vbv_buffer_size;
+
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
+       __u8    profile_and_level_indication;
+       __u8    progressive_sequence;
+       __u8    chroma_format;
+       __u8    pad;
+};
+
+struct v4l2_mpeg2_picture {
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
+       __u8    picture_coding_type;
+
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
+       __u8    f_code[2][2];
+       __u8    intra_dc_precision;
+       __u8    picture_structure;
+       __u8    top_field_first;
+       __u8    frame_pred_frame_dct;
+       __u8    concealment_motion_vectors;
+       __u8    q_scale_type;
+       __u8    intra_vlc_format;
+       __u8    alternate_scan;
+       __u8    repeat_first_field;
+       __u8    progressive_frame;
+       __u8    pad;
+};
+
+struct v4l2_ctrl_mpeg2_slice_params {
+       __u32   bit_size;
+       __u32   data_bit_offset;
+
+       struct v4l2_mpeg2_sequence sequence;
+       struct v4l2_mpeg2_picture picture;
+
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
+       __u8    quantiser_scale_code;
+
+       __u8    backward_ref_index;
+       __u8    forward_ref_index;
+       __u8    pad;
+};
+
+struct v4l2_ctrl_mpeg2_quantization {
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
+       __u8    load_intra_quantiser_matrix;
+       __u8    load_non_intra_quantiser_matrix;
+       __u8    load_chroma_intra_quantiser_matrix;
+       __u8    load_chroma_non_intra_quantiser_matrix;
+
+       __u8    intra_quantiser_matrix[64];
+       __u8    non_intra_quantiser_matrix[64];
+       __u8    chroma_intra_quantiser_matrix[64];
+       __u8    chroma_non_intra_quantiser_matrix[64];
+};
+
+#endif
index 83ce059..d63cf22 100644 (file)
 #include <linux/videodev2.h>
 #include <media/media-request.h>
 
+/*
+ * Include the mpeg2 stateless codec compound control definitions.
+ * This will move to the public headers once this API is fully stable.
+ */
+#include <media/mpeg2-ctrls.h>
+
 /* forward references */
 struct file;
 struct v4l2_ctrl_handler;
index e86981d..4a737b2 100644 (file)
@@ -239,6 +239,7 @@ struct vb2_queue;
  * @num_planes:                number of planes in the buffer
  *                     on an internal driver queue.
  * @timestamp:         frame timestamp in ns.
+ * @request:           the request this buffer is associated with.
  * @req_obj:           used to bind this buffer to a request. This
  *                     request object has a refcount.
  */
@@ -249,6 +250,7 @@ struct vb2_buffer {
        unsigned int            memory;
        unsigned int            num_planes;
        u64                     timestamp;
+       struct media_request    *request;
        struct media_request_object     req_obj;
 
        /* private: internal use only
index db6b221..cbcf35c 100644 (file)
@@ -144,25 +144,6 @@ struct ip_tunnel {
        bool                    ignore_df;
 };
 
-#define TUNNEL_CSUM            __cpu_to_be16(0x01)
-#define TUNNEL_ROUTING         __cpu_to_be16(0x02)
-#define TUNNEL_KEY             __cpu_to_be16(0x04)
-#define TUNNEL_SEQ             __cpu_to_be16(0x08)
-#define TUNNEL_STRICT          __cpu_to_be16(0x10)
-#define TUNNEL_REC             __cpu_to_be16(0x20)
-#define TUNNEL_VERSION         __cpu_to_be16(0x40)
-#define TUNNEL_NO_KEY          __cpu_to_be16(0x80)
-#define TUNNEL_DONT_FRAGMENT    __cpu_to_be16(0x0100)
-#define TUNNEL_OAM             __cpu_to_be16(0x0200)
-#define TUNNEL_CRIT_OPT                __cpu_to_be16(0x0400)
-#define TUNNEL_GENEVE_OPT      __cpu_to_be16(0x0800)
-#define TUNNEL_VXLAN_OPT       __cpu_to_be16(0x1000)
-#define TUNNEL_NOCACHE         __cpu_to_be16(0x2000)
-#define TUNNEL_ERSPAN_OPT      __cpu_to_be16(0x4000)
-
-#define TUNNEL_OPTIONS_PRESENT \
-               (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
-
 struct tnl_ptk_info {
        __be16 flags;
        __be16 proto;
index df390a3..a6235c2 100644 (file)
@@ -2350,22 +2350,39 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
 
 /**
- * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
+ * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
  * @sk:                socket sending this packet
  * @tsflags:   timestamping flags to use
  * @tx_flags:  completed with instructions for time stamping
+ * @tskey:      filled in with next sk_tskey (not for TCP, which uses seqno)
  *
  * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
  */
-static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
-                                    __u8 *tx_flags)
+static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+                                     __u8 *tx_flags, __u32 *tskey)
 {
-       if (unlikely(tsflags))
+       if (unlikely(tsflags)) {
                __sock_tx_timestamp(tsflags, tx_flags);
+               if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
+                   tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
+                       *tskey = sk->sk_tskey++;
+       }
        if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
                *tx_flags |= SKBTX_WIFI_STATUS;
 }
 
+static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+                                    __u8 *tx_flags)
+{
+       _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
+}
+
+static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+{
+       _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
+                          &skb_shinfo(skb)->tskey);
+}
+
 /**
  * sk_eat_skb - Release a skb if it is no longer needed
  * @sk: socket to eat this skb from
index bab5627..3cbcd12 100644 (file)
  *
  * void (*unhash)(struct tls_device *device, struct sock *sk);
  *     This function cleans listen state set by Inline TLS driver
+ *
+ * void (*release)(struct kref *kref);
+ *     Release the registered device and allocated resources
+ * @kref: Number of reference to tls_device
  */
 struct tls_device {
        char name[TLS_DEVICE_NAME_MAX];
@@ -83,6 +87,8 @@ struct tls_device {
        int  (*feature)(struct tls_device *device);
        int  (*hash)(struct tls_device *device, struct sock *sk);
        void (*unhash)(struct tls_device *device, struct sock *sk);
+       void (*release)(struct kref *kref);
+       struct kref kref;
 };
 
 enum {
index 38c2328..7298a53 100644 (file)
@@ -1526,6 +1526,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
                    int (*func)(struct xfrm_state *, int, void*), void *);
 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
 struct xfrm_state *xfrm_state_alloc(struct net *net);
+void xfrm_state_free(struct xfrm_state *x);
 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
                                   const xfrm_address_t *saddr,
                                   const struct flowi *fl,
index 2138144..355c4ac 100644 (file)
@@ -3,6 +3,7 @@
 #
 mandatory-y += auxvec.h
 mandatory-y += bitsperlong.h
+mandatory-y += bpf_perf_event.h
 mandatory-y += byteorder.h
 mandatory-y += errno.h
 mandatory-y += fcntl.h
index 8f08ff9..6fa38d0 100644 (file)
@@ -141,7 +141,7 @@ struct blk_zone_range {
  */
 #define BLKREPORTZONE  _IOWR(0x12, 130, struct blk_zone_report)
 #define BLKRESETZONE   _IOW(0x12, 131, struct blk_zone_range)
-#define BLKGETZONESZ   _IOW(0x12, 132, __u32)
-#define BLKGETNRZONES  _IOW(0x12, 133, __u32)
+#define BLKGETZONESZ   _IOR(0x12, 132, __u32)
+#define BLKGETNRZONES  _IOR(0x12, 133, __u32)
 
 #endif /* _UAPI_BLKZONED_H */
index 1b3d148..7d91055 100644 (file)
@@ -160,4 +160,24 @@ enum {
 };
 
 #define IFLA_VTI_MAX   (__IFLA_VTI_MAX - 1)
+
+#define TUNNEL_CSUM            __cpu_to_be16(0x01)
+#define TUNNEL_ROUTING         __cpu_to_be16(0x02)
+#define TUNNEL_KEY             __cpu_to_be16(0x04)
+#define TUNNEL_SEQ             __cpu_to_be16(0x08)
+#define TUNNEL_STRICT          __cpu_to_be16(0x10)
+#define TUNNEL_REC             __cpu_to_be16(0x20)
+#define TUNNEL_VERSION         __cpu_to_be16(0x40)
+#define TUNNEL_NO_KEY          __cpu_to_be16(0x80)
+#define TUNNEL_DONT_FRAGMENT    __cpu_to_be16(0x0100)
+#define TUNNEL_OAM             __cpu_to_be16(0x0200)
+#define TUNNEL_CRIT_OPT                __cpu_to_be16(0x0400)
+#define TUNNEL_GENEVE_OPT      __cpu_to_be16(0x0800)
+#define TUNNEL_VXLAN_OPT       __cpu_to_be16(0x1000)
+#define TUNNEL_NOCACHE         __cpu_to_be16(0x2000)
+#define TUNNEL_ERSPAN_OPT      __cpu_to_be16(0x4000)
+
+#define TUNNEL_OPTIONS_PRESENT \
+               (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
+
 #endif /* _UAPI_IF_TUNNEL_H_ */
index 48e8a22..f6052e7 100644 (file)
@@ -266,10 +266,14 @@ struct sockaddr_in {
 
 #define        IN_CLASSD(a)            ((((long int) (a)) & 0xf0000000) == 0xe0000000)
 #define        IN_MULTICAST(a)         IN_CLASSD(a)
-#define IN_MULTICAST_NET       0xF0000000
+#define        IN_MULTICAST_NET        0xe0000000
 
-#define        IN_EXPERIMENTAL(a)      ((((long int) (a)) & 0xf0000000) == 0xf0000000)
-#define        IN_BADCLASS(a)          IN_EXPERIMENTAL((a))
+#define        IN_BADCLASS(a)          ((((long int) (a) ) == 0xffffffff)
+#define        IN_EXPERIMENTAL(a)      IN_BADCLASS((a))
+
+#define        IN_CLASSE(a)            ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+#define        IN_CLASSE_NET           0xffffffff
+#define        IN_CLASSE_NSHIFT        0
 
 /* Address to accept any incoming messages. */
 #define        INADDR_ANY              ((unsigned long int) 0x00000000)
index 3eb5a4c..ae366b8 100644 (file)
 
 #define ABS_MISC               0x28
 
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define here ABS_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define ABS_RESERVED           0x2e
+
 #define ABS_MT_SLOT            0x2f    /* MT slot being modified */
 #define ABS_MT_TOUCH_MAJOR     0x30    /* Major axis of touching ellipse */
 #define ABS_MT_TOUCH_MINOR     0x31    /* Minor axis (omit if circular) */
index 97ff3c1..e5b3972 100644 (file)
@@ -155,8 +155,8 @@ enum txtime_flags {
 };
 
 struct sock_txtime {
-       clockid_t       clockid;        /* reference clockid */
-       __u32           flags;          /* as defined by enum txtime_flags */
+       __kernel_clockid_t      clockid;/* reference clockid */
+       __u32                   flags;  /* as defined by enum txtime_flags */
 };
 
 #endif /* _NET_TIMESTAMPING_H */
index 486ed1f..0a4d733 100644 (file)
@@ -155,7 +155,7 @@ enum nlmsgerr_attrs {
 #define NETLINK_LIST_MEMBERSHIPS       9
 #define NETLINK_CAP_ACK                        10
 #define NETLINK_EXT_ACK                        11
-#define NETLINK_DUMP_STRICT_CHK                12
+#define NETLINK_GET_STRICT_CHK         12
 
 struct nl_pktinfo {
        __u32   group;
index 998983a..3dcfc61 100644 (file)
@@ -404,9 +404,6 @@ enum v4l2_mpeg_video_multi_slice_mode {
 #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE          (V4L2_CID_MPEG_BASE+228)
 #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME            (V4L2_CID_MPEG_BASE+229)
 
-#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS         (V4L2_CID_MPEG_BASE+250)
-#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION         (V4L2_CID_MPEG_BASE+251)
-
 #define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP            (V4L2_CID_MPEG_BASE+300)
 #define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP            (V4L2_CID_MPEG_BASE+301)
 #define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP            (V4L2_CID_MPEG_BASE+302)
@@ -1097,69 +1094,4 @@ enum v4l2_detect_md_mode {
 #define V4L2_CID_DETECT_MD_THRESHOLD_GRID      (V4L2_CID_DETECT_CLASS_BASE + 3)
 #define V4L2_CID_DETECT_MD_REGION_GRID         (V4L2_CID_DETECT_CLASS_BASE + 4)
 
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_I       1
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_P       2
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_B       3
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_D       4
-
-struct v4l2_mpeg2_sequence {
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
-       __u16   horizontal_size;
-       __u16   vertical_size;
-       __u32   vbv_buffer_size;
-
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
-       __u8    profile_and_level_indication;
-       __u8    progressive_sequence;
-       __u8    chroma_format;
-       __u8    pad;
-};
-
-struct v4l2_mpeg2_picture {
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
-       __u8    picture_coding_type;
-
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
-       __u8    f_code[2][2];
-       __u8    intra_dc_precision;
-       __u8    picture_structure;
-       __u8    top_field_first;
-       __u8    frame_pred_frame_dct;
-       __u8    concealment_motion_vectors;
-       __u8    q_scale_type;
-       __u8    intra_vlc_format;
-       __u8    alternate_scan;
-       __u8    repeat_first_field;
-       __u8    progressive_frame;
-       __u8    pad;
-};
-
-struct v4l2_ctrl_mpeg2_slice_params {
-       __u32   bit_size;
-       __u32   data_bit_offset;
-
-       struct v4l2_mpeg2_sequence sequence;
-       struct v4l2_mpeg2_picture picture;
-
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
-       __u8    quantiser_scale_code;
-
-       __u8    backward_ref_index;
-       __u8    forward_ref_index;
-       __u8    pad;
-};
-
-struct v4l2_ctrl_mpeg2_quantization {
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
-       __u8    load_intra_quantiser_matrix;
-       __u8    load_non_intra_quantiser_matrix;
-       __u8    load_chroma_intra_quantiser_matrix;
-       __u8    load_chroma_non_intra_quantiser_matrix;
-
-       __u8    intra_quantiser_matrix[64];
-       __u8    non_intra_quantiser_matrix[64];
-       __u8    chroma_intra_quantiser_matrix[64];
-       __u8    chroma_non_intra_quantiser_matrix[64];
-};
-
 #endif
index c8e8ff8..2ba2ad0 100644 (file)
@@ -1622,8 +1622,6 @@ struct v4l2_ext_control {
                __u8 __user *p_u8;
                __u16 __user *p_u16;
                __u32 __user *p_u32;
-               struct v4l2_ctrl_mpeg2_slice_params __user *p_mpeg2_slice_params;
-               struct v4l2_ctrl_mpeg2_quantization __user *p_mpeg2_quantization;
                void __user *ptr;
        };
 } __attribute__ ((packed));
@@ -1669,8 +1667,6 @@ enum v4l2_ctrl_type {
        V4L2_CTRL_TYPE_U8            = 0x0100,
        V4L2_CTRL_TYPE_U16           = 0x0101,
        V4L2_CTRL_TYPE_U32           = 0x0102,
-       V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS = 0x0103,
-       V4L2_CTRL_TYPE_MPEG2_QUANTIZATION = 0x0104,
 };
 
 /*  Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
index cf5b5a0..ed93525 100644 (file)
@@ -515,8 +515,8 @@ config PSI_DEFAULT_DISABLED
        depends on PSI
        help
          If set, pressure stall information tracking will be disabled
-         per default but can be enabled through passing psi_enable=1
-         on the kernel commandline during boot.
+         per default but can be enabled through passing psi=1 on the
+         kernel commandline during boot.
 
 endmenu # "CPU/Task time and stats accounting"
 
index 5cdd8da..38de580 100644 (file)
@@ -474,13 +474,11 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 }
 
 #ifdef CONFIG_BPF_JIT
-# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000)
-
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 int bpf_jit_harden   __read_mostly;
 int bpf_jit_kallsyms __read_mostly;
-int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
+long bpf_jit_limit   __read_mostly;
 
 static __always_inline void
 bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -701,16 +699,27 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
 static atomic_long_t bpf_jit_current;
 
+/* Can be overridden by an arch's JIT compiler if it has a custom,
+ * dedicated BPF backend memory area, or if neither of the two
+ * below apply.
+ */
+u64 __weak bpf_jit_alloc_exec_limit(void)
+{
 #if defined(MODULES_VADDR)
+       return MODULES_END - MODULES_VADDR;
+#else
+       return VMALLOC_END - VMALLOC_START;
+#endif
+}
+
 static int __init bpf_jit_charge_init(void)
 {
        /* Only used as heuristic here to derive limit. */
-       bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
-                                           PAGE_SIZE), INT_MAX);
+       bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+                                           PAGE_SIZE), LONG_MAX);
        return 0;
 }
 pure_initcall(bpf_jit_charge_init);
-#endif
 
 static int bpf_jit_charge_modmem(u32 pages)
 {
index 8b511a4..5b3c0a9 100644 (file)
@@ -5381,9 +5381,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
        }
        new_sl->next = env->explored_states[insn_idx];
        env->explored_states[insn_idx] = new_sl;
-       /* connect new state to parentage chain */
-       for (i = 0; i < BPF_REG_FP; i++)
-               cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
+       /* connect new state to parentage chain. Current frame needs all
+        * registers connected. Only r6 - r9 of the callers are alive (pushed
+        * to the stack implicitly by JITs) so in callers' frames connect just
+        * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
+        * the state of the call instruction (with WRITTEN set), and r0 comes
+        * from callee with its full parentage chain, anyway.
+        */
+       for (j = 0; j <= cur->curframe; j++)
+               for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
+                       cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
        /* clear write marks in current state: the writes we did are not writes
         * our child did, so they don't screen off its reads from us.
         * (There are no read marks in current state, because reads always mark
index 22a12ab..375c77e 100644 (file)
@@ -309,7 +309,12 @@ int dma_direct_supported(struct device *dev, u64 mask)
 
        min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
 
-       return mask >= phys_to_dma(dev, min_mask);
+       /*
+        * This check needs to be against the actual bit mask value, so
+        * use __phys_to_dma() here so that the SME encryption mask isn't
+        * part of the check.
+        */
+       return mask >= __phys_to_dma(dev, min_mask);
 }
 
 int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
index 7773445..e23eb9f 100644 (file)
@@ -5460,6 +5460,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
        if (ops->flags & FTRACE_OPS_FL_ENABLED)
                ftrace_shutdown(ops, 0);
        ops->flags |= FTRACE_OPS_FL_DELETED;
+       ftrace_free_filter(ops);
        mutex_unlock(&ftrace_lock);
 }
 
index 84a6517..5574e86 100644 (file)
@@ -570,11 +570,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
                }
        }
 
+       kfree(op_stack);
+       kfree(inverts);
        return prog;
 out_free:
        kfree(op_stack);
-       kfree(prog_stack);
        kfree(inverts);
+       kfree(prog_stack);
        return ERR_PTR(ret);
 }
 
@@ -1718,6 +1720,7 @@ static int create_filter(struct trace_event_call *call,
        err = process_preds(call, filter_string, *filterp, pe);
        if (err && set_str)
                append_filter_err(pe, *filterp);
+       create_filter_finish(pe);
 
        return err;
 }
index 2152d1e..cd12ecb 100644 (file)
@@ -732,8 +732,10 @@ int set_trigger_filter(char *filter_str,
 
        /* The filter is for the 'trigger' event, not the triggered event */
        ret = create_event_filter(file->event_call, filter_str, false, &filter);
-       if (ret)
-               goto out;
+       /*
+        * If create_event_filter() fails, filter still needs to be freed.
+        * Which the calling code will do with data->filter.
+        */
  assign:
        tmp = rcu_access_pointer(data->filter);
 
index 1106bb6..14d5154 100644 (file)
@@ -784,11 +784,11 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
        while (radix_tree_is_internal_node(node)) {
                unsigned offset;
 
-               if (node == RADIX_TREE_RETRY)
-                       goto restart;
                parent = entry_to_node(node);
                offset = radix_tree_descend(parent, &node, index);
                slot = parent->slots + offset;
+               if (node == RADIX_TREE_RETRY)
+                       goto restart;
                if (parent->shift == 0)
                        break;
        }
index 0598e86..4676c0a 100644 (file)
@@ -28,23 +28,28 @@ void xa_dump(const struct xarray *xa) { }
 } while (0)
 #endif
 
+static void *xa_mk_index(unsigned long index)
+{
+       return xa_mk_value(index & LONG_MAX);
+}
+
 static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
-       return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
+       return xa_store(xa, index, xa_mk_index(index), gfp);
 }
 
 static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
        u32 id = 0;
 
-       XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX),
+       XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
                                gfp) != 0);
        XA_BUG_ON(xa, id != index);
 }
 
 static void xa_erase_index(struct xarray *xa, unsigned long index)
 {
-       XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
+       XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
        XA_BUG_ON(xa, xa_load(xa, index) != NULL);
 }
 
@@ -118,7 +123,7 @@ static noinline void check_xas_retry(struct xarray *xa)
 
        xas_set(&xas, 0);
        xas_for_each(&xas, entry, ULONG_MAX) {
-               xas_store(&xas, xa_mk_value(xas.xa_index));
+               xas_store(&xas, xa_mk_index(xas.xa_index));
        }
        xas_unlock(&xas);
 
@@ -196,7 +201,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
                XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
                xa_set_mark(xa, index + 2, XA_MARK_1);
                XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
-               xa_store_order(xa, index, order, xa_mk_value(index),
+               xa_store_order(xa, index, order, xa_mk_index(index),
                                GFP_KERNEL);
                for (i = base; i < next; i++) {
                        XA_STATE(xas, xa, i);
@@ -405,7 +410,7 @@ static noinline void check_xas_erase(struct xarray *xa)
                        xas_set(&xas, j);
                        do {
                                xas_lock(&xas);
-                               xas_store(&xas, xa_mk_value(j));
+                               xas_store(&xas, xa_mk_index(j));
                                xas_unlock(&xas);
                        } while (xas_nomem(&xas, GFP_KERNEL));
                }
@@ -423,7 +428,7 @@ static noinline void check_xas_erase(struct xarray *xa)
                xas_set(&xas, 0);
                j = i;
                xas_for_each(&xas, entry, ULONG_MAX) {
-                       XA_BUG_ON(xa, entry != xa_mk_value(j));
+                       XA_BUG_ON(xa, entry != xa_mk_index(j));
                        xas_store(&xas, NULL);
                        j++;
                }
@@ -440,17 +445,17 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
        unsigned long min = index & ~((1UL << order) - 1);
        unsigned long max = min + (1UL << order);
 
-       xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL);
-       XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index));
-       XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index));
+       xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+       XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
+       XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
        XA_BUG_ON(xa, xa_load(xa, max) != NULL);
        XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
        xas_lock(&xas);
-       XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+       XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
        xas_unlock(&xas);
-       XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
-       XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
+       XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
+       XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
        XA_BUG_ON(xa, xa_load(xa, max) != NULL);
        XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
@@ -471,6 +476,32 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
        xas_unlock(&xas);
        XA_BUG_ON(xa, !xa_empty(xa));
 }
+
+static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
+               unsigned int order)
+{
+       XA_STATE(xas, xa, 0);
+       void *entry;
+       int n = 0;
+
+       xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+       xas_lock(&xas);
+       xas_for_each(&xas, entry, ULONG_MAX) {
+               XA_BUG_ON(xa, entry != xa_mk_index(index));
+               n++;
+       }
+       XA_BUG_ON(xa, n != 1);
+       xas_set(&xas, index + 1);
+       xas_for_each(&xas, entry, ULONG_MAX) {
+               XA_BUG_ON(xa, entry != xa_mk_index(index));
+               n++;
+       }
+       XA_BUG_ON(xa, n != 2);
+       xas_unlock(&xas);
+
+       xa_destroy(xa);
+}
 #endif
 
 static noinline void check_multi_store(struct xarray *xa)
@@ -523,15 +554,15 @@ static noinline void check_multi_store(struct xarray *xa)
 
        for (i = 0; i < max_order; i++) {
                for (j = 0; j < max_order; j++) {
-                       xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
-                       xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);
+                       xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
+                       xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
 
                        for (k = 0; k < max_order; k++) {
                                void *entry = xa_load(xa, (1UL << k) - 1);
                                if ((i < k) && (j < k))
                                        XA_BUG_ON(xa, entry != NULL);
                                else
-                                       XA_BUG_ON(xa, entry != xa_mk_value(j));
+                                       XA_BUG_ON(xa, entry != xa_mk_index(j));
                        }
 
                        xa_erase(xa, 0);
@@ -545,6 +576,11 @@ static noinline void check_multi_store(struct xarray *xa)
                check_multi_store_1(xa, (1UL << i) + 1, i);
        }
        check_multi_store_2(xa, 4095, 9);
+
+       for (i = 1; i < 20; i++) {
+               check_multi_store_3(xa, 0, i);
+               check_multi_store_3(xa, 1UL << i, i);
+       }
 #endif
 }
 
@@ -587,16 +623,25 @@ static noinline void check_xa_alloc(void)
        xa_destroy(&xa0);
 
        id = 0xfffffffeU;
-       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
                                GFP_KERNEL) != 0);
        XA_BUG_ON(&xa0, id != 0xfffffffeU);
-       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
                                GFP_KERNEL) != 0);
        XA_BUG_ON(&xa0, id != 0xffffffffU);
-       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
                                GFP_KERNEL) != -ENOSPC);
        XA_BUG_ON(&xa0, id != 0xffffffffU);
        xa_destroy(&xa0);
+
+       id = 10;
+       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+                               GFP_KERNEL) != -ENOSPC);
+       XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
+       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+                               GFP_KERNEL) != -ENOSPC);
+       xa_erase_index(&xa0, 3);
+       XA_BUG_ON(&xa0, !xa_empty(&xa0));
 }
 
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
@@ -610,11 +655,11 @@ retry:
        xas_lock(&xas);
        xas_for_each_conflict(&xas, entry) {
                XA_BUG_ON(xa, !xa_is_value(entry));
-               XA_BUG_ON(xa, entry < xa_mk_value(start));
-               XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1));
+               XA_BUG_ON(xa, entry < xa_mk_index(start));
+               XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
                count++;
        }
-       xas_store(&xas, xa_mk_value(start));
+       xas_store(&xas, xa_mk_index(start));
        xas_unlock(&xas);
        if (xas_nomem(&xas, GFP_KERNEL)) {
                count = 0;
@@ -622,9 +667,9 @@ retry:
        }
        XA_BUG_ON(xa, xas_error(&xas));
        XA_BUG_ON(xa, count != present);
-       XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start));
+       XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
        XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
-                       xa_mk_value(start));
+                       xa_mk_index(start));
        xa_erase_index(xa, start);
 }
 
@@ -703,7 +748,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
                for (j = 0; j < index; j++) {
                        XA_STATE(xas, xa, j + index);
                        xa_store_index(xa, index - 1, GFP_KERNEL);
-                       xa_store_order(xa, index, i, xa_mk_value(index),
+                       xa_store_order(xa, index, i, xa_mk_index(index),
                                        GFP_KERNEL);
                        rcu_read_lock();
                        xas_for_each(&xas, entry, ULONG_MAX) {
@@ -778,7 +823,7 @@ static noinline void check_find_2(struct xarray *xa)
                j = 0;
                index = 0;
                xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
-                       XA_BUG_ON(xa, xa_mk_value(index) != entry);
+                       XA_BUG_ON(xa, xa_mk_index(index) != entry);
                        XA_BUG_ON(xa, index != j++);
                }
        }
@@ -786,10 +831,34 @@ static noinline void check_find_2(struct xarray *xa)
        xa_destroy(xa);
 }
 
+static noinline void check_find_3(struct xarray *xa)
+{
+       XA_STATE(xas, xa, 0);
+       unsigned long i, j, k;
+       void *entry;
+
+       for (i = 0; i < 100; i++) {
+               for (j = 0; j < 100; j++) {
+                       for (k = 0; k < 100; k++) {
+                               xas_set(&xas, j);
+                               xas_for_each_marked(&xas, entry, k, XA_MARK_0)
+                                       ;
+                               if (j > k)
+                                       XA_BUG_ON(xa,
+                                               xas.xa_node != XAS_RESTART);
+                       }
+               }
+               xa_store_index(xa, i, GFP_KERNEL);
+               xa_set_mark(xa, i, XA_MARK_0);
+       }
+       xa_destroy(xa);
+}
+
 static noinline void check_find(struct xarray *xa)
 {
        check_find_1(xa);
        check_find_2(xa);
+       check_find_3(xa);
        check_multi_find(xa);
        check_multi_find_2(xa);
 }
@@ -829,11 +898,11 @@ static noinline void check_find_entry(struct xarray *xa)
                        for (index = 0; index < (1UL << (order + 5));
                             index += (1UL << order)) {
                                xa_store_order(xa, index, order,
-                                               xa_mk_value(index), GFP_KERNEL);
+                                               xa_mk_index(index), GFP_KERNEL);
                                XA_BUG_ON(xa, xa_load(xa, index) !=
-                                               xa_mk_value(index));
+                                               xa_mk_index(index));
                                XA_BUG_ON(xa, xa_find_entry(xa,
-                                               xa_mk_value(index)) != index);
+                                               xa_mk_index(index)) != index);
                        }
                        XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
                        xa_destroy(xa);
@@ -844,7 +913,7 @@ static noinline void check_find_entry(struct xarray *xa)
        XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
        xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
        XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
-       XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1);
+       XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
        xa_erase_index(xa, ULONG_MAX);
        XA_BUG_ON(xa, !xa_empty(xa));
 }
@@ -864,7 +933,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
                        XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
                XA_BUG_ON(xa, xas.xa_index != i);
                if (i == 0 || i == idx)
-                       XA_BUG_ON(xa, entry != xa_mk_value(i));
+                       XA_BUG_ON(xa, entry != xa_mk_index(i));
                else
                        XA_BUG_ON(xa, entry != NULL);
        }
@@ -878,7 +947,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
                        XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
                XA_BUG_ON(xa, xas.xa_index != i);
                if (i == 0 || i == idx)
-                       XA_BUG_ON(xa, entry != xa_mk_value(i));
+                       XA_BUG_ON(xa, entry != xa_mk_index(i));
                else
                        XA_BUG_ON(xa, entry != NULL);
        } while (i > 0);
@@ -909,7 +978,7 @@ static noinline void check_move(struct xarray *xa)
        do {
                void *entry = xas_prev(&xas);
                i--;
-               XA_BUG_ON(xa, entry != xa_mk_value(i));
+               XA_BUG_ON(xa, entry != xa_mk_index(i));
                XA_BUG_ON(xa, i != xas.xa_index);
        } while (i != 0);
 
@@ -918,7 +987,7 @@ static noinline void check_move(struct xarray *xa)
 
        do {
                void *entry = xas_next(&xas);
-               XA_BUG_ON(xa, entry != xa_mk_value(i));
+               XA_BUG_ON(xa, entry != xa_mk_index(i));
                XA_BUG_ON(xa, i != xas.xa_index);
                i++;
        } while (i < (1 << 16));
@@ -934,7 +1003,7 @@ static noinline void check_move(struct xarray *xa)
                void *entry = xas_prev(&xas);
                i--;
                if ((i < (1 << 8)) || (i >= (1 << 15)))
-                       XA_BUG_ON(xa, entry != xa_mk_value(i));
+                       XA_BUG_ON(xa, entry != xa_mk_index(i));
                else
                        XA_BUG_ON(xa, entry != NULL);
                XA_BUG_ON(xa, i != xas.xa_index);
@@ -946,7 +1015,7 @@ static noinline void check_move(struct xarray *xa)
        do {
                void *entry = xas_next(&xas);
                if ((i < (1 << 8)) || (i >= (1 << 15)))
-                       XA_BUG_ON(xa, entry != xa_mk_value(i));
+                       XA_BUG_ON(xa, entry != xa_mk_index(i));
                else
                        XA_BUG_ON(xa, entry != NULL);
                XA_BUG_ON(xa, i != xas.xa_index);
@@ -976,7 +1045,7 @@ static noinline void xa_store_many_order(struct xarray *xa,
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < (1U << order); i++) {
-                       XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i)));
+                       XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
                        xas_next(&xas);
                }
 unlock:
@@ -1031,9 +1100,9 @@ static noinline void check_create_range_4(struct xarray *xa,
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < (1UL << order); i++) {
-                       void *old = xas_store(&xas, xa_mk_value(base + i));
+                       void *old = xas_store(&xas, xa_mk_index(base + i));
                        if (xas.xa_index == index)
-                               XA_BUG_ON(xa, old != xa_mk_value(base + i));
+                               XA_BUG_ON(xa, old != xa_mk_index(base + i));
                        else
                                XA_BUG_ON(xa, old != NULL);
                        xas_next(&xas);
@@ -1085,10 +1154,10 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first,
                unsigned long last)
 {
 #ifdef CONFIG_XARRAY_MULTI
-       xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL);
+       xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
 
-       XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first));
-       XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first));
+       XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
+       XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
        XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
        XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
 
@@ -1195,7 +1264,7 @@ static noinline void check_account(struct xarray *xa)
                XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
                rcu_read_unlock();
 
-               xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
+               xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
                                GFP_KERNEL);
                XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);
 
index bbacca5..5f3f931 100644 (file)
@@ -1131,7 +1131,7 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
                entry = xa_head(xas->xa);
                xas->xa_node = NULL;
                if (xas->xa_index > max_index(entry))
-                       goto bounds;
+                       goto out;
                if (!xa_is_node(entry)) {
                        if (xa_marked(xas->xa, mark))
                                return entry;
@@ -1180,11 +1180,9 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
        }
 
 out:
-       if (!max)
+       if (xas->xa_index > max)
                goto max;
-bounds:
-       xas->xa_node = XAS_BOUNDS;
-       return NULL;
+       return set_bounds(xas);
 max:
        xas->xa_node = XAS_RESTART;
        return NULL;
index 705a3e9..a808324 100644 (file)
@@ -1248,10 +1248,11 @@ void free_huge_page(struct page *page)
                (struct hugepage_subpool *)page_private(page);
        bool restore_reserve;
 
-       set_page_private(page, 0);
-       page->mapping = NULL;
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(page_mapcount(page), page);
+
+       set_page_private(page, 0);
+       page->mapping = NULL;
        restore_reserve = PagePrivate(page);
        ClearPagePrivate(page);
 
index 9a2d5ae..81ae63c 100644 (file)
@@ -1727,7 +1727,7 @@ static int __init_memblock memblock_search(struct memblock_type *type, phys_addr
        return -1;
 }
 
-bool __init memblock_is_reserved(phys_addr_t addr)
+bool __init_memblock memblock_is_reserved(phys_addr_t addr)
 {
        return memblock_search(&memblock.reserved, addr) != -1;
 }
index 921f804..5d07e0b 100644 (file)
@@ -661,9 +661,7 @@ static int shmem_free_swap(struct address_space *mapping,
 {
        void *old;
 
-       xa_lock_irq(&mapping->i_pages);
-       old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);
-       xa_unlock_irq(&mapping->i_pages);
+       old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
        if (old != radswap)
                return -ENOENT;
        free_swap_and_cache(radix_to_swp_entry(radswap));
index 33307fc..3abc8cc 100644 (file)
@@ -240,6 +240,22 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 }
 
 /*
+ * Mark all memblocks as present using memory_present(). This is a
+ * convenience function that is useful for a number of arches
+ * to mark all of the system's memory as present during initialization.
+ */
+void __init memblocks_present(void)
+{
+       struct memblock_region *reg;
+
+       for_each_memblock(memory, reg) {
+               memory_present(memblock_get_region_node(reg),
+                              memblock_region_memory_base_pfn(reg),
+                              memblock_region_memory_end_pfn(reg));
+       }
+}
+
+/*
  * Subtle, we encode the real pfn into the mem_map such that
  * the identity pfn - section_mem_map will return the actual
  * physical page frame number.
index 3aab766..c702075 100644 (file)
@@ -771,7 +771,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (err < 0)
                goto free_skb;
 
-       sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);
+       skb_setup_tx_timestamp(skb, sk->sk_tsflags);
 
        skb->dev = dev;
        skb->sk  = sk;
index 2e8d91e..9f28405 100644 (file)
@@ -783,6 +783,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
                /* Pass parameters to the BPF program */
                cb->qdisc_cb.flow_keys = &flow_keys;
                flow_keys.nhoff = nhoff;
+               flow_keys.thoff = nhoff;
 
                bpf_compute_data_pointers((struct sk_buff *)skb);
                result = BPF_PROG_RUN(attached, skb);
@@ -790,9 +791,12 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
                /* Restore state */
                memcpy(cb, &cb_saved, sizeof(cb_saved));
 
+               flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len);
+               flow_keys.thoff = clamp_t(u16, flow_keys.thoff,
+                                         flow_keys.nhoff, skb->len);
+
                __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
                                         target_container);
-               key_control->thoff = min_t(u16, key_control->thoff, skb->len);
                rcu_read_unlock();
                return result == BPF_OK;
        }
index 4b54e5f..acf45dd 100644 (file)
@@ -84,6 +84,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
        for_each_possible_cpu(i) {
                struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
+               napi_disable(&cell->napi);
                netif_napi_del(&cell->napi);
                __skb_queue_purge(&cell->napi_skbs);
        }
index 8baa9ab..fa384f7 100644 (file)
@@ -2629,11 +2629,16 @@ static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
 
                ndm = nlmsg_data(nlh);
                if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
-                   ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) {
+                   ndm->ndm_state || ndm->ndm_type) {
                        NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
                        return -EINVAL;
                }
 
+               if (ndm->ndm_flags & ~NTF_PROXY) {
+                       NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
+                       return -EINVAL;
+               }
+
                err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
                                         nda_policy, extack);
        } else {
index 37b4667..d67ec17 100644 (file)
@@ -28,6 +28,8 @@ static int two __maybe_unused = 2;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
 static int max_skb_frags = MAX_SKB_FRAGS;
+static long long_one __maybe_unused = 1;
+static long long_max __maybe_unused = LONG_MAX;
 
 static int net_msg_warn;       /* Unused, but still a sysctl */
 
@@ -289,6 +291,17 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 
        return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 }
+
+static int
+proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+                                    void __user *buffer, size_t *lenp,
+                                    loff_t *ppos)
+{
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+}
 #endif
 
 static struct ctl_table net_core_table[] = {
@@ -398,10 +411,11 @@ static struct ctl_table net_core_table[] = {
        {
                .procname       = "bpf_jit_limit",
                .data           = &bpf_jit_limit,
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0600,
-               .proc_handler   = proc_dointvec_minmax_bpf_restricted,
-               .extra1         = &one,
+               .proc_handler   = proc_dolongvec_minmax_bpf_restricted,
+               .extra1         = &long_one,
+               .extra2         = &long_max,
        },
 #endif
        {
index 5b9b6d4..04ba321 100644 (file)
@@ -952,17 +952,18 @@ static int inet_abc_len(__be32 addr)
 {
        int rc = -1;    /* Something else, probably a multicast. */
 
-       if (ipv4_is_zeronet(addr))
+       if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
                rc = 0;
        else {
                __u32 haddr = ntohl(addr);
-
                if (IN_CLASSA(haddr))
                        rc = 8;
                else if (IN_CLASSB(haddr))
                        rc = 16;
                else if (IN_CLASSC(haddr))
                        rc = 24;
+               else if (IN_CLASSE(haddr))
+                       rc = 32;
        }
 
        return rc;
index 06ee469..00ec819 100644 (file)
@@ -79,6 +79,7 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
+       skb->tstamp = 0;
        return dst_output(net, sk, skb);
 }
 
index aa0b226..867be8f 100644 (file)
@@ -346,10 +346,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
        struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
        struct rb_node **rbn, *parent;
        struct sk_buff *skb1, *prev_tail;
+       int ihl, end, skb1_run_end;
        struct net_device *dev;
        unsigned int fragsize;
        int flags, offset;
-       int ihl, end;
        int err = -ENOENT;
        u8 ecn;
 
@@ -419,7 +419,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
         *   overlapping fragment, the entire datagram (and any constituent
         *   fragments) MUST be silently discarded.
         *
-        * We do the same here for IPv4 (and increment an snmp counter).
+        * We do the same here for IPv4 (and increment an snmp counter) but
+        * we do not want to drop the whole queue in response to a duplicate
+        * fragment.
         */
 
        err = -EINVAL;
@@ -444,13 +446,17 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                do {
                        parent = *rbn;
                        skb1 = rb_to_skb(parent);
+                       skb1_run_end = skb1->ip_defrag_offset +
+                                      FRAG_CB(skb1)->frag_run_len;
                        if (end <= skb1->ip_defrag_offset)
                                rbn = &parent->rb_left;
-                       else if (offset >= skb1->ip_defrag_offset +
-                                               FRAG_CB(skb1)->frag_run_len)
+                       else if (offset >= skb1_run_end)
                                rbn = &parent->rb_right;
-                       else /* Found an overlap with skb1. */
-                               goto overlap;
+                       else if (offset >= skb1->ip_defrag_offset &&
+                                end <= skb1_run_end)
+                               goto err; /* No new data, potential duplicate */
+                       else
+                               goto overlap; /* Found an overlap */
                } while (*rbn);
                /* Here we have parent properly set, and rbn pointing to
                 * one of its NULL left/right children. Insert skb.
index 208a5b4..b9a9873 100644 (file)
@@ -429,6 +429,8 @@ static int __init ic_defaults(void)
                        ic_netmask = htonl(IN_CLASSB_NET);
                else if (IN_CLASSC(ntohl(ic_myaddr)))
                        ic_netmask = htonl(IN_CLASSC_NET);
+               else if (IN_CLASSE(ntohl(ic_myaddr)))
+                       ic_netmask = htonl(IN_CLASSE_NET);
                else {
                        pr_err("IP-Config: Unable to guess netmask for address %pI4\n",
                               &ic_myaddr);
index 75c6549..ddbf8c9 100644 (file)
@@ -69,6 +69,8 @@
 #include <net/nexthop.h>
 #include <net/switchdev.h>
 
+#include <linux/nospec.h>
+
 struct ipmr_rule {
        struct fib_rule         common;
 };
@@ -1612,6 +1614,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
+               vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
@@ -1686,6 +1689,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
+               vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
index 076f516..c55a543 100644 (file)
@@ -390,7 +390,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 
        skb->ip_summed = CHECKSUM_NONE;
 
-       sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
+       skb_setup_tx_timestamp(skb, sockc->tsflags);
 
        if (flags & MSG_CONFIRM)
                skb_set_dst_pending_confirm(skb, 1);
index 703a8e8..5f9fa03 100644 (file)
@@ -385,6 +385,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
        }
 #endif
 
+       skb->tstamp = 0;
        return dst_output(net, sk, skb);
 }
 
index 3965d53..ad1a9cc 100644 (file)
@@ -15,7 +15,7 @@
 int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
                     struct socket **sockp)
 {
-       struct sockaddr_in6 udp6_addr;
+       struct sockaddr_in6 udp6_addr = {};
        int err;
        struct socket *sock = NULL;
 
@@ -58,6 +58,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
                goto error;
 
        if (cfg->peer_udp_port) {
+               memset(&udp6_addr, 0, sizeof(udp6_addr));
                udp6_addr.sin6_family = AF_INET6;
                memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
                       sizeof(udp6_addr.sin6_addr));
index 34b8a90..8276f12 100644 (file)
@@ -52,6 +52,8 @@
 #include <net/ip6_checksum.h>
 #include <linux/netconf.h>
 
+#include <linux/nospec.h>
+
 struct ip6mr_rule {
        struct fib_rule         common;
 };
@@ -1841,6 +1843,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
                        return -EFAULT;
                if (vr.mifi >= mrt->maxvif)
                        return -EINVAL;
+               vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.mifi];
                if (VIF_EXISTS(mrt, vr.mifi)) {
@@ -1915,6 +1918,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
                        return -EFAULT;
                if (vr.mifi >= mrt->maxvif)
                        return -EINVAL;
+               vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.mifi];
                if (VIF_EXISTS(mrt, vr.mifi)) {
index aed7eb5..5a42622 100644 (file)
@@ -657,6 +657,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
 
        skb->ip_summed = CHECKSUM_NONE;
 
+       skb_setup_tx_timestamp(skb, sockc->tsflags);
+
        if (flags & MSG_CONFIRM)
                skb_set_dst_pending_confirm(skb, 1);
 
index e9652e6..4a6ff14 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (c) 2016        Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1949,6 +1950,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
        WARN(local->open_count, "%s: open count remains %d\n",
             wiphy_name(local->hw.wiphy), local->open_count);
 
+       ieee80211_txq_teardown_flows(local);
+
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
                list_del(&sdata->list);
index ada8e16..87a7299 100644 (file)
@@ -1264,7 +1264,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        rtnl_unlock();
        ieee80211_led_exit(local);
        ieee80211_wep_free(local);
-       ieee80211_txq_teardown_flows(local);
  fail_flows:
        destroy_workqueue(local->workqueue);
  fail_workqueue:
@@ -1290,7 +1289,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
-       ieee80211_txq_teardown_flows(local);
 
        rtnl_lock();
 
index a794ca7..3f0b96e 100644 (file)
@@ -556,6 +556,11 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
        }
 
        ieee80211_led_tx(local);
+
+       if (skb_has_frag_list(skb)) {
+               kfree_skb_list(skb_shinfo(skb)->frag_list);
+               skb_shinfo(skb)->frag_list = NULL;
+       }
 }
 
 /*
index 4eef55d..8da228d 100644 (file)
@@ -531,8 +531,8 @@ nla_put_failure:
                ret = -EMSGSIZE;
        } else {
                cb->args[IPSET_CB_ARG0] = i;
+               ipset_nest_end(skb, atd);
        }
-       ipset_nest_end(skb, atd);
 out:
        rcu_read_unlock();
        return ret;
index b6d0f6d..9cd180b 100644 (file)
@@ -427,7 +427,7 @@ insert_tree(struct net *net,
        count = 1;
        rbconn->list.count = count;
 
-       rb_link_node(&rbconn->node, parent, rbnode);
+       rb_link_node_rcu(&rbconn->node, parent, rbnode);
        rb_insert_color(&rbconn->node, root);
 out_unlock:
        spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
index a975efd..9da3034 100644 (file)
@@ -115,12 +115,12 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
 /* TCP SACK sequence number adjustment */
 static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
                                      unsigned int protoff,
-                                     struct tcphdr *tcph,
                                      struct nf_conn *ct,
                                      enum ip_conntrack_info ctinfo)
 {
-       unsigned int dir, optoff, optend;
+       struct tcphdr *tcph = (void *)skb->data + protoff;
        struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+       unsigned int dir, optoff, optend;
 
        optoff = protoff + sizeof(struct tcphdr);
        optend = protoff + tcph->doff * 4;
@@ -128,6 +128,7 @@ static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
        if (!skb_make_writable(skb, optend))
                return 0;
 
+       tcph = (void *)skb->data + protoff;
        dir = CTINFO2DIR(ctinfo);
 
        while (optoff < optend) {
@@ -207,7 +208,7 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
                 ntohl(newack));
        tcph->ack_seq = newack;
 
-       res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+       res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
 out:
        spin_unlock_bh(&ct->lock);
 
index e2b1960..2268b10 100644 (file)
@@ -117,7 +117,8 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
        dst = skb_dst(skb);
        if (dst->xfrm)
                dst = ((struct xfrm_dst *)dst)->route;
-       dst_hold(dst);
+       if (!dst_hold_safe(dst))
+               return -EHOSTUNREACH;
 
        if (sk && !net_eq(net, sock_net(sk)))
                sk = NULL;
index 2e61aab..6e548d7 100644 (file)
@@ -1216,7 +1216,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
                if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
                        goto nla_put_failure;
 
-               if (basechain->stats && nft_dump_stats(skb, basechain->stats))
+               if (rcu_access_pointer(basechain->stats) &&
+                   nft_dump_stats(skb, rcu_dereference(basechain->stats)))
                        goto nla_put_failure;
        }
 
@@ -1392,7 +1393,8 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
        return newstats;
 }
 
-static void nft_chain_stats_replace(struct nft_base_chain *chain,
+static void nft_chain_stats_replace(struct net *net,
+                                   struct nft_base_chain *chain,
                                    struct nft_stats __percpu *newstats)
 {
        struct nft_stats __percpu *oldstats;
@@ -1400,8 +1402,9 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
        if (newstats == NULL)
                return;
 
-       if (chain->stats) {
-               oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
+       if (rcu_access_pointer(chain->stats)) {
+               oldstats = rcu_dereference_protected(chain->stats,
+                                       lockdep_commit_lock_is_held(net));
                rcu_assign_pointer(chain->stats, newstats);
                synchronize_rcu();
                free_percpu(oldstats);
@@ -1439,9 +1442,10 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
                struct nft_base_chain *basechain = nft_base_chain(chain);
 
                module_put(basechain->type->owner);
-               free_percpu(basechain->stats);
-               if (basechain->stats)
+               if (rcu_access_pointer(basechain->stats)) {
                        static_branch_dec(&nft_counters_enabled);
+                       free_percpu(rcu_dereference_raw(basechain->stats));
+               }
                kfree(chain->name);
                kfree(basechain);
        } else {
@@ -1590,7 +1594,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                                kfree(basechain);
                                return PTR_ERR(stats);
                        }
-                       basechain->stats = stats;
+                       rcu_assign_pointer(basechain->stats, stats);
                        static_branch_inc(&nft_counters_enabled);
                }
 
@@ -6180,7 +6184,8 @@ static void nft_chain_commit_update(struct nft_trans *trans)
                return;
 
        basechain = nft_base_chain(trans->ctx.chain);
-       nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
+       nft_chain_stats_replace(trans->ctx.net, basechain,
+                               nft_trans_chain_stats(trans));
 
        switch (nft_trans_chain_policy(trans)) {
        case NF_DROP:
index 3fbce3b..a505002 100644 (file)
@@ -101,7 +101,7 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
        struct nft_stats *stats;
 
        base_chain = nft_base_chain(chain);
-       if (!base_chain->stats)
+       if (!rcu_access_pointer(base_chain->stats))
                return;
 
        local_bh_disable();
index 6bb9f3c..3c023d6 100644 (file)
@@ -1706,7 +1706,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                        nlk->flags &= ~NETLINK_F_EXT_ACK;
                err = 0;
                break;
-       case NETLINK_DUMP_STRICT_CHK:
+       case NETLINK_GET_STRICT_CHK:
                if (val)
                        nlk->flags |= NETLINK_F_STRICT_CHK;
                else
@@ -1806,7 +1806,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                        return -EFAULT;
                err = 0;
                break;
-       case NETLINK_DUMP_STRICT_CHK:
+       case NETLINK_GET_STRICT_CHK:
                if (len < sizeof(int))
                        return -EINVAL;
                len = sizeof(int);
index a74650e..6655793 100644 (file)
@@ -1965,7 +1965,7 @@ retry:
        skb->mark = sk->sk_mark;
        skb->tstamp = sockc.transmit_time;
 
-       sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
+       skb_setup_tx_timestamp(skb, sockc.tsflags);
 
        if (unlikely(extra_len == 4))
                skb->no_fcs = 1;
@@ -2460,7 +2460,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
        skb->priority = po->sk.sk_priority;
        skb->mark = po->sk.sk_mark;
        skb->tstamp = sockc->transmit_time;
-       sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
+       skb_setup_tx_timestamp(skb, sockc->tsflags);
        skb_zcopy_set_nouarg(skb, ph.raw);
 
        skb_reserve(skb, hlen);
@@ -2898,7 +2898,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                goto out_free;
        }
 
-       sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
+       skb_setup_tx_timestamp(skb, sockc.tsflags);
 
        if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
            !packet_extra_vlan_len_allowed(dev, skb)) {
index 4b00b11..f139420 100644 (file)
@@ -308,16 +308,27 @@ out:
 /*
  * RDS ops use this to grab SG entries from the rm's sg pool.
  */
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+                                         int *ret)
 {
        struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
        struct scatterlist *sg_ret;
 
-       WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
-       WARN_ON(!nents);
+       if (WARN_ON(!ret))
+               return NULL;
 
-       if (rm->m_used_sgs + nents > rm->m_total_sgs)
+       if (nents <= 0) {
+               pr_warn("rds: alloc sgs failed! nents <= 0\n");
+               *ret = -EINVAL;
                return NULL;
+       }
+
+       if (rm->m_used_sgs + nents > rm->m_total_sgs) {
+               pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
+                       rm->m_total_sgs, rm->m_used_sgs, nents);
+               *ret = -ENOMEM;
+               return NULL;
+       }
 
        sg_ret = &sg_first[rm->m_used_sgs];
        sg_init_table(sg_ret, nents);
@@ -332,6 +343,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        unsigned int i;
        int num_sgs = ceil(total_len, PAGE_SIZE);
        int extra_bytes = num_sgs * sizeof(struct scatterlist);
+       int ret;
 
        rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
        if (!rm)
@@ -340,10 +352,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
        rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
        rm->data.op_nents = ceil(total_len, PAGE_SIZE);
-       rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+       rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
        if (!rm->data.op_sg) {
                rds_message_put(rm);
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(ret);
        }
 
        for (i = 0; i < rm->data.op_nents; ++i) {
index 98237fe..182ab84 100644 (file)
@@ -517,9 +517,10 @@ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
        return tot_pages;
 }
 
-int rds_rdma_extra_size(struct rds_rdma_args *args)
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+                       struct rds_iov_vector *iov)
 {
-       struct rds_iovec vec;
+       struct rds_iovec *vec;
        struct rds_iovec __user *local_vec;
        int tot_pages = 0;
        unsigned int nr_pages;
@@ -530,13 +531,23 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
        if (args->nr_local == 0)
                return -EINVAL;
 
+       iov->iov = kcalloc(args->nr_local,
+                          sizeof(struct rds_iovec),
+                          GFP_KERNEL);
+       if (!iov->iov)
+               return -ENOMEM;
+
+       vec = &iov->iov[0];
+
+       if (copy_from_user(vec, local_vec, args->nr_local *
+                          sizeof(struct rds_iovec)))
+               return -EFAULT;
+       iov->len = args->nr_local;
+
        /* figure out the number of pages in the vector */
-       for (i = 0; i < args->nr_local; i++) {
-               if (copy_from_user(&vec, &local_vec[i],
-                                  sizeof(struct rds_iovec)))
-                       return -EFAULT;
+       for (i = 0; i < args->nr_local; i++, vec++) {
 
-               nr_pages = rds_pages_in_vec(&vec);
+               nr_pages = rds_pages_in_vec(vec);
                if (nr_pages == 0)
                        return -EINVAL;
 
@@ -558,15 +569,15 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
  * Extract all arguments and set up the rdma_op
  */
 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-                         struct cmsghdr *cmsg)
+                      struct cmsghdr *cmsg,
+                      struct rds_iov_vector *vec)
 {
        struct rds_rdma_args *args;
        struct rm_rdma_op *op = &rm->rdma;
        int nr_pages;
        unsigned int nr_bytes;
        struct page **pages = NULL;
-       struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
-       int iov_size;
+       struct rds_iovec *iovs;
        unsigned int i, j;
        int ret = 0;
 
@@ -586,31 +597,23 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                goto out_ret;
        }
 
-       /* Check whether to allocate the iovec area */
-       iov_size = args->nr_local * sizeof(struct rds_iovec);
-       if (args->nr_local > UIO_FASTIOV) {
-               iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
-               if (!iovs) {
-                       ret = -ENOMEM;
-                       goto out_ret;
-               }
+       if (vec->len != args->nr_local) {
+               ret = -EINVAL;
+               goto out_ret;
        }
 
-       if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
-               ret = -EFAULT;
-               goto out;
-       }
+       iovs = vec->iov;
 
        nr_pages = rds_rdma_pages(iovs, args->nr_local);
        if (nr_pages < 0) {
                ret = -EINVAL;
-               goto out;
+               goto out_ret;
        }
 
        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
-               goto out;
+               goto out_ret;
        }
 
        op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
@@ -620,11 +623,9 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
        op->op_active = 1;
        op->op_recverr = rs->rs_recverr;
        WARN_ON(!nr_pages);
-       op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
-       if (!op->op_sg) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
+       if (!op->op_sg)
+               goto out_pages;
 
        if (op->op_notify || op->op_recverr) {
                /* We allocate an uninitialized notifier here, because
@@ -635,7 +636,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
                if (!op->op_notifier) {
                        ret = -ENOMEM;
-                       goto out;
+                       goto out_pages;
                }
                op->op_notifier->n_user_token = args->user_token;
                op->op_notifier->n_status = RDS_RDMA_SUCCESS;
@@ -681,7 +682,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                 */
                ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
                if (ret < 0)
-                       goto out;
+                       goto out_pages;
                else
                        ret = 0;
 
@@ -714,13 +715,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                                nr_bytes,
                                (unsigned int) args->remote_vec.bytes);
                ret = -EINVAL;
-               goto out;
+               goto out_pages;
        }
        op->op_bytes = nr_bytes;
 
-out:
-       if (iovs != iovstack)
-               sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
+out_pages:
        kfree(pages);
 out_ret:
        if (ret)
@@ -838,11 +837,9 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
        rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
        rm->atomic.op_active = 1;
        rm->atomic.op_recverr = rs->rs_recverr;
-       rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
-       if (!rm->atomic.op_sg) {
-               ret = -ENOMEM;
+       rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
+       if (!rm->atomic.op_sg)
                goto err;
-       }
 
        /* verify 8 byte-aligned */
        if (args->local_addr & 0x7) {
index 6bfaf05..02ec4a3 100644 (file)
@@ -386,6 +386,18 @@ static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
        INIT_LIST_HEAD(&q->zcookie_head);
 }
 
+struct rds_iov_vector {
+       struct rds_iovec *iov;
+       int               len;
+};
+
+struct rds_iov_vector_arr {
+       struct rds_iov_vector *vec;
+       int                    len;
+       int                    indx;
+       int                    incr;
+};
+
 struct rds_message {
        refcount_t              m_refcount;
        struct list_head        m_sock_item;
@@ -827,7 +839,8 @@ rds_conn_connecting(struct rds_connection *conn)
 
 /* message.c */
 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+                                         int *ret);
 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
                               bool zcopy);
 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
@@ -904,13 +917,13 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
 int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
 void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_rdma_extra_size(struct rds_rdma_args *args);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-                         struct cmsghdr *cmsg);
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+                       struct rds_iov_vector *iov);
 int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
                          struct cmsghdr *cmsg);
 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-                         struct cmsghdr *cmsg);
+                         struct cmsghdr *cmsg,
+                         struct rds_iov_vector *vec);
 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
                          struct cmsghdr *cmsg);
 void rds_rdma_free_op(struct rm_rdma_op *ro);
index fe785ee..3d822ba 100644 (file)
@@ -876,13 +876,18 @@ out:
  * rds_message is getting to be quite complicated, and we'd like to allocate
  * it all in one go. This figures out how big it needs to be up front.
  */
-static int rds_rm_size(struct msghdr *msg, int num_sgs)
+static int rds_rm_size(struct msghdr *msg, int num_sgs,
+                      struct rds_iov_vector_arr *vct)
 {
        struct cmsghdr *cmsg;
        int size = 0;
        int cmsg_groups = 0;
        int retval;
        bool zcopy_cookie = false;
+       struct rds_iov_vector *iov, *tmp_iov;
+
+       if (num_sgs < 0)
+               return -EINVAL;
 
        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
@@ -893,8 +898,24 @@ static int rds_rm_size(struct msghdr *msg, int num_sgs)
 
                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
+                       if (vct->indx >= vct->len) {
+                               vct->len += vct->incr;
+                               tmp_iov =
+                                       krealloc(vct->vec,
+                                                vct->len *
+                                                sizeof(struct rds_iov_vector),
+                                                GFP_KERNEL);
+                               if (!tmp_iov) {
+                                       vct->len -= vct->incr;
+                                       return -ENOMEM;
+                               }
+                               vct->vec = tmp_iov;
+                       }
+                       iov = &vct->vec[vct->indx];
+                       memset(iov, 0, sizeof(struct rds_iov_vector));
+                       vct->indx++;
                        cmsg_groups |= 1;
-                       retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
+                       retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
                        if (retval < 0)
                                return retval;
                        size += retval;
@@ -951,10 +972,11 @@ static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
 }
 
 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
-                        struct msghdr *msg, int *allocated_mr)
+                        struct msghdr *msg, int *allocated_mr,
+                        struct rds_iov_vector_arr *vct)
 {
        struct cmsghdr *cmsg;
-       int ret = 0;
+       int ret = 0, ind = 0;
 
        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
@@ -968,7 +990,10 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
                 */
                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
-                       ret = rds_cmsg_rdma_args(rs, rm, cmsg);
+                       if (ind >= vct->indx)
+                               return -ENOMEM;
+                       ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
+                       ind++;
                        break;
 
                case RDS_CMSG_RDMA_DEST:
@@ -1084,6 +1109,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                      sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
        int num_sgs = ceil(payload_len, PAGE_SIZE);
        int namelen;
+       struct rds_iov_vector_arr vct;
+       int ind;
+
+       memset(&vct, 0, sizeof(vct));
+
+       /* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
+       vct.incr = 1;
 
        /* Mirror Linux UDP mirror of BSD error message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
@@ -1220,7 +1252,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
        }
        /* size of rm including all sgs */
-       ret = rds_rm_size(msg, num_sgs);
+       ret = rds_rm_size(msg, num_sgs, &vct);
        if (ret < 0)
                goto out;
 
@@ -1232,11 +1264,9 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 
        /* Attach data to the rm */
        if (payload_len) {
-               rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
-               if (!rm->data.op_sg) {
-                       ret = -ENOMEM;
+               rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
+               if (!rm->data.op_sg)
                        goto out;
-               }
                ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
                if (ret)
                        goto out;
@@ -1270,7 +1300,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        rm->m_conn_path = cpath;
 
        /* Parse any control messages the user may have included. */
-       ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
+       ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
        if (ret) {
                /* Trigger connection so that its ready for the next retry */
                if (ret ==  -EAGAIN)
@@ -1348,9 +1378,18 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        if (ret)
                goto out;
        rds_message_put(rm);
+
+       for (ind = 0; ind < vct.indx; ind++)
+               kfree(vct.vec[ind].iov);
+       kfree(vct.vec);
+
        return payload_len;
 
 out:
+       for (ind = 0; ind < vct.indx; ind++)
+               kfree(vct.vec[ind].iov);
+       kfree(vct.vec);
+
        /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
         * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
         * or in any other way, we need to destroy the MR again */
index 1eb2e2c..dad04e7 100644 (file)
@@ -1372,10 +1372,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 
        if (fold) {
-               if (!tc_skip_sw(fold->flags))
-                       rhashtable_remove_fast(&fold->mask->ht,
-                                              &fold->ht_node,
-                                              fold->mask->filter_ht_params);
+               rhashtable_remove_fast(&fold->mask->ht,
+                                      &fold->ht_node,
+                                      fold->mask->filter_ht_params);
                if (!tc_skip_hw(fold->flags))
                        fl_hw_destroy_filter(tp, fold, NULL);
        }
index 6e27c62..b9ed271 100644 (file)
@@ -101,6 +101,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                if (addr) {
                        addr->a.v6.sin6_family = AF_INET6;
                        addr->a.v6.sin6_port = 0;
+                       addr->a.v6.sin6_flowinfo = 0;
                        addr->a.v6.sin6_addr = ifa->addr;
                        addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
                        addr->valid = 1;
index 63f08b4..c4da4a7 100644 (file)
@@ -147,8 +147,14 @@ static int smc_release(struct socket *sock)
                sk->sk_shutdown |= SHUTDOWN_MASK;
        }
        if (smc->clcsock) {
+               if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
+                       /* wake up clcsock accept */
+                       rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+               }
+               mutex_lock(&smc->clcsock_release_lock);
                sock_release(smc->clcsock);
                smc->clcsock = NULL;
+               mutex_unlock(&smc->clcsock_release_lock);
        }
        if (smc->use_fallback) {
                if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
@@ -205,6 +211,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
        spin_lock_init(&smc->conn.send_lock);
        sk->sk_prot->hash(sk);
        sk_refcnt_debug_inc(sk);
+       mutex_init(&smc->clcsock_release_lock);
 
        return sk;
 }
@@ -824,7 +831,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
        struct socket *new_clcsock = NULL;
        struct sock *lsk = &lsmc->sk;
        struct sock *new_sk;
-       int rc;
+       int rc = -EINVAL;
 
        release_sock(lsk);
        new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
@@ -837,7 +844,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
        }
        *new_smc = smc_sk(new_sk);
 
-       rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+       mutex_lock(&lsmc->clcsock_release_lock);
+       if (lsmc->clcsock)
+               rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+       mutex_unlock(&lsmc->clcsock_release_lock);
        lock_sock(lsk);
        if  (rc < 0)
                lsk->sk_err = -rc;
index 08786ac..5721416 100644 (file)
@@ -219,6 +219,10 @@ struct smc_sock {                          /* smc sock container */
                                                 * started, waiting for unsent
                                                 * data to be sent
                                                 */
+       struct mutex            clcsock_release_lock;
+                                               /* protects clcsock of a listen
+                                                * socket
+                                                * */
 };
 
 static inline struct smc_sock *smc_sk(const struct sock *sk)
index c6782aa..24cbddc 100644 (file)
@@ -1952,6 +1952,7 @@ call_connect_status(struct rpc_task *task)
                /* retry with existing socket, after a delay */
                rpc_delay(task, 3*HZ);
                /* fall through */
+       case -ENOTCONN:
        case -EAGAIN:
                /* Check for timeouts before looping back to call_bind */
        case -ETIMEDOUT:
index ce92700..73547d1 100644 (file)
@@ -67,7 +67,6 @@
  */
 static void     xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32  xprt_alloc_xid(struct rpc_xprt *xprt);
-static void    xprt_connect_status(struct rpc_task *task);
 static void     xprt_destroy(struct rpc_xprt *xprt);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
@@ -680,7 +679,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
        /* Try to schedule an autoclose RPC call */
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
-       xprt_wake_pending_tasks(xprt, -EAGAIN);
+       else if (xprt->snd_task)
+               rpc_wake_up_queued_task_set_status(&xprt->pending,
+                               xprt->snd_task, -ENOTCONN);
        spin_unlock_bh(&xprt->transport_lock);
 }
 EXPORT_SYMBOL_GPL(xprt_force_disconnect);
@@ -820,7 +821,7 @@ void xprt_connect(struct rpc_task *task)
        if (!xprt_connected(xprt)) {
                task->tk_timeout = task->tk_rqstp->rq_timeout;
                task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
-               rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
+               rpc_sleep_on(&xprt->pending, task, NULL);
 
                if (test_bit(XPRT_CLOSING, &xprt->state))
                        return;
@@ -839,34 +840,6 @@ void xprt_connect(struct rpc_task *task)
        xprt_release_write(xprt, task);
 }
 
-static void xprt_connect_status(struct rpc_task *task)
-{
-       switch (task->tk_status) {
-       case 0:
-               dprintk("RPC: %5u xprt_connect_status: connection established\n",
-                               task->tk_pid);
-               break;
-       case -ECONNREFUSED:
-       case -ECONNRESET:
-       case -ECONNABORTED:
-       case -ENETUNREACH:
-       case -EHOSTUNREACH:
-       case -EPIPE:
-       case -EAGAIN:
-               dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
-               break;
-       case -ETIMEDOUT:
-               dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
-                               "out\n", task->tk_pid);
-               break;
-       default:
-               dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
-                               "server %s\n", task->tk_pid, -task->tk_status,
-                               task->tk_rqstp->rq_xprt->servername);
-               task->tk_status = -EIO;
-       }
-}
-
 enum xprt_xid_rb_cmp {
        XID_RB_EQUAL,
        XID_RB_LEFT,
index 8a5e823..f0b3700 100644 (file)
@@ -1217,6 +1217,8 @@ static void xs_reset_transport(struct sock_xprt *transport)
 
        trace_rpc_socket_close(xprt, sock);
        sock_release(sock);
+
+       xprt_disconnect_done(xprt);
 }
 
 /**
@@ -1237,8 +1239,6 @@ static void xs_close(struct rpc_xprt *xprt)
 
        xs_reset_transport(transport);
        xprt->reestablish_timeout = 0;
-
-       xprt_disconnect_done(xprt);
 }
 
 static void xs_inject_disconnect(struct rpc_xprt *xprt)
@@ -1489,8 +1489,6 @@ static void xs_tcp_state_change(struct sock *sk)
                                        &transport->sock_state))
                        xprt_clear_connecting(xprt);
                clear_bit(XPRT_CLOSING, &xprt->state);
-               if (sk->sk_err)
-                       xprt_wake_pending_tasks(xprt, -sk->sk_err);
                /* Trigger the socket release */
                xs_tcp_force_close(xprt);
        }
@@ -2092,8 +2090,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
        trace_rpc_socket_connect(xprt, sock, 0);
        status = 0;
 out:
-       xprt_unlock_connect(xprt, transport);
        xprt_clear_connecting(xprt);
+       xprt_unlock_connect(xprt, transport);
        xprt_wake_pending_tasks(xprt, status);
 }
 
@@ -2329,8 +2327,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
        }
        status = -EAGAIN;
 out:
-       xprt_unlock_connect(xprt, transport);
        xprt_clear_connecting(xprt);
+       xprt_unlock_connect(xprt, transport);
        xprt_wake_pending_tasks(xprt, status);
 }
 
index 291d6bb..1217c90 100644 (file)
@@ -889,7 +889,6 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        int blks = tsk_blocks(GROUP_H_SIZE + dlen);
        struct tipc_sock *tsk = tipc_sk(sk);
-       struct tipc_group *grp = tsk->group;
        struct net *net = sock_net(sk);
        struct tipc_member *mb = NULL;
        u32 node, port;
@@ -903,7 +902,9 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
        /* Block or return if destination link or member is congested */
        rc = tipc_wait_for_cond(sock, &timeout,
                                !tipc_dest_find(&tsk->cong_links, node, 0) &&
-                               !tipc_group_cong(grp, node, port, blks, &mb));
+                               tsk->group &&
+                               !tipc_group_cong(tsk->group, node, port, blks,
+                                                &mb));
        if (unlikely(rc))
                return rc;
 
@@ -933,7 +934,6 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
        struct tipc_sock *tsk = tipc_sk(sk);
        struct list_head *cong_links = &tsk->cong_links;
        int blks = tsk_blocks(GROUP_H_SIZE + dlen);
-       struct tipc_group *grp = tsk->group;
        struct tipc_msg *hdr = &tsk->phdr;
        struct tipc_member *first = NULL;
        struct tipc_member *mbr = NULL;
@@ -950,9 +950,10 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
        type = msg_nametype(hdr);
        inst = dest->addr.name.name.instance;
        scope = msg_lookup_scope(hdr);
-       exclude = tipc_group_exclude(grp);
 
        while (++lookups < 4) {
+               exclude = tipc_group_exclude(tsk->group);
+
                first = NULL;
 
                /* Look for a non-congested destination member, if any */
@@ -961,7 +962,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
                                                 &dstcnt, exclude, false))
                                return -EHOSTUNREACH;
                        tipc_dest_pop(&dsts, &node, &port);
-                       cong = tipc_group_cong(grp, node, port, blks, &mbr);
+                       cong = tipc_group_cong(tsk->group, node, port, blks,
+                                              &mbr);
                        if (!cong)
                                break;
                        if (mbr == first)
@@ -980,7 +982,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
                /* Block or return if destination link or member is congested */
                rc = tipc_wait_for_cond(sock, &timeout,
                                        !tipc_dest_find(cong_links, node, 0) &&
-                                       !tipc_group_cong(grp, node, port,
+                                       tsk->group &&
+                                       !tipc_group_cong(tsk->group, node, port,
                                                         blks, &mbr));
                if (unlikely(rc))
                        return rc;
@@ -1015,8 +1018,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
-       struct tipc_group *grp = tsk->group;
-       struct tipc_nlist *dsts = tipc_group_dests(grp);
+       struct tipc_nlist *dsts;
        struct tipc_mc_method *method = &tsk->mc_method;
        bool ack = method->mandatory && method->rcast;
        int blks = tsk_blocks(MCAST_H_SIZE + dlen);
@@ -1025,15 +1027,17 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
        struct sk_buff_head pkts;
        int rc = -EHOSTUNREACH;
 
-       if (!dsts->local && !dsts->remote)
-               return -EHOSTUNREACH;
-
        /* Block or return if any destination link or member is congested */
-       rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
-                               !tipc_group_bc_cong(grp, blks));
+       rc = tipc_wait_for_cond(sock, &timeout,
+                               !tsk->cong_link_cnt && tsk->group &&
+                               !tipc_group_bc_cong(tsk->group, blks));
        if (unlikely(rc))
                return rc;
 
+       dsts = tipc_group_dests(tsk->group);
+       if (!dsts->local && !dsts->remote)
+               return -EHOSTUNREACH;
+
        /* Complete message header */
        if (dest) {
                msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
@@ -1045,7 +1049,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
        msg_set_hdr_sz(hdr, GROUP_H_SIZE);
        msg_set_destport(hdr, 0);
        msg_set_destnode(hdr, 0);
-       msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
+       msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
 
        /* Avoid getting stuck with repeated forced replicasts */
        msg_set_grp_bc_ack_req(hdr, ack);
@@ -2757,11 +2761,15 @@ void tipc_sk_reinit(struct net *net)
                rhashtable_walk_start(&iter);
 
                while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
-                       spin_lock_bh(&tsk->sk.sk_lock.slock);
+                       sock_hold(&tsk->sk);
+                       rhashtable_walk_stop(&iter);
+                       lock_sock(&tsk->sk);
                        msg = &tsk->phdr;
                        msg_set_prevnode(msg, tipc_own_addr(net));
                        msg_set_orignode(msg, tipc_own_addr(net));
-                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+                       release_sock(&tsk->sk);
+                       rhashtable_walk_start(&iter);
+                       sock_put(&tsk->sk);
                }
 
                rhashtable_walk_stop(&iter);
index 10dc59c..4d85d71 100644 (file)
@@ -245,10 +245,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                }
 
                err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
-               if (err) {
-                       kfree_skb(_skb);
+               if (err)
                        goto out;
-               }
        }
        err = 0;
 out:
@@ -681,6 +679,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
        if (err)
                goto err;
 
+       if (remote.proto != local.proto) {
+               err = -EINVAL;
+               goto err;
+       }
+
        /* Checking remote ip address */
        rmcast = tipc_udp_is_mcast_addr(&remote);
 
index 311cec8..28887cf 100644 (file)
@@ -56,7 +56,7 @@ enum {
 static struct proto *saved_tcpv6_prot;
 static DEFINE_MUTEX(tcpv6_prot_mutex);
 static LIST_HEAD(device_list);
-static DEFINE_MUTEX(device_mutex);
+static DEFINE_SPINLOCK(device_spinlock);
 static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
 static struct proto_ops tls_sw_proto_ops;
 
@@ -538,11 +538,14 @@ static struct tls_context *create_ctx(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tls_context *ctx;
 
-       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
        if (!ctx)
                return NULL;
 
        icsk->icsk_ulp_data = ctx;
+       ctx->setsockopt = sk->sk_prot->setsockopt;
+       ctx->getsockopt = sk->sk_prot->getsockopt;
+       ctx->sk_proto_close = sk->sk_prot->close;
        return ctx;
 }
 
@@ -552,7 +555,7 @@ static int tls_hw_prot(struct sock *sk)
        struct tls_device *dev;
        int rc = 0;
 
-       mutex_lock(&device_mutex);
+       spin_lock_bh(&device_spinlock);
        list_for_each_entry(dev, &device_list, dev_list) {
                if (dev->feature && dev->feature(dev)) {
                        ctx = create_ctx(sk);
@@ -570,7 +573,7 @@ static int tls_hw_prot(struct sock *sk)
                }
        }
 out:
-       mutex_unlock(&device_mutex);
+       spin_unlock_bh(&device_spinlock);
        return rc;
 }
 
@@ -579,12 +582,17 @@ static void tls_hw_unhash(struct sock *sk)
        struct tls_context *ctx = tls_get_ctx(sk);
        struct tls_device *dev;
 
-       mutex_lock(&device_mutex);
+       spin_lock_bh(&device_spinlock);
        list_for_each_entry(dev, &device_list, dev_list) {
-               if (dev->unhash)
+               if (dev->unhash) {
+                       kref_get(&dev->kref);
+                       spin_unlock_bh(&device_spinlock);
                        dev->unhash(dev, sk);
+                       kref_put(&dev->kref, dev->release);
+                       spin_lock_bh(&device_spinlock);
+               }
        }
-       mutex_unlock(&device_mutex);
+       spin_unlock_bh(&device_spinlock);
        ctx->unhash(sk);
 }
 
@@ -595,12 +603,17 @@ static int tls_hw_hash(struct sock *sk)
        int err;
 
        err = ctx->hash(sk);
-       mutex_lock(&device_mutex);
+       spin_lock_bh(&device_spinlock);
        list_for_each_entry(dev, &device_list, dev_list) {
-               if (dev->hash)
+               if (dev->hash) {
+                       kref_get(&dev->kref);
+                       spin_unlock_bh(&device_spinlock);
                        err |= dev->hash(dev, sk);
+                       kref_put(&dev->kref, dev->release);
+                       spin_lock_bh(&device_spinlock);
+               }
        }
-       mutex_unlock(&device_mutex);
+       spin_unlock_bh(&device_spinlock);
 
        if (err)
                tls_hw_unhash(sk);
@@ -675,9 +688,6 @@ static int tls_init(struct sock *sk)
                rc = -ENOMEM;
                goto out;
        }
-       ctx->setsockopt = sk->sk_prot->setsockopt;
-       ctx->getsockopt = sk->sk_prot->getsockopt;
-       ctx->sk_proto_close = sk->sk_prot->close;
 
        /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
        if (ip_ver == TLSV6 &&
@@ -699,17 +709,17 @@ out:
 
 void tls_register_device(struct tls_device *device)
 {
-       mutex_lock(&device_mutex);
+       spin_lock_bh(&device_spinlock);
        list_add_tail(&device->dev_list, &device_list);
-       mutex_unlock(&device_mutex);
+       spin_unlock_bh(&device_spinlock);
 }
 EXPORT_SYMBOL(tls_register_device);
 
 void tls_unregister_device(struct tls_device *device)
 {
-       mutex_lock(&device_mutex);
+       spin_lock_bh(&device_spinlock);
        list_del(&device->dev_list);
-       mutex_unlock(&device_mutex);
+       spin_unlock_bh(&device_spinlock);
 }
 EXPORT_SYMBOL(tls_unregister_device);
 
index ab27a28..43a1dec 100644 (file)
 #include <linux/mutex.h>
 #include <linux/net.h>
 #include <linux/poll.h>
+#include <linux/random.h>
 #include <linux/skbuff.h>
 #include <linux/smp.h>
 #include <linux/socket.h>
@@ -504,9 +505,13 @@ out:
 static int __vsock_bind_stream(struct vsock_sock *vsk,
                               struct sockaddr_vm *addr)
 {
-       static u32 port = LAST_RESERVED_PORT + 1;
+       static u32 port = 0;
        struct sockaddr_vm new_addr;
 
+       if (!port)
+               port = LAST_RESERVED_PORT + 1 +
+                       prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
+
        vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
 
        if (addr->svm_port == VMADDR_PORT_ANY) {
index cb332ad..c361ce7 100644 (file)
@@ -264,6 +264,31 @@ vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
 }
 
 static int
+vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
+                                     struct sockaddr_vm *dst,
+                                     enum vmci_transport_packet_type type,
+                                     u64 size,
+                                     u64 mode,
+                                     struct vmci_transport_waiting_info *wait,
+                                     u16 proto,
+                                     struct vmci_handle handle)
+{
+       struct vmci_transport_packet *pkt;
+       int err;
+
+       pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+       if (!pkt)
+               return -ENOMEM;
+
+       err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
+                                               mode, wait, proto, handle,
+                                               true);
+       kfree(pkt);
+
+       return err;
+}
+
+static int
 vmci_transport_send_control_pkt(struct sock *sk,
                                enum vmci_transport_packet_type type,
                                u64 size,
@@ -272,9 +297,7 @@ vmci_transport_send_control_pkt(struct sock *sk,
                                u16 proto,
                                struct vmci_handle handle)
 {
-       struct vmci_transport_packet *pkt;
        struct vsock_sock *vsk;
-       int err;
 
        vsk = vsock_sk(sk);
 
@@ -284,17 +307,10 @@ vmci_transport_send_control_pkt(struct sock *sk,
        if (!vsock_addr_bound(&vsk->remote_addr))
                return -EINVAL;
 
-       pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
-       if (!pkt)
-               return -ENOMEM;
-
-       err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
-                                               &vsk->remote_addr, type, size,
-                                               mode, wait, proto, handle,
-                                               true);
-       kfree(pkt);
-
-       return err;
+       return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
+                                                    &vsk->remote_addr,
+                                                    type, size, mode,
+                                                    wait, proto, handle);
 }
 
 static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
@@ -312,12 +328,29 @@ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
 static int vmci_transport_send_reset(struct sock *sk,
                                     struct vmci_transport_packet *pkt)
 {
+       struct sockaddr_vm *dst_ptr;
+       struct sockaddr_vm dst;
+       struct vsock_sock *vsk;
+
        if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
                return 0;
-       return vmci_transport_send_control_pkt(sk,
-                                       VMCI_TRANSPORT_PACKET_TYPE_RST,
-                                       0, 0, NULL, VSOCK_PROTO_INVALID,
-                                       VMCI_INVALID_HANDLE);
+
+       vsk = vsock_sk(sk);
+
+       if (!vsock_addr_bound(&vsk->local_addr))
+               return -EINVAL;
+
+       if (vsock_addr_bound(&vsk->remote_addr)) {
+               dst_ptr = &vsk->remote_addr;
+       } else {
+               vsock_addr_init(&dst, pkt->dg.src.context,
+                               pkt->src_port);
+               dst_ptr = &dst;
+       }
+       return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
+                                            VMCI_TRANSPORT_PACKET_TYPE_RST,
+                                            0, 0, NULL, VSOCK_PROTO_INVALID,
+                                            VMCI_INVALID_HANDLE);
 }
 
 static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
index 10ec055..5e49492 100644 (file)
@@ -9152,8 +9152,10 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
        if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
                int r = validate_pae_over_nl80211(rdev, info);
 
-               if (r < 0)
+               if (r < 0) {
+                       kzfree(connkeys);
                        return r;
+               }
 
                ibss.control_port_over_nl80211 = true;
        }
index 6bc8173..b3b6136 100644 (file)
@@ -315,6 +315,12 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
                sp->xvec[sp->len++] = x;
 
+               skb_dst_force(skb);
+               if (!skb_dst(skb)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+                       goto drop;
+               }
+
 lock:
                spin_lock(&x->lock);
 
@@ -354,7 +360,6 @@ lock:
                XFRM_SKB_CB(skb)->seq.input.low = seq;
                XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
 
-               skb_dst_force(skb);
                dev_hold(skb->dev);
 
                if (crypto_done)
index 757c4d1..9333153 100644 (file)
@@ -102,6 +102,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
                skb_dst_force(skb);
                if (!skb_dst(skb)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+                       err = -EHOSTUNREACH;
                        goto error_nolock;
                }
 
index dc4a9f1..23c9289 100644 (file)
@@ -426,6 +426,12 @@ static void xfrm_put_mode(struct xfrm_mode *mode)
        module_put(mode->owner);
 }
 
+void xfrm_state_free(struct xfrm_state *x)
+{
+       kmem_cache_free(xfrm_state_cache, x);
+}
+EXPORT_SYMBOL(xfrm_state_free);
+
 static void xfrm_state_gc_destroy(struct xfrm_state *x)
 {
        tasklet_hrtimer_cancel(&x->mtimer);
@@ -452,7 +458,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
        }
        xfrm_dev_state_free(x);
        security_xfrm_state_free(x);
-       kmem_cache_free(xfrm_state_cache, x);
+       xfrm_state_free(x);
 }
 
 static void xfrm_state_gc_task(struct work_struct *work)
@@ -788,7 +794,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
 {
        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        si->sadcnt = net->xfrm.state_num;
-       si->sadhcnt = net->xfrm.state_hmask;
+       si->sadhcnt = net->xfrm.state_hmask + 1;
        si->sadhmcnt = xfrm_state_hashmax;
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 }
index c9a84e2..277c1c4 100644 (file)
@@ -2288,13 +2288,13 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        }
 
-       kfree(x);
+       xfrm_state_free(x);
        kfree(xp);
 
        return 0;
 
 free_state:
-       kfree(x);
+       xfrm_state_free(x);
 nomem:
        return err;
 }
index 8081b6c..34414c6 100755 (executable)
@@ -47,8 +47,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
        $xs     = "[0-9a-f ]";  # hex character or space
        $funcre = qr/^$x* <(.*)>:$/;
        if ($arch eq 'aarch64') {
-               #ffffffc0006325cc:       a9bb7bfd        stp     x29, x30, [sp,#-80]!
-               $re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
+               #ffffffc0006325cc:       a9bb7bfd        stp     x29, x30, [sp, #-80]!
+               $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
        } elsif ($arch eq 'arm') {
                #c0008ffc:      e24dd064        sub     sp, sp, #100    ; 0x64
                $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
index 5056fb3..e559c62 100755 (executable)
@@ -168,6 +168,7 @@ class id_parser(object):
         self.curline = 0
         try:
             for line in fd:
+                line = line.decode(locale.getpreferredencoding(False), errors='ignore')
                 self.curline += 1
                 if self.curline > maxlines:
                     break
@@ -249,12 +250,13 @@ if __name__ == '__main__':
 
     try:
         if len(args.path) and args.path[0] == '-':
-            parser.parse_lines(sys.stdin, args.maxlines, '-')
+            stdin = os.fdopen(sys.stdin.fileno(), 'rb')
+            parser.parse_lines(stdin, args.maxlines, '-')
         else:
             if args.path:
                 for p in args.path:
                     if os.path.isfile(p):
-                        parser.parse_lines(open(p), args.maxlines, p)
+                        parser.parse_lines(open(p, 'rb'), args.maxlines, p)
                     elif os.path.isdir(p):
                         scan_git_subtree(repo.head.reference.commit.tree, p)
                     else:
index 8c94998..7489cb7 100644 (file)
@@ -580,9 +580,9 @@ void ima_update_policy(void)
        ima_update_policy_flag();
 }
 
+/* Keep the enumeration in sync with the policy_tokens! */
 enum {
-       Opt_err = -1,
-       Opt_measure = 1, Opt_dont_measure,
+       Opt_measure, Opt_dont_measure,
        Opt_appraise, Opt_dont_appraise,
        Opt_audit, Opt_hash, Opt_dont_hash,
        Opt_obj_user, Opt_obj_role, Opt_obj_type,
@@ -592,10 +592,10 @@ enum {
        Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
        Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
        Opt_appraise_type, Opt_permit_directio,
-       Opt_pcr
+       Opt_pcr, Opt_err
 };
 
-static match_table_t policy_tokens = {
+static const match_table_t policy_tokens = {
        {Opt_measure, "measure"},
        {Opt_dont_measure, "dont_measure"},
        {Opt_appraise, "appraise"},
@@ -1103,7 +1103,7 @@ void ima_policy_stop(struct seq_file *m, void *v)
 {
 }
 
-#define pt(token)      policy_tokens[token + Opt_err].pattern
+#define pt(token)      policy_tokens[token].pattern
 #define mt(token)      mask_tokens[token]
 
 /*
index 7839788..70e65a2 100644 (file)
@@ -25,7 +25,7 @@ static void keyctl_pkey_params_free(struct kernel_pkey_params *params)
 }
 
 enum {
-       Opt_err = -1,
+       Opt_err,
        Opt_enc,                /* "enc=<encoding>" eg. "enc=oaep" */
        Opt_hash,               /* "hash=<digest-name>" eg. "hash=sha1" */
 };
index ff67893..697bfc6 100644 (file)
@@ -711,7 +711,7 @@ static int key_unseal(struct trusted_key_payload *p,
 }
 
 enum {
-       Opt_err = -1,
+       Opt_err,
        Opt_new, Opt_load, Opt_update,
        Opt_keyhandle, Opt_keyauth, Opt_blobauth,
        Opt_pcrinfo, Opt_pcrlock, Opt_migratable,
index 64c3cb0..654a503 100644 (file)
@@ -30,7 +30,7 @@ static int ff400_get_clock(struct snd_ff *ff, unsigned int *rate,
        int err;
 
        err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
-                                FF400_SYNC_STATUS, &reg, sizeof(reg), 0);
+                                FF400_CLOCK_CONFIG, &reg, sizeof(reg), 0);
        if (err < 0)
                return err;
        data = le32_to_cpu(reg);
index 8d75597..15021c8 100644 (file)
@@ -5520,6 +5520,9 @@ enum {
        ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
        ALC295_FIXUP_HP_AUTO_MUTE,
        ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
+       ALC294_FIXUP_ASUS_MIC,
+       ALC294_FIXUP_ASUS_HEADSET_MIC,
+       ALC294_FIXUP_ASUS_SPK,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6392,6 +6395,8 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_invalidate_dacs,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_THINKPAD_ACPI
        },
        [ALC295_FIXUP_HP_AUTO_MUTE] = {
                .type = HDA_FIXUP_FUNC,
@@ -6406,6 +6411,36 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MIC
        },
+       [ALC294_FIXUP_ASUS_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x13, 0x90a60160 }, /* use as internal mic */
+                       { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
+       [ALC294_FIXUP_ASUS_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
+       [ALC294_FIXUP_ASUS_SPK] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       /* Set EAPD high */
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6548,6 +6583,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+       SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -7155,6 +7191,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x13, 0x90a60140}),
+       SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
+               {0x14, 0x90170110},
+               {0x1b, 0x90a70130},
+               {0x21, 0x04211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+               {0x12, 0x90a60130},
+               {0x17, 0x90170110},
+               {0x21, 0x04211020}),
        SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC295_STANDARD_PINS,
                {0x17, 0x21014020},
@@ -7227,6 +7271,37 @@ static void alc269_fill_coef(struct hda_codec *codec)
        alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
 
+static void alc294_hp_init(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       int i, val;
+
+       if (!hp_pin)
+               return;
+
+       snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+       msleep(100);
+
+       snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+       alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
+       alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+       /* Wait for depop procedure finish  */
+       val = alc_read_coefex_idx(codec, 0x58, 0x01);
+       for (i = 0; i < 20 && val & 0x0080; i++) {
+               msleep(50);
+               val = alc_read_coefex_idx(codec, 0x58, 0x01);
+       }
+       /* Set HP depop to auto mode */
+       alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+       msleep(50);
+}
+
 /*
  */
 static int patch_alc269(struct hda_codec *codec)
@@ -7352,6 +7427,7 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC294;
                spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
+               alc294_hp_init(codec);
                break;
        case 0x10ec0300:
                spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7363,6 +7439,7 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC700;
                spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
+               alc294_hp_init(codec);
                break;
 
        }
index 486ed1f..0a4d733 100644 (file)
@@ -155,7 +155,7 @@ enum nlmsgerr_attrs {
 #define NETLINK_LIST_MEMBERSHIPS       9
 #define NETLINK_CAP_ACK                        10
 #define NETLINK_EXT_ACK                        11
-#define NETLINK_DUMP_STRICT_CHK                12
+#define NETLINK_GET_STRICT_CHK         12
 
 struct nl_pktinfo {
        __u32   group;
index acf1afa..397d6b6 100644 (file)
@@ -7,6 +7,7 @@ LDLIBS+= -lpthread -lurcu
 TARGETS = main idr-test multiorder xarray
 CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
+        regression4.o \
         tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
 
 ifndef SHIFT
index 77a44c5..7a22d6e 100644 (file)
@@ -308,6 +308,7 @@ int main(int argc, char **argv)
        regression1_test();
        regression2_test();
        regression3_test();
+       regression4_test();
        iteration_test(0, 10 + 90 * long_run);
        iteration_test(7, 10 + 90 * long_run);
        single_thread_tests(long_run);
index 3c8a158..135145a 100644 (file)
@@ -5,5 +5,6 @@
 void regression1_test(void);
 void regression2_test(void);
 void regression3_test(void);
+void regression4_test(void);
 
 #endif
diff --git a/tools/testing/radix-tree/regression4.c b/tools/testing/radix-tree/regression4.c
new file mode 100644 (file)
index 0000000..cf4e5ab
--- /dev/null
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/radix-tree.h>
+#include <linux/rcupdate.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "regression.h"
+
+static pthread_barrier_t worker_barrier;
+static int obj0, obj1;
+static RADIX_TREE(mt_tree, GFP_KERNEL);
+
+static void *reader_fn(void *arg)
+{
+       int i;
+       void *entry;
+
+       rcu_register_thread();
+       pthread_barrier_wait(&worker_barrier);
+
+       for (i = 0; i < 1000000; i++) {
+               rcu_read_lock();
+               entry = radix_tree_lookup(&mt_tree, 0);
+               rcu_read_unlock();
+               if (entry != &obj0) {
+                       printf("iteration %d bad entry = %p\n", i, entry);
+                       abort();
+               }
+       }
+
+       rcu_unregister_thread();
+
+       return NULL;
+}
+
+static void *writer_fn(void *arg)
+{
+       int i;
+
+       rcu_register_thread();
+       pthread_barrier_wait(&worker_barrier);
+
+       for (i = 0; i < 1000000; i++) {
+               radix_tree_insert(&mt_tree, 1, &obj1);
+               radix_tree_delete(&mt_tree, 1);
+       }
+
+       rcu_unregister_thread();
+
+       return NULL;
+}
+
+void regression4_test(void)
+{
+       pthread_t reader, writer;
+
+       printv(1, "regression test 4 starting\n");
+
+       radix_tree_insert(&mt_tree, 0, &obj0);
+       pthread_barrier_init(&worker_barrier, NULL, 2);
+
+       if (pthread_create(&reader, NULL, reader_fn, NULL) ||
+           pthread_create(&writer, NULL, writer_fn, NULL)) {
+               perror("pthread_create");
+               exit(1);
+       }
+
+       if (pthread_join(reader, NULL) || pthread_join(writer, NULL)) {
+               perror("pthread_join");
+               exit(1);
+       }
+
+       printv(1, "regression test 4 passed\n");
+}
index b9798f5..284660f 100644 (file)
@@ -70,18 +70,18 @@ static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
 {
        void *data_end = (void *)(long)skb->data_end;
        void *data = (void *)(long)skb->data;
-       __u16 nhoff = skb->flow_keys->nhoff;
+       __u16 thoff = skb->flow_keys->thoff;
        __u8 *hdr;
 
        /* Verifies this variable offset does not overflow */
-       if (nhoff > (USHRT_MAX - hdr_size))
+       if (thoff > (USHRT_MAX - hdr_size))
                return NULL;
 
-       hdr = data + nhoff;
+       hdr = data + thoff;
        if (hdr + hdr_size <= data_end)
                return hdr;
 
-       if (bpf_skb_load_bytes(skb, nhoff, buffer, hdr_size))
+       if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size))
                return NULL;
 
        return buffer;
@@ -158,13 +158,13 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
                        /* Only inspect standard GRE packets with version 0 */
                        return BPF_OK;
 
-               keys->nhoff += sizeof(*gre); /* Step over GRE Flags and Proto */
+               keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
                if (GRE_IS_CSUM(gre->flags))
-                       keys->nhoff += 4; /* Step over chksum and Padding */
+                       keys->thoff += 4; /* Step over chksum and Padding */
                if (GRE_IS_KEY(gre->flags))
-                       keys->nhoff += 4; /* Step over key */
+                       keys->thoff += 4; /* Step over key */
                if (GRE_IS_SEQ(gre->flags))
-                       keys->nhoff += 4; /* Step over sequence number */
+                       keys->thoff += 4; /* Step over sequence number */
 
                keys->is_encap = true;
 
@@ -174,7 +174,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
                        if (!eth)
                                return BPF_DROP;
 
-                       keys->nhoff += sizeof(*eth);
+                       keys->thoff += sizeof(*eth);
 
                        return parse_eth_proto(skb, eth->h_proto);
                } else {
@@ -191,7 +191,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
                if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
                        return BPF_DROP;
 
-               keys->thoff = keys->nhoff;
                keys->sport = tcp->source;
                keys->dport = tcp->dest;
                return BPF_OK;
@@ -201,7 +200,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
                if (!udp)
                        return BPF_DROP;
 
-               keys->thoff = keys->nhoff;
                keys->sport = udp->source;
                keys->dport = udp->dest;
                return BPF_OK;
@@ -252,8 +250,8 @@ PROG(IP)(struct __sk_buff *skb)
        keys->ipv4_src = iph->saddr;
        keys->ipv4_dst = iph->daddr;
 
-       keys->nhoff += iph->ihl << 2;
-       if (data + keys->nhoff > data_end)
+       keys->thoff += iph->ihl << 2;
+       if (data + keys->thoff > data_end)
                return BPF_DROP;
 
        if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
@@ -285,7 +283,7 @@ PROG(IPV6)(struct __sk_buff *skb)
        keys->addr_proto = ETH_P_IPV6;
        memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
 
-       keys->nhoff += sizeof(struct ipv6hdr);
+       keys->thoff += sizeof(struct ipv6hdr);
 
        return parse_ipv6_proto(skb, ip6h->nexthdr);
 }
@@ -301,7 +299,7 @@ PROG(IPV6OP)(struct __sk_buff *skb)
        /* hlen is in 8-octets and does not include the first 8 bytes
         * of the header
         */
-       skb->flow_keys->nhoff += (1 + ip6h->hdrlen) << 3;
+       skb->flow_keys->thoff += (1 + ip6h->hdrlen) << 3;
 
        return parse_ipv6_proto(skb, ip6h->nexthdr);
 }
@@ -315,7 +313,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
        if (!fragh)
                return BPF_DROP;
 
-       keys->nhoff += sizeof(*fragh);
+       keys->thoff += sizeof(*fragh);
        keys->is_frag = true;
        if (!(fragh->frag_off & bpf_htons(IP6_OFFSET)))
                keys->is_first_frag = true;
@@ -341,7 +339,7 @@ PROG(VLAN)(struct __sk_buff *skb)
        __be16 proto;
 
        /* Peek back to see if single or double-tagging */
-       if (bpf_skb_load_bytes(skb, keys->nhoff - sizeof(proto), &proto,
+       if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
                               sizeof(proto)))
                return BPF_DROP;
 
@@ -354,14 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
                if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
                        return BPF_DROP;
 
-               keys->nhoff += sizeof(*vlan);
+               keys->thoff += sizeof(*vlan);
        }
 
        vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
        if (!vlan)
                return BPF_DROP;
 
-       keys->nhoff += sizeof(*vlan);
+       keys->thoff += sizeof(*vlan);
        /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
        if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
            vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
index a08c67c..c3b799c 100644 (file)
@@ -14099,6 +14099,33 @@ static struct bpf_test tests[] = {
                .errstr_unpriv = "R1 leaks addr",
                .result = REJECT,
        },
+               "calls: cross frame pruning",
+               .insns = {
+                       /* r8 = !!random();
+                        * call pruner()
+                        * if (r8)
+                        *     do something bad;
+                        */
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_get_prandom_u32),
+                       BPF_MOV64_IMM(BPF_REG_8, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_8, 1),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+               .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+               .result_unpriv = REJECT,
+               .errstr = "!read_ok",
+               .result = REJECT,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -14124,7 +14151,7 @@ static int create_map(uint32_t type, uint32_t size_key,
        return fd;
 }
 
-static int create_prog_dummy1(enum bpf_map_type prog_type)
+static int create_prog_dummy1(enum bpf_prog_type prog_type)
 {
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_0, 42),
@@ -14135,7 +14162,7 @@ static int create_prog_dummy1(enum bpf_map_type prog_type)
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 }
 
-static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
+static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
 {
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_3, idx),
@@ -14150,7 +14177,7 @@ static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 }
 
-static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem,
+static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
                             int p1key)
 {
        int p2key = 1;
@@ -14221,7 +14248,7 @@ static int create_cgroup_storage(bool percpu)
 
 static char bpf_vlog[UINT_MAX >> 8];
 
-static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
+static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                          struct bpf_insn *prog, int *map_fds)
 {
        int *fixup_map_hash_8b = test->fixup_map_hash_8b;
@@ -14350,7 +14377,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
                do {
                        prog[*fixup_map_stacktrace].imm = map_fds[12];
                        fixup_map_stacktrace++;
-               } while (fixup_map_stacktrace);
+               } while (*fixup_map_stacktrace);
        }
 }
 
index 9543a4c..f8f3e90 100644 (file)
@@ -9,6 +9,7 @@ TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh \
 TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
 TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
 TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
+TEST_PROGS += test_vxlan_fdb_changelink.sh
 TEST_PROGS_EXTENDED := in_netns.sh
 TEST_GEN_FILES =  socket
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
diff --git a/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh b/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
new file mode 100755 (executable)
index 0000000..2d442cd
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Check FDB default-remote handling across "ip link set".
+
+check_remotes()
+{
+       local what=$1; shift
+       local N=$(bridge fdb sh dev vx | grep 00:00:00:00:00:00 | wc -l)
+
+       echo -ne "expected two remotes after $what\t"
+       if [[ $N != 2 ]]; then
+               echo "[FAIL]"
+               EXIT_STATUS=1
+       else
+               echo "[ OK ]"
+       fi
+}
+
+ip link add name vx up type vxlan id 2000 dstport 4789
+bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.20 self permanent
+bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.30 self permanent
+check_remotes "fdb append"
+
+ip link set dev vx type vxlan remote 192.0.2.30
+check_remotes "link set"
+
+ip link del dev vx
+exit $EXIT_STATUS
index e147323..c9a2abf 100644 (file)
@@ -2731,9 +2731,14 @@ TEST(syscall_restart)
        ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
        ASSERT_EQ(true, WIFSTOPPED(status));
        ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
-       /* Verify signal delivery came from parent now. */
        ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
-       EXPECT_EQ(getpid(), info.si_pid);
+       /*
+        * There is no siginfo on SIGSTOP any more, so we can't verify
+        * signal delivery came from parent now (getpid() == info.si_pid).
+        * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com
+        * At least verify the SIGSTOP via PTRACE_GETSIGINFO.
+        */
+       EXPECT_EQ(SIGSTOP, info.si_signo);
 
        /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */
        ASSERT_EQ(0, kill(child_pid, SIGCONT));
index fb22bcc..7ef45a4 100644 (file)
 #define PAGE_MASK (~(PAGE_SIZE-1))
 #define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
 
+/* generic data direction definitions */
+#define READ                    0
+#define WRITE                   1
+
 typedef unsigned long long phys_addr_t;
 typedef unsigned long long dma_addr_t;
 typedef size_t __kernel_size_t;
index 3710342..6855cce 100644 (file)
@@ -175,10 +175,14 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 {
        struct kvm_coalesced_mmio_dev *dev, *tmp;
 
+       if (zone->pio != 1 && zone->pio != 0)
+               return -EINVAL;
+
        mutex_lock(&kvm->slots_lock);
 
        list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
-               if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+               if (zone->pio == dev->zone.pio &&
+                   coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
                        kvm_io_bus_unregister_dev(kvm,
                                zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
                        kvm_iodevice_destructor(&dev->dev);