
Merge tag 'usb-6.2-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb
author     Linus Torvalds <torvalds@linux-foundation.org>
Sun, 12 Feb 2023 19:18:57 +0000 (11:18 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Sun, 12 Feb 2023 19:18:57 +0000 (11:18 -0800)
Pull USB fixes from Greg KH:
 "Here are 2 small USB driver fixes that resolve some reported
  regressions and one new device quirk. Specifically these are:

   - new quirk for Alcor Link AK9563 smartcard reader

   - revert of u_ether gadget change in 6.2-rc1 that caused problems

   - typec pin probe fix

  All of these have been in linux-next with no reported problems"

* tag 'usb-6.2-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb:
  usb: core: add quirk for Alcor Link AK9563 smartcard reader
  usb: typec: altmodes/displayport: Fix probe pin assign check
  Revert "usb: gadget: u_ether: Do not make UDC parent of the net device"

179 files changed:
Documentation/devicetree/bindings/.gitignore
Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
MAINTAINERS
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/stihxxx-b2120.dtsi
arch/arm64/boot/dts/amlogic/meson-axg.dtsi
arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/mediatek/mt8195.dtsi
arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts
arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
arch/arm64/boot/dts/rockchip/rk356x.dtsi
arch/powerpc/Kconfig
arch/powerpc/kernel/interrupt.c
arch/powerpc/kexec/file_load_64.c
arch/riscv/include/asm/pgtable.h
arch/riscv/kernel/probes/kprobes.c
arch/riscv/kernel/stacktrace.c
arch/riscv/mm/cacheflush.c
arch/riscv/mm/pgtable.c
arch/x86/include/asm/intel-family.h
arch/x86/kernel/kprobes/core.c
drivers/acpi/nfit/core.c
drivers/clk/ingenic/jz4760-cgu.c
drivers/clk/microchip/clk-mpfs-ccc.c
drivers/cpufreq/qcom-cpufreq-hw.c
drivers/cxl/core/region.c
drivers/dax/super.c
drivers/firmware/efi/libstub/arm64.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/drm_client.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_fbdev.c
drivers/gpu/drm/i915/display/skl_watermark.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/hid/amd-sfh-hid/amd_sfh_client.c
drivers/hid/amd-sfh-hid/amd_sfh_hid.h
drivers/hid/hid-core.c
drivers/hid/hid-elecom.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-quirks.c
drivers/infiniband/core/umem_dmabuf.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/user_exp_rcv.c
drivers/infiniband/hw/irdma/cm.c
drivers/infiniband/hw/mana/qp.c
drivers/infiniband/hw/usnic/usnic_uiom.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
drivers/net/bonding/bond_debugfs.c
drivers/net/dsa/mt7530.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/intel/ice/ice_vf_mbx.c
drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/net/ethernet/mscc/ocelot_flower.c
drivers/net/ethernet/mscc/ocelot_ptp.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_port.h
drivers/net/ethernet/pensando/ionic/ionic_dev.c
drivers/net/ethernet/pensando/ionic/ionic_dev.h
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_lif.h
drivers/net/ethernet/pensando/ionic/ionic_main.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/hyperv/netvsc.c
drivers/net/phy/meson-gxl.c
drivers/net/phy/phylink.c
drivers/net/usb/plusb.c
drivers/nvdimm/Kconfig
drivers/nvdimm/nd.h
drivers/nvdimm/pfn_devs.c
drivers/nvme/host/auth.c
drivers/of/address.c
drivers/of/platform.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/aspm.c
drivers/pinctrl/aspeed/pinctrl-aspeed.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/mediatek/pinctrl-mt8195.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
drivers/spi/spi-dw-core.c
drivers/spi/spidev.c
drivers/video/fbdev/nvidia/nvidia.c
fs/btrfs/raid56.c
fs/btrfs/send.c
fs/btrfs/volumes.c
fs/btrfs/zlib.c
fs/ceph/mds_client.c
fs/cifs/file.c
fs/coredump.c
include/drm/drm_client.h
include/linux/mlx5/driver.h
include/uapi/drm/virtgpu_drm.h
include/uapi/linux/ip.h
include/uapi/linux/ipv6.h
kernel/cgroup/cpuset.c
kernel/locking/rtmutex.c
kernel/trace/trace.c
mm/memblock.c
mm/page_alloc.c
net/can/j1939/address-claim.c
net/core/devlink.c
net/core/neighbour.c
net/core/sock.c
net/ipv4/af_inet.c
net/ipv4/inet_connection_sock.c
net/ipv6/af_inet6.c
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/sockopt.c
net/mptcp/subflow.c
net/rds/message.c
net/sched/sch_htb.c
net/xfrm/xfrm_compat.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_interface_core.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
sound/pci/hda/patch_realtek.c
sound/pci/lx6464es/lx_core.c
sound/soc/codecs/es8326.c
sound/soc/codecs/rt715-sdca-sdw.c
sound/soc/codecs/tas5805m.c
sound/soc/fsl/fsl_sai.c
sound/soc/soc-topology.c
sound/soc/sof/amd/acp.c
sound/synth/emux/emux_nrpn.c
tools/testing/memblock/internal.h
tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
tools/testing/selftests/net/forwarding/lib.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/test_vxlan_vnifiltering.sh

Documentation/devicetree/bindings/.gitignore
index a777199..51ddb26 100644 (file)
@@ -2,3 +2,8 @@
 *.example.dts
 /processed-schema*.yaml
 /processed-schema*.json
+
+#
+# We don't want to ignore the following even if they are dot-files
+#
+!.yamllint
Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
index 9f7d3e1..8449e14 100644 (file)
@@ -108,7 +108,7 @@ properties:
 
   msi-controller:
     description:
-      Only present if the Message Based Interrupt functionnality is
+      Only present if the Message Based Interrupt functionality is
       being exposed by the HW, and the mbi-ranges property present.
 
   mbi-ranges:
Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
index eaa87db..d052ef4 100644 (file)
@@ -16,5 +16,5 @@ Contents
 
 Support
 =======
-If you got any problem, contact Wangxun support team via support@trustnetic.com
+If you got any problem, contact Wangxun support team via nic-support@net-swift.com
 and Cc: netdev.
MAINTAINERS
index fb1471c..5130458 100644 (file)
@@ -16120,7 +16120,7 @@ F:      drivers/pci/controller/pci-v3-semi.c
 
 PCI ENDPOINT SUBSYSTEM
 M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
-R:     Krzysztof Wilczyński <kw@linux.com>
+M:     Krzysztof Wilczyński <kw@linux.com>
 R:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 R:     Kishon Vijay Abraham I <kishon@kernel.org>
 L:     linux-pci@vger.kernel.org
@@ -16128,7 +16128,7 @@ S:      Supported
 Q:     https://patchwork.kernel.org/project/linux-pci/list/
 B:     https://bugzilla.kernel.org
 C:     irc://irc.oftc.net/linux-pci
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
 F:     Documentation/PCI/endpoint/*
 F:     Documentation/misc-devices/pci-endpoint-test.rst
 F:     drivers/misc/pci_endpoint_test.c
@@ -16163,7 +16163,7 @@ S:      Supported
 Q:     https://patchwork.kernel.org/project/linux-pci/list/
 B:     https://bugzilla.kernel.org
 C:     irc://irc.oftc.net/linux-pci
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
 F:     Documentation/driver-api/pci/p2pdma.rst
 F:     drivers/pci/p2pdma.c
 F:     include/linux/pci-p2pdma.h
@@ -16185,14 +16185,14 @@ F:    drivers/pci/controller/pci-xgene-msi.c
 
 PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
 M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
+M:     Krzysztof Wilczyński <kw@linux.com>
 R:     Rob Herring <robh@kernel.org>
-R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-pci/list/
 B:     https://bugzilla.kernel.org
 C:     irc://irc.oftc.net/linux-pci
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
 F:     Documentation/devicetree/bindings/pci/
 F:     drivers/pci/controller/
 F:     drivers/pci/pci-bridge-emul.c
@@ -16205,7 +16205,7 @@ S:      Supported
 Q:     https://patchwork.kernel.org/project/linux-pci/list/
 B:     https://bugzilla.kernel.org
 C:     irc://irc.oftc.net/linux-pci
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
 F:     Documentation/PCI/
 F:     Documentation/devicetree/bindings/pci/
 F:     arch/x86/kernel/early-quirks.c
arch/arm/boot/dts/rk3288.dtsi
index 487b0e0..2ca76b6 100644 (file)
                clock-names = "dp", "pclk";
                phys = <&edp_phy>;
                phy-names = "dp";
+               power-domains = <&power RK3288_PD_VIO>;
                resets = <&cru SRST_EDP>;
                reset-names = "dp";
                rockchip,grf = <&grf>;
arch/arm/boot/dts/stihxxx-b2120.dtsi
index 920a0ba..8d9a2df 100644 (file)
                                tsin-num = <0>;
                                serial-not-parallel;
                                i2c-bus = <&ssc2>;
-                               reset-gpios = <&pio15 4 GPIO_ACTIVE_HIGH>;
+                               reset-gpios = <&pio15 4 GPIO_ACTIVE_LOW>;
                                dvb-card = <STV0367_TDA18212_NIMA_1>;
                        };
                };
arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index 1648e67..417523d 100644 (file)
                        sd_emmc_b: sd@5000 {
                                compatible = "amlogic,meson-axg-mmc";
                                reg = <0x0 0x5000 0x0 0x800>;
-                               interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+                               interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
                                clocks = <&clkc CLKID_SD_EMMC_B>,
                                        <&clkc CLKID_SD_EMMC_B_CLK0>,
                        sd_emmc_c: mmc@7000 {
                                compatible = "amlogic,meson-axg-mmc";
                                reg = <0x0 0x7000 0x0 0x800>;
-                               interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+                               interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
                                clocks = <&clkc CLKID_SD_EMMC_C>,
                                        <&clkc CLKID_SD_EMMC_C_CLK0>,
arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 9dbd508..7f55d97 100644 (file)
                sd_emmc_a: sd@ffe03000 {
                        compatible = "amlogic,meson-axg-mmc";
                        reg = <0x0 0xffe03000 0x0 0x800>;
-                       interrupts = <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
+                       interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                        clocks = <&clkc CLKID_SD_EMMC_A>,
                                 <&clkc CLKID_SD_EMMC_A_CLK0>,
                sd_emmc_b: sd@ffe05000 {
                        compatible = "amlogic,meson-axg-mmc";
                        reg = <0x0 0xffe05000 0x0 0x800>;
-                       interrupts = <GIC_SPI 190 IRQ_TYPE_EDGE_RISING>;
+                       interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                        clocks = <&clkc CLKID_SD_EMMC_B>,
                                 <&clkc CLKID_SD_EMMC_B_CLK0>,
                sd_emmc_c: mmc@ffe07000 {
                        compatible = "amlogic,meson-axg-mmc";
                        reg = <0x0 0xffe07000 0x0 0x800>;
-                       interrupts = <GIC_SPI 191 IRQ_TYPE_EDGE_RISING>;
+                       interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                        clocks = <&clkc CLKID_SD_EMMC_C>,
                                 <&clkc CLKID_SD_EMMC_C_CLK0>,
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index e3c12e0..5eed150 100644 (file)
                        sd_emmc_a: mmc@70000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
                                reg = <0x0 0x70000 0x0 0x800>;
-                               interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
+                               interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
                        };
 
                        sd_emmc_b: mmc@72000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
                                reg = <0x0 0x72000 0x0 0x800>;
-                               interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+                               interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
                        };
 
                        sd_emmc_c: mmc@74000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
                                reg = <0x0 0x74000 0x0 0x800>;
-                               interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+                               interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
                        };
                };
arch/arm64/boot/dts/mediatek/mt8195.dtsi
index 5d31536..c10cfeb 100644 (file)
                };
 
                vdosys0: syscon@1c01a000 {
-                       compatible = "mediatek,mt8195-mmsys", "syscon";
+                       compatible = "mediatek,mt8195-vdosys0", "mediatek,mt8195-mmsys", "syscon";
                        reg = <0 0x1c01a000 0 0x1000>;
                        mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
                        #clock-cells = <1>;
                };
 
                vdosys1: syscon@1c100000 {
-                       compatible = "mediatek,mt8195-mmsys", "syscon";
+                       compatible = "mediatek,mt8195-vdosys1", "syscon";
                        reg = <0 0x1c100000 0 0x1000>;
                        #clock-cells = <1>;
                };
arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index aa22a0c..5d5d957 100644 (file)
@@ -96,7 +96,6 @@
                        linux,default-trigger = "heartbeat";
                        gpios = <&rk805 1 GPIO_ACTIVE_LOW>;
                        default-state = "on";
-                       mode = <0x23>;
                };
 
                user_led: led-1 {
                        linux,default-trigger = "mmc1";
                        gpios = <&rk805 0 GPIO_ACTIVE_LOW>;
                        default-state = "off";
-                       mode = <0x05>;
                };
        };
 };
arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
index 6e29e74..783120e 100644 (file)
                };
        };
 
-       dmc_opp_table: dmc_opp_table {
+       dmc_opp_table: opp-table-3 {
                compatible = "operating-points-v2";
 
                opp00 {
arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
index 04403a7..a0795a2 100644 (file)
        };
 };
 
+&cpu_alert0 {
+       temperature = <65000>;
+};
+&cpu_alert1 {
+       temperature = <68000>;
+};
+
 &cpu_l0 {
        cpu-supply = <&vdd_cpu_l>;
 };
arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 4391aea..1881b4b 100644 (file)
                clocks = <&cru HCLK_M_CRYPTO0>, <&cru HCLK_S_CRYPTO0>, <&cru SCLK_CRYPTO0>;
                clock-names = "hclk_master", "hclk_slave", "sclk";
                resets = <&cru SRST_CRYPTO0>, <&cru SRST_CRYPTO0_S>, <&cru SRST_CRYPTO0_M>;
-               reset-names = "master", "lave", "crypto";
+               reset-names = "master", "slave", "crypto-rst";
        };
 
        crypto1: crypto@ff8b8000 {
                clocks = <&cru HCLK_M_CRYPTO1>, <&cru HCLK_S_CRYPTO1>, <&cru SCLK_CRYPTO1>;
                clock-names = "hclk_master", "hclk_slave", "sclk";
                resets = <&cru SRST_CRYPTO1>, <&cru SRST_CRYPTO1_S>, <&cru SRST_CRYPTO1_M>;
-               reset-names = "master", "slave", "crypto";
+               reset-names = "master", "slave", "crypto-rst";
        };
 
        i2c1: i2c@ff110000 {
                pcfg_input_pull_up: pcfg-input-pull-up {
                        input-enable;
                        bias-pull-up;
-                       drive-strength = <2>;
                };
 
                pcfg_input_pull_down: pcfg-input-pull-down {
                        input-enable;
                        bias-pull-down;
-                       drive-strength = <2>;
                };
 
                clock {
arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts
index 4c7f9ab..d956496 100644 (file)
        };
 };
 
+&pmu_io_domains {
+       pmuio2-supply = <&vcc_3v3>;
+       vccio1-supply = <&vcc_3v3>;
+       vccio3-supply = <&vcc_3v3>;
+       vccio4-supply = <&vcca_1v8>;
+       vccio5-supply = <&vcc_3v3>;
+       vccio6-supply = <&vcca_1v8>;
+       vccio7-supply = <&vcc_3v3>;
+       status = "okay";
+};
+
 &pwm0 {
        status = "okay";
 };
arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
index a1c5fdf..3c9d852 100644 (file)
 };
 
 &i2s1_8ch {
+       pinctrl-names = "default";
+       pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
        rockchip,trcm-sync-tx-only;
        status = "okay";
 };
        disable-wp;
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
-       sd-uhs-sdr104;
+       sd-uhs-sdr50;
        vmmc-supply = <&vcc3v3_sd>;
        vqmmc-supply = <&vccio_sd>;
        status = "okay";
 };
 
 &sdmmc2 {
-       supports-sdio;
        bus-width = <4>;
        disable-wp;
        cap-sd-highspeed;
arch/arm64/boot/dts/rockchip/rk356x.dtsi
index 5706c3e..c27f1c7 100644 (file)
                clock-names = "aclk_mst", "aclk_slv",
                              "aclk_dbi", "pclk", "aux";
                device_type = "pci";
+               #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 7>;
                interrupt-map = <0 0 0 1 &pcie_intc 0>,
                                <0 0 0 2 &pcie_intc 1>,
arch/powerpc/Kconfig
index b8c4ac5..7a5f8db 100644 (file)
@@ -163,7 +163,6 @@ config PPC
        select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
        select ARCH_WANT_LD_ORPHAN_WARN
        select ARCH_WANTS_MODULES_DATA_IN_VMALLOC       if PPC_BOOK3S_32 || PPC_8xx
-       select ARCH_WANTS_NO_INSTR
        select ARCH_WEAK_RELEASE_ACQUIRE
        select BINFMT_ELF
        select BUILDTIME_TABLE_SORT
arch/powerpc/kernel/interrupt.c
index fc6631a..0ec1581 100644 (file)
@@ -50,16 +50,18 @@ static inline bool exit_must_hard_disable(void)
  */
 static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
 {
+       bool must_hard_disable = (exit_must_hard_disable() || !restartable);
+
        /* This must be done with RI=1 because tracing may touch vmaps */
        trace_hardirqs_on();
 
-       if (exit_must_hard_disable() || !restartable)
+       if (must_hard_disable)
                __hard_EE_RI_disable();
 
 #ifdef CONFIG_PPC64
        /* This pattern matches prep_irq_for_idle */
        if (unlikely(lazy_irq_pending_nocheck())) {
-               if (exit_must_hard_disable() || !restartable) {
+               if (must_hard_disable) {
                        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                        __hard_RI_enable();
                }
arch/powerpc/kexec/file_load_64.c
index 5208575..9be3e81 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/firmware.h>
 #include <asm/kexec_ranges.h>
 #include <asm/crashdump-ppc64.h>
+#include <asm/mmzone.h>
 #include <asm/prom.h>
 
 struct umem_info {
arch/riscv/include/asm/pgtable.h
index 4eba9a9..3e01f4f 100644 (file)
@@ -721,6 +721,10 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
        page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
        return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
 }
+
+#define pmdp_collapse_flush pmdp_collapse_flush
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+                                unsigned long address, pmd_t *pmdp);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
arch/riscv/kernel/probes/kprobes.c
index 41c7481..2bedec3 100644 (file)
@@ -65,16 +65,18 @@ static bool __kprobes arch_check_kprobe(struct kprobe *p)
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
-       unsigned long probe_addr = (unsigned long)p->addr;
+       u16 *insn = (u16 *)p->addr;
 
-       if (probe_addr & 0x1)
+       if ((unsigned long)insn & 0x1)
                return -EILSEQ;
 
        if (!arch_check_kprobe(p))
                return -EILSEQ;
 
        /* copy instruction */
-       p->opcode = *p->addr;
+       p->opcode = (kprobe_opcode_t)(*insn++);
+       if (GET_INSN_LENGTH(p->opcode) == 4)
+               p->opcode |= (kprobe_opcode_t)(*insn) << 16;
 
        /* decode instruction */
        switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
arch/riscv/kernel/stacktrace.c
index 75c8dd6..f9a5a7c 100644 (file)
@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                fp = (unsigned long)__builtin_frame_address(0);
                sp = current_stack_pointer;
                pc = (unsigned long)walk_stackframe;
+               level = -1;
        } else {
                /* task blocked in __switch_to */
                fp = task->thread.s[0];
@@ -43,7 +44,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                unsigned long low, high;
                struct stackframe *frame;
 
-               if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
+               if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
                        break;
 
                /* Validate frame pointer */
arch/riscv/mm/cacheflush.c
index 3cc07ed..fcd6145 100644 (file)
@@ -90,8 +90,10 @@ void flush_icache_pte(pte_t pte)
        if (PageHuge(page))
                page = compound_head(page);
 
-       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+       if (!test_bit(PG_dcache_clean, &page->flags)) {
                flush_icache_all();
+               set_bit(PG_dcache_clean, &page->flags);
+       }
 }
 #endif /* CONFIG_MMU */
 
arch/riscv/mm/pgtable.c
index 6645ead..fef4e73 100644 (file)
@@ -81,3 +81,23 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 }
 
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+                                       unsigned long address, pmd_t *pmdp)
+{
+       pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+
+       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+       VM_BUG_ON(pmd_trans_huge(*pmdp));
+       /*
+        * When leaf PTE entries (regular pages) are collapsed into a leaf
+        * PMD entry (huge page), a valid non-leaf PTE is converted into a
+        * valid leaf PTE at the level 1 page table.  Since the sfence.vma
+        * forms that specify an address only apply to leaf PTEs, we need a
+        * global flush here.  collapse_huge_page() assumes these flushes are
+        * eager, so just do the fence here.
+        */
+       flush_tlb_mm(vma->vm_mm);
+       return pmd;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
arch/x86/include/asm/intel-family.h
index 347707d..cbaf174 100644 (file)
 #define INTEL_FAM6_METEORLAKE          0xAC
 #define INTEL_FAM6_METEORLAKE_L                0xAA
 
+#define INTEL_FAM6_LUNARLAKE_M         0xBD
+
 /* "Small Core" Processors (Atom/E-Core) */
 
 #define INTEL_FAM6_ATOM_BONNELL                0x1C /* Diamondville, Pineview */
arch/x86/kernel/kprobes/core.c
index b36f3c3..695873c 100644 (file)
@@ -625,7 +625,7 @@ static int prepare_emulation(struct kprobe *p, struct insn *insn)
                /* 1 byte conditional jump */
                p->ainsn.emulate_op = kprobe_emulate_jcc;
                p->ainsn.jcc.type = opcode & 0xf;
-               p->ainsn.rel32 = *(char *)insn->immediate.bytes;
+               p->ainsn.rel32 = insn->immediate.value;
                break;
        case 0x0f:
                opcode = insn->opcode.bytes[1];
drivers/acpi/nfit/core.c
index f1cc5ec..4e48d6d 100644 (file)
@@ -3297,8 +3297,8 @@ void acpi_nfit_shutdown(void *data)
 
        mutex_lock(&acpi_desc->init_mutex);
        set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
-       cancel_delayed_work_sync(&acpi_desc->dwork);
        mutex_unlock(&acpi_desc->init_mutex);
+       cancel_delayed_work_sync(&acpi_desc->dwork);
 
        /*
         * Bounce the nvdimm bus lock to make sure any in-flight
drivers/clk/ingenic/jz4760-cgu.c
index ecd395a..e407f00 100644 (file)
@@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
                       unsigned long rate, unsigned long parent_rate,
                       unsigned int *pm, unsigned int *pn, unsigned int *pod)
 {
-       unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2;
+       unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1;
 
        /* The frequency after the N divider must be between 1 and 50 MHz. */
        n = parent_rate / (1 * MHZ);
@@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
        /* The N divider must be >= 2. */
        n = clamp_val(n, 2, 1 << pll_info->n_bits);
 
-       for (;; n >>= 1) {
-               od = (unsigned int)-1;
+       rate /= MHZ;
+       parent_rate /= MHZ;
 
-               do {
-                       m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ);
-               } while ((m > m_max || m & 1) && (od < 4));
-
-               if (od < 4 && m >= 4 && m <= m_max)
-                       break;
+       for (m = m_max; m >= m_max && n >= 2; n--) {
+               m = rate * n / parent_rate;
+               od = m & 1;
+               m <<= od;
        }
 
        *pm = m;
-       *pn = n;
+       *pn = n + 1;
        *pod = 1 << od;
 }
 
drivers/clk/microchip/clk-mpfs-ccc.c
index 32aae88..0ddc73e 100644 (file)
@@ -164,12 +164,11 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_
 
        for (unsigned int i = 0; i < num_clks; i++) {
                struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
-               char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+               char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%u", parent->name, i);
 
                if (!name)
                        return -ENOMEM;
 
-               snprintf(name, 23, "%s_out%u", parent->name, i);
                out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
                out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
                        out_hw->reg_offset;
@@ -201,14 +200,13 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo
 
        for (unsigned int i = 0; i < num_clks; i++) {
                struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
-               char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
 
-               if (!name)
+               pll_hw->name = devm_kasprintf(dev, GFP_KERNEL, "ccc%s_pll%u",
+                                             strchrnul(dev->of_node->full_name, '@'), i);
+               if (!pll_hw->name)
                        return -ENOMEM;
 
                pll_hw->base = data->pll_base[i];
-               snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
-               pll_hw->name = (const char *)name;
                pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
                                                                      pll_hw->parents,
                                                                      &mpfs_ccc_pll_ops, 0);
drivers/cpufreq/qcom-cpufreq-hw.c
index 9505a81..957cf6b 100644 (file)
@@ -143,40 +143,42 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
        return lval * xo_rate;
 }
 
-/* Get the current frequency of the CPU (after throttling) */
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+/* Get the frequency requested by the cpufreq core for the CPU */
+static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
 {
        struct qcom_cpufreq_data *data;
+       const struct qcom_cpufreq_soc_data *soc_data;
        struct cpufreq_policy *policy;
+       unsigned int index;
 
        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy)
                return 0;
 
        data = policy->driver_data;
+       soc_data = qcom_cpufreq.soc_data;
 
-       return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+       index = readl_relaxed(data->base + soc_data->reg_perf_state);
+       index = min(index, LUT_MAX_ENTRIES - 1);
+
+       return policy->freq_table[index].frequency;
 }
 
-/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
 {
        struct qcom_cpufreq_data *data;
-       const struct qcom_cpufreq_soc_data *soc_data;
        struct cpufreq_policy *policy;
-       unsigned int index;
 
        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy)
                return 0;
 
        data = policy->driver_data;
-       soc_data = qcom_cpufreq.soc_data;
 
-       index = readl_relaxed(data->base + soc_data->reg_perf_state);
-       index = min(index, LUT_MAX_ENTRIES - 1);
+       if (data->throttle_irq >= 0)
+               return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
 
-       return policy->freq_table[index].frequency;
+       return qcom_cpufreq_get_freq(cpu);
 }
 
 static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
drivers/cxl/core/region.c
index 02f28da..940f805 100644 (file)
@@ -131,7 +131,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
                struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                struct cxl_port *iter = cxled_to_port(cxled);
                struct cxl_ep *ep;
-               int rc;
+               int rc = 0;
 
                while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
                        iter = to_cxl_port(iter->dev.parent);
@@ -143,7 +143,8 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
 
                        cxl_rr = cxl_rr_load(iter, cxlr);
                        cxld = cxl_rr->decoder;
-                       rc = cxld->reset(cxld);
+                       if (cxld->reset)
+                               rc = cxld->reset(cxld);
                        if (rc)
                                return rc;
                }
@@ -186,7 +187,8 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
                             iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
                                cxl_rr = cxl_rr_load(iter, cxlr);
                                cxld = cxl_rr->decoder;
-                               cxld->reset(cxld);
+                               if (cxld->reset)
+                                       cxld->reset(cxld);
                        }
 
                        cxled->cxld.reset(&cxled->cxld);
@@ -991,10 +993,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
                int i, distance;
 
                /*
-                * Passthrough ports impose no distance requirements between
+                * Passthrough decoders impose no distance requirements between
                 * peers
                 */
-               if (port->nr_dports == 1)
+               if (cxl_rr->nr_targets == 1)
                        distance = 0;
                else
                        distance = p->nr_targets / cxl_rr->nr_targets;
drivers/dax/super.c
index da4438f..c4c4728 100644 (file)
@@ -475,7 +475,7 @@ EXPORT_SYMBOL_GPL(put_dax);
 /**
  * dax_holder() - obtain the holder of a dax device
  * @dax_dev: a dax_device instance
-
+ *
  * Return: the holder's data which represents the holder if registered,
  * otherwize NULL.
  */
drivers/firmware/efi/libstub/arm64.c
index ff2d18c..4501652 100644 (file)
@@ -19,10 +19,13 @@ static bool system_needs_vamap(void)
        const u8 *type1_family = efi_get_smbios_string(1, family);
 
        /*
-        * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
-        * has not been called prior.
+        * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+        * SetVirtualAddressMap() has not been called prior.
         */
-       if (!type1_family || strcmp(type1_family, "Altra"))
+       if (!type1_family || (
+           strcmp(type1_family, "eMAG") &&
+           strcmp(type1_family, "Altra") &&
+           strcmp(type1_family, "Altra Max")))
                return false;
 
        efi_warn("Working around broken SetVirtualAddressMap()\n");
drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e3e2e6e..d148a1b 100644 (file)
@@ -243,6 +243,7 @@ extern int amdgpu_num_kcq;
 
 #define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
 extern int amdgpu_vcnfw_log;
+extern int amdgpu_sg_display;
 
 #define AMDGPU_VM_MAX_NUM_CTX                  4096
 #define AMDGPU_SG_THRESHOLD                    (256*1024*1024)
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7b5ce00..7af3041 100644 (file)
@@ -1220,10 +1220,13 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
                 * next job actually sees the results from the previous one
                 * before we start executing on the same scheduler ring.
                 */
-               if (!s_fence || s_fence->sched != sched)
+               if (!s_fence || s_fence->sched != sched) {
+                       dma_fence_put(fence);
                        continue;
+               }
 
                r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+               dma_fence_put(fence);
                if (r)
                        return r;
        }
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index cd4caaa..3fe277b 100644 (file)
@@ -186,6 +186,7 @@ int amdgpu_num_kcq = -1;
 int amdgpu_smartshift_bias;
 int amdgpu_use_xgmi_p2p = 1;
 int amdgpu_vcnfw_log;
+int amdgpu_sg_display = -1; /* auto */
 
 static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
 
@@ -932,6 +933,16 @@ MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = e
 module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
 
 /**
+ * DOC: sg_display (int)
+ * Disable S/G (scatter/gather) display (i.e., display from system memory).
+ * This option is only relevant on APUs.  Set this option to 0 to disable
+ * S/G display if you experience flickering or other issues under memory
+ * pressure and report the issue.
+ */
+MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
+module_param_named(sg_display, amdgpu_sg_display, int, 0444);
+
+/**
  * DOC: smu_pptable_id (int)
  * Used to override pptable id. id = 0 use VBIOS pptable.
  * id > 0 use the soft pptable with specicfied id.
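
Usage note on the sg_display option documented above: since it is registered via module_param_named() under the amdgpu module, it should be adjustable as amdgpu.sg_display=0 on the kernel command line, or as sg_display=0 when the module is loaded manually (e.g. modprobe amdgpu sg_display=0). This is inferred from the module_param_named() call above rather than stated explicitly in the patch.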
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 0044420..faff4a3 100644 (file)
@@ -618,7 +618,13 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
                if (!ring || !ring->fence_drv.initialized)
                        continue;
 
-               if (!ring->no_scheduler)
+               /*
+                * Notice we check for sched.ops since there's some
+                * override on the meaning of sched.ready by amdgpu.
+                * The natural check would be sched.ready, which is
+                * set as drm_sched_init() finishes...
+                */
+               if (ring->sched.ops)
                        drm_sched_fini(&ring->sched);
 
                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index f752c7a..3989e75 100644 (file)
@@ -295,7 +295,7 @@ struct amdgpu_ring {
 #define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
 #define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
+#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index b5f3bba..01e42bd 100644 (file)
@@ -974,7 +974,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
                        trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
                                                    min(nptes, 32u), dst, incr,
                                                    upd_flags,
-                                                   vm->task_info.pid,
+                                                   vm->task_info.tgid,
                                                    vm->immediate.fence_context);
                        amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
                                                   cursor.level, pe_start, dst,
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f202b45..5dde6f8 100644 (file)
@@ -6877,7 +6877,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
        .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
        .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
        .test_ring = gfx_v9_0_ring_test_ring,
-       .test_ib = gfx_v9_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_switch_buffer = gfx_v9_ring_emit_sb,
drivers/gpu/drm/amd/amdgpu/soc21.c
index 9eff5f4..7050238 100644 (file)
@@ -641,7 +641,9 @@ static int soc21_common_early_init(void *handle)
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_REPEATER_FGCG |
                        AMD_CG_SUPPORT_GFX_MGCG |
-                       AMD_CG_SUPPORT_HDP_SD;
+                       AMD_CG_SUPPORT_HDP_SD |
+                       AMD_CG_SUPPORT_ATHUB_MGCG |
+                       AMD_CG_SUPPORT_ATHUB_LS;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 31bce52..93dee3d 100644 (file)
@@ -1184,24 +1184,38 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 
        memset(pa_config, 0, sizeof(*pa_config));
 
-       logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
-       pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
-
-       if (adev->apu_flags & AMD_APU_IS_RAVEN2)
-               /*
-                * Raven2 has a HW issue that it is unable to use the vram which
-                * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
-                * workaround that increase system aperture high address (add 1)
-                * to get rid of the VM fault and hardware hang.
-                */
-               logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
-       else
-               logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
-
        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;
 
+       /* AGP aperture is disabled */
+       if (agp_bot == agp_top) {
+               logical_addr_low  = adev->gmc.vram_start >> 18;
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+                       /*
+                        * Raven2 has a HW issue that it is unable to use the vram which
+                        * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+                        * workaround that increase system aperture high address (add 1)
+                        * to get rid of the VM fault and hardware hang.
+                        */
+                       logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+               else
+                       logical_addr_high = adev->gmc.vram_end >> 18;
+       } else {
+               logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+                       /*
+                        * Raven2 has a HW issue that it is unable to use the vram which
+                        * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+                        * workaround that increase system aperture high address (add 1)
+                        * to get rid of the VM fault and hardware hang.
+                        */
+                       logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+               else
+                       logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+       }
+
+       pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 
        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
@@ -1503,6 +1517,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                case IP_VERSION(3, 0, 1):
                case IP_VERSION(3, 1, 2):
                case IP_VERSION(3, 1, 3):
+               case IP_VERSION(3, 1, 4):
+               case IP_VERSION(3, 1, 5):
                case IP_VERSION(3, 1, 6):
                        init_data.flags.gpu_vm_support = true;
                        break;
@@ -1511,6 +1527,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                }
                break;
        }
+       if (init_data.flags.gpu_vm_support &&
+           (amdgpu_sg_display == 0))
+               init_data.flags.gpu_vm_support = false;
 
        if (init_data.flags.gpu_vm_support)
                adev->mode_info.gpu_vm_support = true;
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index fe2023f..8f894c1 100644 (file)
@@ -3626,7 +3626,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
                                                (int)hubp->curs_attr.width || pos_cpy.x
                                                <= (int)hubp->curs_attr.width +
                                                pipe_ctx->plane_state->src_rect.x) {
-                                               pos_cpy.x = temp_x + viewport_width;
+                                               pos_cpy.x = 2 * viewport_width - temp_x;
                                        }
                                }
                        } else {
drivers/gpu/drm/amd/pm/amdgpu_pm.c
index a917036..2f3e239 100644 (file)
@@ -1991,6 +1991,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
                case IP_VERSION(9, 4, 2):
                case IP_VERSION(10, 3, 0):
                case IP_VERSION(11, 0, 0):
+               case IP_VERSION(11, 0, 1):
+               case IP_VERSION(11, 0, 2):
                        *states = ATTR_STATE_SUPPORTED;
                        break;
                default:
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index d6b964c..4bc7aee 100644 (file)
                                                                        (1 << FEATURE_DS_FCLK_BIT) | \
                                                                        (1 << FEATURE_DS_LCLK_BIT) | \
                                                                        (1 << FEATURE_DS_DCFCLK_BIT) | \
-                                                                       (1 << FEATURE_DS_UCLK_BIT))
+                                                                       (1 << FEATURE_DS_UCLK_BIT) | \
+                                                                       (1ULL << FEATURE_DS_VCN_BIT))
 
 //For use with feature control messages
 typedef enum {
@@ -522,9 +523,9 @@ typedef enum  {
   TEMP_HOTSPOT_M,
   TEMP_MEM,
   TEMP_VR_GFX,
-  TEMP_VR_SOC,
   TEMP_VR_MEM0,
   TEMP_VR_MEM1,
+  TEMP_VR_SOC,
   TEMP_VR_U,
   TEMP_LIQUID0,
   TEMP_LIQUID1,
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index d6b1393..48a3a39 100644 (file)
 #define NUM_FEATURES                          64
 
 #define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
-#define ALLOWED_FEATURE_CTRL_SCPM        (1 << FEATURE_DPM_GFXCLK_BIT) | \
-                                         (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
-                                         (1 << FEATURE_DPM_UCLK_BIT) | \
-                                         (1 << FEATURE_DPM_FCLK_BIT) | \
-                                         (1 << FEATURE_DPM_SOCCLK_BIT) | \
-                                         (1 << FEATURE_DPM_MP0CLK_BIT) | \
-                                         (1 << FEATURE_DPM_LINK_BIT) | \
-                                         (1 << FEATURE_DPM_DCN_BIT) | \
-                                         (1 << FEATURE_DS_GFXCLK_BIT) | \
-                                         (1 << FEATURE_DS_SOCCLK_BIT) | \
-                                         (1 << FEATURE_DS_FCLK_BIT) | \
-                                         (1 << FEATURE_DS_LCLK_BIT) | \
-                                         (1 << FEATURE_DS_DCFCLK_BIT) | \
-                                         (1 << FEATURE_DS_UCLK_BIT)
+#define ALLOWED_FEATURE_CTRL_SCPM      ((1 << FEATURE_DPM_GFXCLK_BIT) | \
+                                       (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+                                       (1 << FEATURE_DPM_UCLK_BIT) | \
+                                       (1 << FEATURE_DPM_FCLK_BIT) | \
+                                       (1 << FEATURE_DPM_SOCCLK_BIT) | \
+                                       (1 << FEATURE_DPM_MP0CLK_BIT) | \
+                                       (1 << FEATURE_DPM_LINK_BIT) | \
+                                       (1 << FEATURE_DPM_DCN_BIT) | \
+                                       (1 << FEATURE_DS_GFXCLK_BIT) | \
+                                       (1 << FEATURE_DS_SOCCLK_BIT) | \
+                                       (1 << FEATURE_DS_FCLK_BIT) | \
+                                       (1 << FEATURE_DS_LCLK_BIT) | \
+                                       (1 << FEATURE_DS_DCFCLK_BIT) | \
+                                       (1 << FEATURE_DS_UCLK_BIT) | \
+                                       (1ULL << FEATURE_DS_VCN_BIT))
 
 //For use with feature control messages
 typedef enum {
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index e8c6feb..992163e 100644 (file)
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
 
 #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500  //500ms
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index cf96c3f..508e392 100644 (file)
@@ -407,6 +407,9 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
                                                &smu_table->power_play_table,
                                                &smu_table->power_play_table_size);
@@ -1257,6 +1260,9 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
                table_context->power_play_table;
        PPTable_t *pptable = smu->smu_table.driver_pptable;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        if (!range)
                return -EINVAL;
 
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index e87db7e..9e1967d 100644 (file)
@@ -124,6 +124,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(DFCstateControl,                PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
        MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                       0),
        MSG_MAP(AllowGpo,                       PPSMC_MSG_SetGpoAllow,           0),
+       MSG_MAP(GetPptLimit,                    PPSMC_MSG_GetPptLimit,                 0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
drivers/gpu/drm/drm_client.c
index fd67efe..056ab9d 100644 (file)
@@ -233,21 +233,17 @@ void drm_client_dev_restore(struct drm_device *dev)
 
 static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
 {
-       struct drm_device *dev = buffer->client->dev;
-
        if (buffer->gem) {
                drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
                drm_gem_object_put(buffer->gem);
        }
 
-       if (buffer->handle)
-               drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
-
        kfree(buffer);
 }
 
 static struct drm_client_buffer *
-drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
+                        u32 format, u32 *handle)
 {
        const struct drm_format_info *info = drm_format_info(format);
        struct drm_mode_create_dumb dumb_args = { };
@@ -269,16 +265,15 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
        if (ret)
                goto err_delete;
 
-       buffer->handle = dumb_args.handle;
-       buffer->pitch = dumb_args.pitch;
-
        obj = drm_gem_object_lookup(client->file, dumb_args.handle);
        if (!obj)  {
                ret = -ENOENT;
                goto err_delete;
        }
 
+       buffer->pitch = dumb_args.pitch;
        buffer->gem = obj;
+       *handle = dumb_args.handle;
 
        return buffer;
 
@@ -365,7 +360,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
 }
 
 static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
-                                  u32 width, u32 height, u32 format)
+                                  u32 width, u32 height, u32 format,
+                                  u32 handle)
 {
        struct drm_client_dev *client = buffer->client;
        struct drm_mode_fb_cmd fb_req = { };
@@ -377,7 +373,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
        fb_req.depth = info->depth;
        fb_req.width = width;
        fb_req.height = height;
-       fb_req.handle = buffer->handle;
+       fb_req.handle = handle;
        fb_req.pitch = buffer->pitch;
 
        ret = drm_mode_addfb(client->dev, &fb_req, client->file);
@@ -414,13 +410,24 @@ struct drm_client_buffer *
 drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
 {
        struct drm_client_buffer *buffer;
+       u32 handle;
        int ret;
 
-       buffer = drm_client_buffer_create(client, width, height, format);
+       buffer = drm_client_buffer_create(client, width, height, format,
+                                         &handle);
        if (IS_ERR(buffer))
                return buffer;
 
-       ret = drm_client_buffer_addfb(buffer, width, height, format);
+       ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
+
+       /*
+        * The handle is only needed for creating the framebuffer, destroy it
+        * again to solve a circular dependency should anybody export the GEM
+        * object as DMA-buf. The framebuffer and our buffer structure are still
+        * holding references to the GEM object to prevent its destruction.
+        */
+       drm_mode_destroy_dumb(client->dev, handle, client->file);
+
        if (ret) {
                drm_client_buffer_delete(buffer);
                return ERR_PTR(ret);
drivers/gpu/drm/i915/display/intel_bios.c
index 572a4e3..a491e6c 100644 (file)
@@ -2466,6 +2466,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
                                          dvo_port);
 }
 
+static enum port
+dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+{
+       switch (dvo_port) {
+       case DVO_PORT_MIPIA:
+               return PORT_A;
+       case DVO_PORT_MIPIC:
+               if (DISPLAY_VER(i915) >= 11)
+                       return PORT_B;
+               else
+                       return PORT_C;
+       default:
+               return PORT_NONE;
+       }
+}
+
 static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
 {
        switch (vbt_max_link_rate) {
@@ -3414,19 +3430,16 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
 
                dvo_port = child->dvo_port;
 
-               if (dvo_port == DVO_PORT_MIPIA ||
-                   (dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
-                   (dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
-                       if (port)
-                               *port = dvo_port - DVO_PORT_MIPIA;
-                       return true;
-               } else if (dvo_port == DVO_PORT_MIPIB ||
-                          dvo_port == DVO_PORT_MIPIC ||
-                          dvo_port == DVO_PORT_MIPID) {
+               if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
                        drm_dbg_kms(&i915->drm,
                                    "VBT has unsupported DSI port %c\n",
                                    port_name(dvo_port - DVO_PORT_MIPIA));
+                       continue;
                }
+
+               if (port)
+                       *port = dsi_dvo_port_to_port(i915, dvo_port);
+               return true;
        }
 
        return false;
@@ -3511,7 +3524,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
                if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
                        continue;
 
-               if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+               if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
                        if (!devdata->dsc)
                                return false;
 
drivers/gpu/drm/i915/display/intel_fbdev.c
index 5575d7a..f76c06b 100644 (file)
@@ -328,8 +328,20 @@ out_unlock:
        return ret;
 }
 
+static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+       if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+               return 0;
+
+       if (helper->fb->funcs->dirty)
+               return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+       return 0;
+}
+
 static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
        .fb_probe = intelfb_create,
+       .fb_dirty = intelfb_dirty,
 };
 
 static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
drivers/gpu/drm/i915/display/skl_watermark.c
index e0766d1..1155464 100644 (file)
@@ -1587,7 +1587,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
                                skl_check_wm_level(&wm->wm[level], ddb);
 
                        if (icl_need_wm1_wa(i915, plane_id) &&
-                           level == 1 && wm->wm[0].enable) {
+                           level == 1 && !wm->wm[level].enable &&
+                           wm->wm[0].enable) {
                                wm->wm[level].blocks = wm->wm[0].blocks;
                                wm->wm[level].lines = wm->wm[0].lines;
                                wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f266b68..0f2e056 100644 (file)
@@ -3483,6 +3483,13 @@ err_request:
                                   eb.composite_fence :
                                   &eb.requests[0]->fence);
 
+       if (unlikely(eb.gem_context->syncobj)) {
+               drm_syncobj_replace_fence(eb.gem_context->syncobj,
+                                         eb.composite_fence ?
+                                         eb.composite_fence :
+                                         &eb.requests[0]->fence);
+       }
+
        if (out_fence) {
                if (err == 0) {
                        fd_install(out_fence_fd, out_fence->file);
@@ -3494,13 +3501,6 @@ err_request:
                }
        }
 
-       if (unlikely(eb.gem_context->syncobj)) {
-               drm_syncobj_replace_fence(eb.gem_context->syncobj,
-                                         eb.composite_fence ?
-                                         eb.composite_fence :
-                                         &eb.requests[0]->fence);
-       }
-
        if (!out_fence && eb.composite_fence)
                dma_fence_put(eb.composite_fence);
 
index 9c759df..9377288 100644
@@ -579,7 +579,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
        mapping_set_gfp_mask(mapping, mask);
        GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
 
-       i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
+       i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
        obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
index 9f4a904..da45215 100644
@@ -126,7 +126,6 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
        void __user *user_bo_handles = NULL;
        struct virtio_gpu_object_array *buflist = NULL;
        struct sync_file *sync_file;
-       int in_fence_fd = exbuf->fence_fd;
        int out_fence_fd = -1;
        void *buf;
        uint64_t fence_ctx;
@@ -152,13 +151,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                ring_idx = exbuf->ring_idx;
        }
 
-       exbuf->fence_fd = -1;
-
        virtio_gpu_create_context(dev, file);
        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
                struct dma_fence *in_fence;
 
-               in_fence = sync_file_get_fence(in_fence_fd);
+               in_fence = sync_file_get_fence(exbuf->fence_fd);
 
                if (!in_fence)
                        return -EINVAL;
index 1fb0f71..c751d12 100644
@@ -227,6 +227,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
        cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
        if (cl_data->num_hid_devices == 0)
                return -ENODEV;
+       cl_data->is_any_sensor_enabled = false;
 
        INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
        INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
@@ -287,6 +288,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                status = amd_sfh_wait_for_response
                                (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
                if (status == SENSOR_ENABLED) {
+                       cl_data->is_any_sensor_enabled = true;
                        cl_data->sensor_sts[i] = SENSOR_ENABLED;
                        rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
                        if (rc) {
@@ -301,19 +303,26 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                                        cl_data->sensor_sts[i]);
                                goto cleanup;
                        }
+               } else {
+                       cl_data->sensor_sts[i] = SENSOR_DISABLED;
+                       dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+                               cl_data->sensor_idx[i],
+                               get_sensor_name(cl_data->sensor_idx[i]),
+                               cl_data->sensor_sts[i]);
                }
                dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
                        cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
                        cl_data->sensor_sts[i]);
        }
-       if (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0) {
+       if (!cl_data->is_any_sensor_enabled ||
+          (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
                amd_sfh_hid_client_deinit(privdata);
                for (i = 0; i < cl_data->num_hid_devices; i++) {
                        devm_kfree(dev, cl_data->feature_report[i]);
                        devm_kfree(dev, in_data->input_report[i]);
                        devm_kfree(dev, cl_data->report_descr[i]);
                }
-               dev_warn(dev, "Failed to discover, sensors not enabled\n");
+               dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
                return -EOPNOTSUPP;
        }
        schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
index 3754fb4..5280368 100644
@@ -32,6 +32,7 @@ struct amd_input_data {
 struct amdtp_cl_data {
        u8 init_done;
        u32 cur_hid_dev;
+       bool is_any_sensor_enabled;
        u32 hid_dev_count;
        u32 num_hid_devices;
        struct device_info *hid_devices;
index 3e18035..5c72aef 100644
@@ -1202,6 +1202,7 @@ int hid_open_report(struct hid_device *device)
        __u8 *end;
        __u8 *next;
        int ret;
+       int i;
        static int (*dispatch_type[])(struct hid_parser *parser,
                                      struct hid_item *item) = {
                hid_parser_main,
@@ -1252,6 +1253,8 @@ int hid_open_report(struct hid_device *device)
                goto err;
        }
        device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
+       for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
+               device->collection[i].parent_idx = -1;
 
        ret = -EINVAL;
        while ((next = fetch_item(start, end, &item)) != NULL) {
index e59e991..4fa45ee 100644
@@ -12,6 +12,7 @@
  *  Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
  *  Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
  *  Copyright (c) 2020 YOSHIOKA Takuma <lo48576@hard-wi.red>
+ *  Copyright (c) 2022 Takahiro Fujii <fujii@xaxxi.net>
  */
 
 /*
@@ -89,7 +90,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        case USB_DEVICE_ID_ELECOM_M_DT1URBK:
        case USB_DEVICE_ID_ELECOM_M_DT1DRBK:
        case USB_DEVICE_ID_ELECOM_M_HT1URBK:
-       case USB_DEVICE_ID_ELECOM_M_HT1DRBK:
+       case USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D:
                /*
                 * Report descriptor format:
                 * 12: button bit count
@@ -99,6 +100,16 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                 */
                mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
                break;
+       case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C:
+               /*
+                * Report descriptor format:
+                * 22: button bit count
+                * 30: padding bit count
+                * 24: button report size
+                * 16: button usage maximum
+                */
+               mouse_button_fixup(hdev, rdesc, *rsize, 22, 30, 24, 16, 8);
+               break;
        }
        return rdesc;
 }
@@ -112,7 +123,8 @@ static const struct hid_device_id elecom_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
index 0f8c118..9e36b4c 100644
 #define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100   0x29CF
 #define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV    0x2CF9
 #define I2C_DEVICE_ID_HP_SPECTRE_X360_15       0x2817
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG  0x29DF
+#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
 #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN   0x2706
 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN   0x261A
 #define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
 #define USB_DEVICE_ID_ELECOM_M_DT1DRBK 0x00ff
 #define USB_DEVICE_ID_ELECOM_M_HT1URBK 0x010c
-#define USB_DEVICE_ID_ELECOM_M_HT1DRBK 0x010d
+#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D    0x010d
+#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C    0x011c
 
 #define USB_VENDOR_ID_DREAM_CHEEKY     0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN  0x0004
index 9b59e43..77c8c49 100644
@@ -370,6 +370,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
                USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
          HID_BATTERY_QUIRK_IGNORE },
+       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN),
+         HID_BATTERY_QUIRK_IGNORE },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
          HID_BATTERY_QUIRK_IGNORE },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
@@ -384,6 +386,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
          HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
          HID_BATTERY_QUIRK_IGNORE },
+       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG),
+         HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
          HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
index abf2c95..9c1ee8e 100644
@@ -3978,7 +3978,8 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
        }
 
        hidpp_initialize_battery(hidpp);
-       hidpp_initialize_hires_scroll(hidpp);
+       if (!hid_is_usb(hidpp->hid_dev))
+               hidpp_initialize_hires_scroll(hidpp);
 
        /* forward current battery state */
        if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
index be3ad02..5bc91f6 100644
@@ -393,7 +393,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
 #endif
 #if IS_ENABLED(CONFIG_HID_ELO)
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
index 43b26bc..39357dc 100644
@@ -26,8 +26,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
        if (umem_dmabuf->sgt)
                goto wait_fence;
 
-       sgt = dma_buf_map_attachment_unlocked(umem_dmabuf->attach,
-                                             DMA_BIDIRECTIONAL);
+       sgt = dma_buf_map_attachment(umem_dmabuf->attach,
+                                    DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);
 
@@ -103,8 +103,8 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
                umem_dmabuf->last_sg_trim = 0;
        }
 
-       dma_buf_unmap_attachment_unlocked(umem_dmabuf->attach, umem_dmabuf->sgt,
-                                         DMA_BIDIRECTIONAL);
+       dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
+                                DMA_BIDIRECTIONAL);
 
        umem_dmabuf->sgt = NULL;
 }
index f5f9269..7c5d487 100644
@@ -1318,12 +1318,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
                addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
                if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
                                 sizeof(tinfo.tidcnt)))
-                       return -EFAULT;
+                       ret = -EFAULT;
 
                addr = arg + offsetof(struct hfi1_tid_info, length);
-               if (copy_to_user((void __user *)addr, &tinfo.length,
+               if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
                                 sizeof(tinfo.length)))
                        ret = -EFAULT;
+
+               if (ret)
+                       hfi1_user_exp_rcv_invalid(fd, &tinfo);
        }
 
        return ret;
index b02f2f0..350884d 100644
@@ -160,16 +160,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
 static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
 {
        int pinned;
-       unsigned int npages;
+       unsigned int npages = tidbuf->npages;
        unsigned long vaddr = tidbuf->vaddr;
        struct page **pages = NULL;
        struct hfi1_devdata *dd = fd->uctxt->dd;
 
-       /* Get the number of pages the user buffer spans */
-       npages = num_user_pages(vaddr, tidbuf->length);
-       if (!npages)
-               return -EINVAL;
-
        if (npages > fd->uctxt->expected_count) {
                dd_dev_err(dd, "Expected buffer too big\n");
                return -EINVAL;
@@ -196,7 +191,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
                return pinned;
        }
        tidbuf->pages = pages;
-       tidbuf->npages = npages;
        fd->tid_n_pinned += pinned;
        return pinned;
 }
@@ -274,6 +268,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
        mutex_init(&tidbuf->cover_mutex);
        tidbuf->vaddr = tinfo->vaddr;
        tidbuf->length = tinfo->length;
+       tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
        tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
                                GFP_KERNEL);
        if (!tidbuf->psets) {
index 7b086fe..195aa9e 100644
@@ -1722,6 +1722,9 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
                        continue;
 
                idev = in_dev_get(ip_dev);
+               if (!idev)
+                       continue;
+
                in_dev_for_each_ifa_rtnl(ifa, idev) {
                        ibdev_dbg(&iwdev->ibdev,
                                  "CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
index ea15ec7..54b6193 100644
@@ -289,7 +289,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
        /* IB ports start with 1, MANA Ethernet ports start with 0 */
        port = ucmd.port;
-       if (ucmd.port > mc->num_ports)
+       if (port < 1 || port > mc->num_ports)
                return -EINVAL;
 
        if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
index c301b3b..a2857ac 100644
@@ -276,8 +276,8 @@ iter_chunk:
                                size = pa_end - pa_start + PAGE_SIZE;
                                usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
                                        va_start, &pa_start, size, flags);
-                               err = iommu_map(pd->domain, va_start, pa_start,
-                                                       size, flags);
+                               err = iommu_map_atomic(pd->domain, va_start,
+                                                      pa_start, size, flags);
                                if (err) {
                                        usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
                                                va_start, &pa_start, size, err);
@@ -293,8 +293,8 @@ iter_chunk:
                                size = pa - pa_start + PAGE_SIZE;
                                usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
                                        va_start, &pa_start, size, flags);
-                               err = iommu_map(pd->domain, va_start, pa_start,
-                                               size, flags);
+                               err = iommu_map_atomic(pd->domain, va_start,
+                                                      pa_start, size, flags);
                                if (err) {
                                        usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
                                                va_start, &pa_start, size, err);
index ac25fc8..f10d4bc 100644
@@ -2200,6 +2200,14 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
                rn->attach_mcast = ipoib_mcast_attach;
                rn->detach_mcast = ipoib_mcast_detach;
                rn->hca = hca;
+
+               rc = netif_set_real_num_tx_queues(dev, 1);
+               if (rc)
+                       goto out;
+
+               rc = netif_set_real_num_rx_queues(dev, 1);
+               if (rc)
+                       goto out;
        }
 
        priv->rn_ops = dev->netdev_ops;
index c76ba29..5adba0f 100644
@@ -312,9 +312,8 @@ void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
 
        if (srv_path->kobj.state_in_sysfs) {
                sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
-               kobject_del(&srv_path->kobj);
                kobject_put(&srv_path->kobj);
+               rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
        }
 
-       rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
 }
index 4f9b4a1..5940945 100644
@@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
 
        d = debugfs_rename(bonding_debug_root, bond->debug_dir,
                           bonding_debug_root, bond->dev->name);
-       if (d) {
+       if (!IS_ERR(d)) {
                bond->debug_dir = d;
        } else {
                netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
index 908fa89..338f238 100644
@@ -1309,14 +1309,26 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
                if (!priv->ports[port].pvid)
                        mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
                                   MT7530_VLAN_ACC_TAGGED);
-       }
 
-       /* Set the port as a user port which is to be able to recognize VID
-        * from incoming packets before fetching entry within the VLAN table.
-        */
-       mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
-                  VLAN_ATTR(MT7530_VLAN_USER) |
-                  PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+               /* Set the port as a user port which is to be able to recognize
+                * VID from incoming packets before fetching entry within the
+                * VLAN table.
+                */
+               mt7530_rmw(priv, MT7530_PVC_P(port),
+                          VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+                          VLAN_ATTR(MT7530_VLAN_USER) |
+                          PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+       } else {
+               /* Also set CPU ports to the "user" VLAN port attribute, to
+                * allow VLAN classification, but keep the EG_TAG attribute as
+                * "consistent" (i.o.w. don't change its value) for packets
+                * received by the switch from the CPU, so that tagged packets
+                * are forwarded to user ports as tagged, and untagged as
+                * untagged.
+                */
+               mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+                          VLAN_ATTR(MT7530_VLAN_USER));
+       }
 }
 
 static void
index 72e4282..6cda315 100644
@@ -4627,25 +4627,26 @@ static int init_reset_optional(struct platform_device *pdev)
                if (ret)
                        return dev_err_probe(&pdev->dev, ret,
                                             "failed to init SGMII PHY\n");
-       }
 
-       ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
-       if (!ret) {
-               u32 pm_info[2];
+               ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+               if (!ret) {
+                       u32 pm_info[2];
+
+                       ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+                                                        pm_info, ARRAY_SIZE(pm_info));
+                       if (ret) {
+                               dev_err(&pdev->dev, "Failed to read power management information\n");
+                               goto err_out_phy_exit;
+                       }
+                       ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+                       if (ret)
+                               goto err_out_phy_exit;
 
-               ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
-                                                pm_info, ARRAY_SIZE(pm_info));
-               if (ret) {
-                       dev_err(&pdev->dev, "Failed to read power management information\n");
-                       goto err_out_phy_exit;
+                       ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+                       if (ret)
+                               goto err_out_phy_exit;
                }
-               ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
-               if (ret)
-                       goto err_out_phy_exit;
 
-               ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
-               if (ret)
-                       goto err_out_phy_exit;
        }
 
        /* Fully reset controller at hardware level if mapped in device tree */
index d02b55b..3e08847 100644
@@ -5524,7 +5524,7 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
  * returned by the firmware is a 16 bit * value, but is indexed
  * by [fls(speed) - 1]
  */
-static const u32 ice_aq_to_link_speed[15] = {
+static const u32 ice_aq_to_link_speed[] = {
        SPEED_10,       /* BIT(0) */
        SPEED_100,
        SPEED_1000,
@@ -5536,10 +5536,6 @@ static const u32 ice_aq_to_link_speed[15] = {
        SPEED_40000,
        SPEED_50000,
        SPEED_100000,   /* BIT(10) */
-       0,
-       0,
-       0,
-       0               /* BIT(14) */
 };
 
 /**
@@ -5550,5 +5546,8 @@ static const u32 ice_aq_to_link_speed[15] = {
  */
 u32 ice_get_link_speed(u16 index)
 {
+       if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
+               return 0;
+
        return ice_aq_to_link_speed[index];
 }
index 5f86e41..b288a01 100644
@@ -5541,7 +5541,7 @@ static int __init ice_module_init(void)
        pr_info("%s\n", ice_driver_string);
        pr_info("%s\n", ice_copyright);
 
-       ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
+       ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
        if (!ice_wq) {
                pr_err("Failed to create workqueue\n");
                return -ENOMEM;
index 9b762f7..61f844d 100644
@@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
         */
        status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
        if (status)
-               goto err_free_lkup_exts;
+               goto err_unroll;
 
        /* Group match words into recipes using preferred recipe grouping
         * criteria.
index faba0f8..95f392a 100644
@@ -1681,7 +1681,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
        struct ice_vsi *ch_vsi = NULL;
        u16 queue = act->rx_queue;
 
-       if (queue > vsi->num_rxq) {
+       if (queue >= vsi->num_rxq) {
                NL_SET_ERR_MSG_MOD(fltr->extack,
                                   "Unable to add filter because specified queue is invalid");
                return -EINVAL;
index d4a4001..f56fa94 100644
@@ -39,7 +39,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
        return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
 }
 
-static const u32 ice_legacy_aq_to_vc_speed[15] = {
+static const u32 ice_legacy_aq_to_vc_speed[] = {
        VIRTCHNL_LINK_SPEED_100MB,      /* BIT(0) */
        VIRTCHNL_LINK_SPEED_100MB,
        VIRTCHNL_LINK_SPEED_1GB,
@@ -51,10 +51,6 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
        VIRTCHNL_LINK_SPEED_40GB,
        VIRTCHNL_LINK_SPEED_40GB,
        VIRTCHNL_LINK_SPEED_40GB,
-       VIRTCHNL_LINK_SPEED_UNKNOWN,
-       VIRTCHNL_LINK_SPEED_UNKNOWN,
-       VIRTCHNL_LINK_SPEED_UNKNOWN,
-       VIRTCHNL_LINK_SPEED_UNKNOWN     /* BIT(14) */
 };
 
 /**
@@ -71,21 +67,20 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
  */
 u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
 {
-       u32 speed;
+       /* convert a BIT() value into an array index */
+       u32 index = fls(link_speed) - 1;
 
-       if (adv_link_support) {
-               /* convert a BIT() value into an array index */
-               speed = ice_get_link_speed(fls(link_speed) - 1);
-       } else {
+       if (adv_link_support)
+               return ice_get_link_speed(index);
+       else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
                /* Virtchnl speeds are not defined for every speed supported in
                 * the hardware. To maintain compatibility with older AVF
                 * drivers, while reporting the speed the new speed values are
                 * resolved to the closest known virtchnl speeds
                 */
-               speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1];
-       }
+               return ice_legacy_aq_to_vc_speed[index];
 
-       return speed;
+       return VIRTCHNL_LINK_SPEED_UNKNOWN;
 }
 
 /* The mailbox overflow detection algorithm helps to check if there
index 5ecc0ee..b1ffb81 100644
@@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
 
                /* outer VLAN ops regardless of port VLAN config */
                vlan_ops->add_vlan = ice_vsi_add_vlan;
-               vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
                vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
                vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
 
                if (ice_vf_is_port_vlan_ena(vf)) {
                        /* setup outer VLAN ops */
                        vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+                       /* all Rx traffic should be in the domain of the
+                        * assigned port VLAN, so prevent disabling Rx VLAN
+                        * filtering
+                        */
+                       vlan_ops->dis_rx_filtering = noop_vlan;
                        vlan_ops->ena_rx_filtering =
                                ice_vsi_ena_rx_vlan_filtering;
 
@@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
                        vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
                        vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
                } else {
+                       vlan_ops->dis_rx_filtering =
+                               ice_vsi_dis_rx_vlan_filtering;
+
                        if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
                                vlan_ops->ena_rx_filtering = noop_vlan;
                        else
@@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
                        vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
                        vlan_ops->ena_rx_filtering =
                                ice_vsi_ena_rx_vlan_filtering;
+                       /* all Rx traffic should be in the domain of the
+                        * assigned port VLAN, so prevent disabling Rx VLAN
+                        * filtering
+                        */
+                       vlan_ops->dis_rx_filtering = noop_vlan;
                } else {
+                       vlan_ops->dis_rx_filtering =
+                               ice_vsi_dis_rx_vlan_filtering;
                        if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
                                vlan_ops->ena_rx_filtering = noop_vlan;
                        else
index 44b1740..1dd2a7f 100644
@@ -2942,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
                if (tx_buffer->next_to_watch &&
                    time_after(jiffies, tx_buffer->time_stamp +
                    (adapter->tx_timeout_factor * HZ)) &&
-                   !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
+                   !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
+                   (rd32(IGC_TDH(tx_ring->reg_idx)) !=
+                    readl(tx_ring->tail))) {
                        /* detected Tx unit hang */
                        netdev_err(tx_ring->netdev,
                                   "Detected Tx Unit Hang\n"
@@ -5069,6 +5071,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
 }
 
 /**
+ * igc_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: queue number that timed out
+ **/
+static void igc_tx_timeout(struct net_device *netdev,
+                          unsigned int __always_unused txqueue)
+{
+       struct igc_adapter *adapter = netdev_priv(netdev);
+       struct igc_hw *hw = &adapter->hw;
+
+       /* Do the reset outside of interrupt context */
+       adapter->tx_timeout_count++;
+       schedule_work(&adapter->reset_task);
+       wr32(IGC_EICS,
+            (adapter->eims_enable_mask & ~adapter->eims_other));
+}
+
+/**
  * igc_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
  * @stats: rtnl_link_stats64 pointer
@@ -5495,7 +5515,7 @@ static void igc_watchdog_task(struct work_struct *work)
                        case SPEED_100:
                        case SPEED_1000:
                        case SPEED_2500:
-                               adapter->tx_timeout_factor = 7;
+                               adapter->tx_timeout_factor = 1;
                                break;
                        }
 
@@ -6320,6 +6340,7 @@ static const struct net_device_ops igc_netdev_ops = {
        .ndo_set_rx_mode        = igc_set_rx_mode,
        .ndo_set_mac_address    = igc_set_mac,
        .ndo_change_mtu         = igc_change_mtu,
+       .ndo_tx_timeout         = igc_tx_timeout,
        .ndo_get_stats64        = igc_get_stats64,
        .ndo_fix_features       = igc_fix_features,
        .ndo_set_features       = igc_set_features,
index d56eda6..e312372 100644
@@ -1570,8 +1570,8 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
        if (IS_ERR(pp))
                return pp;
 
-       err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
-                                id, PAGE_SIZE);
+       err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+                                eth->rx_napi.napi_id, PAGE_SIZE);
        if (err < 0)
                goto err_free_pp;
 
@@ -1870,7 +1870,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 
        while (done < budget) {
                unsigned int pktlen, *rxdcsum;
+               bool has_hwaccel_tag = false;
                struct net_device *netdev;
+               u16 vlan_proto, vlan_tci;
                dma_addr_t dma_addr;
                u32 hash, reason;
                int mac = 0;
@@ -2010,27 +2012,29 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 
                if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
                        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
-                               if (trxd.rxd3 & RX_DMA_VTAG_V2)
-                                       __vlan_hwaccel_put_tag(skb,
-                                               htons(RX_DMA_VPID(trxd.rxd4)),
-                                               RX_DMA_VID(trxd.rxd4));
+                               if (trxd.rxd3 & RX_DMA_VTAG_V2) {
+                                       vlan_proto = RX_DMA_VPID(trxd.rxd4);
+                                       vlan_tci = RX_DMA_VID(trxd.rxd4);
+                                       has_hwaccel_tag = true;
+                               }
                        } else if (trxd.rxd2 & RX_DMA_VTAG) {
-                               __vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
-                                                      RX_DMA_VID(trxd.rxd3));
+                               vlan_proto = RX_DMA_VPID(trxd.rxd3);
+                               vlan_tci = RX_DMA_VID(trxd.rxd3);
+                               has_hwaccel_tag = true;
                        }
                }
 
                /* When using VLAN untagging in combination with DSA, the
                 * hardware treats the MTK special tag as a VLAN and untags it.
                 */
-               if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
-                       unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
+               if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
+                       unsigned int port = vlan_proto & GENMASK(2, 0);
 
                        if (port < ARRAY_SIZE(eth->dsa_meta) &&
                            eth->dsa_meta[port])
                                skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
-
-                       __vlan_hwaccel_clear_tag(skb);
+               } else if (has_hwaccel_tag) {
+                       __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
                }
 
                skb_record_rx_queue(skb, 0);
@@ -3111,7 +3115,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
 
                val |= config;
 
-               if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
+               if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
                        val |= MTK_GDMA_SPECIAL_TAG;
 
                mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
@@ -3177,8 +3181,7 @@ static int mtk_open(struct net_device *dev)
        struct mtk_eth *eth = mac->hw;
        int i, err;
 
-       if ((mtk_uses_dsa(dev) && !eth->prog) &&
-           !(mac->id == 1 && MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_TRGMII))) {
+       if (mtk_uses_dsa(dev) && !eth->prog) {
                for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
                        struct metadata_dst *md_dst = eth->dsa_meta[i];
 
@@ -3195,8 +3198,7 @@ static int mtk_open(struct net_device *dev)
                }
        } else {
                /* Hardware special tag parsing needs to be disabled if at least
-                * one MAC does not use DSA, or the second MAC of the MT7621 and
-                * MT7623 SoCs is being used.
+                * one MAC does not use DSA.
                 */
                u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
                val &= ~MTK_CDMP_STAG_EN;
index 3e232a6..bb95b40 100644
@@ -245,8 +245,9 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
        pages = dev->priv.dbg.pages_debugfs;
 
        debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
-       debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
-       debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
+       debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
+       debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
+       debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
        debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
        debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
        debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
index 2183138..5b05b88 100644
@@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
                        MLX5_GET(mtrc_cap, out, num_string_trace);
        tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
        tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+       tracer->str_db.loaded = false;
 
        for (i = 0; i < tracer->str_db.num_string_db; i++) {
                mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
@@ -756,6 +757,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
        if (err)
                mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
 
+       tracer->buff.consumer_index = 0;
        return err;
 }
 
@@ -820,7 +822,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
        mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
        if (tracer->owner) {
                tracer->owner = false;
-               tracer->buff.consumer_index = 0;
                return;
        }
 
index 464eb3a..cdc87ec 100644
@@ -87,7 +87,7 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
 
        mlx5_host_pf_cleanup(dev);
 
-       err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
+       err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
        if (err)
                mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
 }
index 8099a21..ce85b48 100644
@@ -438,10 +438,6 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
 
        switch (event) {
        case SWITCHDEV_FDB_ADD_TO_BRIDGE:
-               /* only handle the event on native eswtich of representor */
-               if (!mlx5_esw_bridge_is_local(dev, rep, esw))
-                       break;
-
                fdb_info = container_of(info,
                                        struct switchdev_notifier_fdb_info,
                                        info);
index 1892ccb..7cd36f4 100644
@@ -443,7 +443,7 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
 
 void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
 {
-       if (fs->vlan->cvlan_filter_disabled)
+       if (!fs->vlan || fs->vlan->cvlan_filter_disabled)
                return;
 
        fs->vlan->cvlan_filter_disabled = true;
index abcc614..6c24f33 100644
@@ -591,7 +591,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
        rq->ix           = c->ix;
        rq->channel      = c;
        rq->mdev         = mdev;
-       rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+       rq->hw_mtu =
+               MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
        rq->xdpsq        = &c->rq_xdpsq;
        rq->stats        = &c->priv->channel_stats[c->ix]->rq;
        rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
@@ -1014,35 +1015,6 @@ int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
        return mlx5e_rq_to_ready(rq, curr_state);
 }
 
-static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
-{
-       struct mlx5_core_dev *mdev = rq->mdev;
-
-       void *in;
-       void *rqc;
-       int inlen;
-       int err;
-
-       inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
-
-       MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
-       MLX5_SET64(modify_rq_in, in, modify_bitmask,
-                  MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
-       MLX5_SET(rqc, rqc, scatter_fcs, enable);
-       MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
-
-       err = mlx5_core_modify_rq(mdev, rq->rqn, in);
-
-       kvfree(in);
-
-       return err;
-}
-
 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
 {
        struct mlx5_core_dev *mdev = rq->mdev;
@@ -3314,20 +3286,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
        mlx5e_destroy_tises(priv);
 }
 
-static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
-{
-       int err = 0;
-       int i;
-
-       for (i = 0; i < chs->num; i++) {
-               err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
 {
        int err;
@@ -3903,41 +3861,27 @@ static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
        return mlx5_set_ports_check(mdev, in, sizeof(in));
 }
 
+static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       bool enable = *(bool *)ctx;
+
+       return mlx5e_set_rx_port_ts(mdev, enable);
+}
+
 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_channels *chs = &priv->channels;
-       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_params new_params;
        int err;
 
        mutex_lock(&priv->state_lock);
 
-       if (enable) {
-               err = mlx5e_set_rx_port_ts(mdev, false);
-               if (err)
-                       goto out;
-
-               chs->params.scatter_fcs_en = true;
-               err = mlx5e_modify_channels_scatter_fcs(chs, true);
-               if (err) {
-                       chs->params.scatter_fcs_en = false;
-                       mlx5e_set_rx_port_ts(mdev, true);
-               }
-       } else {
-               chs->params.scatter_fcs_en = false;
-               err = mlx5e_modify_channels_scatter_fcs(chs, false);
-               if (err) {
-                       chs->params.scatter_fcs_en = true;
-                       goto out;
-               }
-               err = mlx5e_set_rx_port_ts(mdev, true);
-               if (err) {
-                       mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
-                       err = 0;
-               }
-       }
-
-out:
+       new_params = chs->params;
+       new_params.scatter_fcs_en = enable;
+       err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
+                                      &new_params.scatter_fcs_en, true);
        mutex_unlock(&priv->state_lock);
        return err;
 }
@@ -4074,6 +4018,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
        if (netdev->features & NETIF_F_GRO_HW)
                netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
 
+       features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+       if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+               netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+
        return features;
 }
 
index b176648..3cdcb0e 100644
@@ -1715,7 +1715,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
        struct mlx5_esw_bridge *bridge;
 
        port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
-       if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
+       if (!port)
                return;
 
        bridge = port->bridge;
index eff92dc..e09518f 100644
@@ -189,16 +189,16 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
        }
 }
 
-static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
+static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
 {
        int rate, width;
 
        rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper);
        if (rate < 0)
-               return -EINVAL;
+               return SPEED_UNKNOWN;
        width = mlx5_ptys_width_enum_to_int(ib_link_width_oper);
        if (width < 0)
-               return -EINVAL;
+               return SPEED_UNKNOWN;
 
        return rate * width;
 }
@@ -221,16 +221,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
        ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
 
        speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper);
-       if (speed < 0)
-               return -EINVAL;
+       link_ksettings->base.speed = speed;
+       link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL;
 
-       link_ksettings->base.duplex = DUPLEX_FULL;
        link_ksettings->base.port = PORT_OTHER;
 
        link_ksettings->base.autoneg = AUTONEG_DISABLE;
 
-       link_ksettings->base.speed = speed;
-
        return 0;
 }
 
index 3d5f2a4..4e1b575 100644
@@ -2110,7 +2110,7 @@ static int __init mlx5_init(void)
        mlx5_core_verify_params();
        mlx5_register_debugfs();
 
-       err = pci_register_driver(&mlx5_core_driver);
+       err = mlx5e_init();
        if (err)
                goto err_debug;
 
@@ -2118,16 +2118,16 @@ static int __init mlx5_init(void)
        if (err)
                goto err_sf;
 
-       err = mlx5e_init();
+       err = pci_register_driver(&mlx5_core_driver);
        if (err)
-               goto err_en;
+               goto err_pci;
 
        return 0;
 
-err_en:
+err_pci:
        mlx5_sf_driver_unregister();
 err_sf:
-       pci_unregister_driver(&mlx5_core_driver);
+       mlx5e_cleanup();
 err_debug:
        mlx5_unregister_debugfs();
        return err;
@@ -2135,9 +2135,9 @@ err_debug:
 
 static void __exit mlx5_cleanup(void)
 {
-       mlx5e_cleanup();
-       mlx5_sf_driver_unregister();
        pci_unregister_driver(&mlx5_core_driver);
+       mlx5_sf_driver_unregister();
+       mlx5e_cleanup();
        mlx5_unregister_debugfs();
 }
 
index 6059635..0eb50be 100644
@@ -74,6 +74,14 @@ static u32 get_function(u16 func_id, bool ec_function)
        return (u32)func_id | (ec_function << 16);
 }
 
+static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
+{
+       if (!func_id)
+               return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
+
+       return func_id <= mlx5_core_max_vfs(dev) ?  MLX5_VF : MLX5_SF;
+}
+
 static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
 {
        struct rb_root *root;
@@ -332,6 +340,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
        int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
        int notify_fail = event;
+       u16 func_type;
        u64 addr;
        int err;
        u32 *in;
@@ -383,11 +392,9 @@ retry:
                goto out_dropped;
        }
 
+       func_type = func_id_to_type(dev, func_id, ec_function);
+       dev->priv.page_counters[func_type] += npages;
        dev->priv.fw_pages += npages;
-       if (func_id)
-               dev->priv.vfs_pages += npages;
-       else if (mlx5_core_is_ecpf(dev) && !ec_function)
-               dev->priv.host_pf_pages += npages;
 
        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
                      npages, ec_function, func_id, err);
@@ -414,6 +421,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
        struct rb_root *root;
        struct rb_node *p;
        int npages = 0;
+       u16 func_type;
 
        root = xa_load(&dev->priv.page_root_xa, function);
        if (WARN_ON_ONCE(!root))
@@ -428,11 +436,9 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
                free_fwp(dev, fwp, fwp->free_count);
        }
 
+       func_type = func_id_to_type(dev, func_id, ec_function);
+       dev->priv.page_counters[func_type] -= npages;
        dev->priv.fw_pages -= npages;
-       if (func_id)
-               dev->priv.vfs_pages -= npages;
-       else if (mlx5_core_is_ecpf(dev) && !ec_function)
-               dev->priv.host_pf_pages -= npages;
 
        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
                      npages, ec_function, func_id);
@@ -498,6 +504,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
        int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int num_claimed;
+       u16 func_type;
        u32 *out;
        int err;
        int i;
@@ -549,11 +556,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
        if (nclaimed)
                *nclaimed = num_claimed;
 
+       func_type = func_id_to_type(dev, func_id, ec_function);
+       dev->priv.page_counters[func_type] -= num_claimed;
        dev->priv.fw_pages -= num_claimed;
-       if (func_id)
-               dev->priv.vfs_pages -= num_claimed;
-       else if (mlx5_core_is_ecpf(dev) && !ec_function)
-               dev->priv.host_pf_pages -= num_claimed;
 
 out_free:
        kvfree(out);
@@ -706,12 +711,12 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
        WARN(dev->priv.fw_pages,
             "FW pages counter is %d after reclaiming all pages\n",
             dev->priv.fw_pages);
-       WARN(dev->priv.vfs_pages,
+       WARN(dev->priv.page_counters[MLX5_VF],
             "VFs FW pages counter is %d after reclaiming all pages\n",
-            dev->priv.vfs_pages);
-       WARN(dev->priv.host_pf_pages,
+            dev->priv.page_counters[MLX5_VF]);
+       WARN(dev->priv.page_counters[MLX5_HOST_PF],
             "External host PF FW pages counter is %d after reclaiming all pages\n",
-            dev->priv.host_pf_pages);
+            dev->priv.page_counters[MLX5_HOST_PF]);
 
        return 0;
 }
index c0e6c48..3008e9c 100644
@@ -147,7 +147,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
 
        mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
 
-       if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
+       if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
                mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
 }
 
index b851141..042ca03 100644
@@ -1138,12 +1138,14 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
                         rule->flow_source))
                return 0;
 
+       mlx5dr_domain_nic_lock(nic_dmn);
+
        ret = mlx5dr_matcher_select_builders(matcher,
                                             nic_matcher,
                                             dr_rule_get_ipv(&param->outer),
                                             dr_rule_get_ipv(&param->inner));
        if (ret)
-               return ret;
+               goto err_unlock;
 
        hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
        if (likely(hw_ste_arr_is_opt)) {
@@ -1152,12 +1154,12 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
                hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
                                     DR_STE_SIZE, GFP_KERNEL);
 
-               if (!hw_ste_arr)
-                       return -ENOMEM;
+               if (!hw_ste_arr) {
+                       ret = -ENOMEM;
+                       goto err_unlock;
+               }
        }
 
-       mlx5dr_domain_nic_lock(nic_dmn);
-
        ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
        if (ret)
                goto free_hw_ste;
@@ -1223,7 +1225,10 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
 
        mlx5dr_domain_nic_unlock(nic_dmn);
 
-       goto out;
+       if (unlikely(!hw_ste_arr_is_opt))
+               kfree(hw_ste_arr);
+
+       return 0;
 
 free_rule:
        dr_rule_clean_rule_members(rule, nic_rule);
@@ -1238,12 +1243,12 @@ remove_from_nic_tbl:
                mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
 
 free_hw_ste:
-       mlx5dr_domain_nic_unlock(nic_dmn);
-
-out:
-       if (unlikely(!hw_ste_arr_is_opt))
+       if (!hw_ste_arr_is_opt)
                kfree(hw_ste_arr);
 
+err_unlock:
+       mlx5dr_domain_nic_unlock(nic_dmn);
+
        return ret;
 }
 
index 0ed1ea7..69e7663 100644
@@ -633,7 +633,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5)
        /* Enable master counters */
        spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
 
-       for (i = 0; i < sparx5->port_count; i++) {
+       for (i = 0; i < SPX5_PORTS; i++) {
                port = sparx5->ports[i];
                if (!port)
                        continue;
@@ -649,7 +649,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5)
        struct sparx5_port *port;
        int i;
 
-       for (i = 0; i < sparx5->port_count; i++) {
+       for (i = 0; i < SPX5_PORTS; i++) {
                port = sparx5->ports[i];
                if (!port)
                        continue;
index b144f22..f9b8f37 100644
@@ -1217,9 +1217,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
        unsigned int max_queues_per_port = num_online_cpus();
        struct gdma_context *gc = pci_get_drvdata(pdev);
        struct gdma_irq_context *gic;
-       unsigned int max_irqs;
-       u16 *cpus;
-       cpumask_var_t req_mask;
+       unsigned int max_irqs, cpu;
        int nvec, irq;
        int err, i = 0, j;
 
@@ -1240,21 +1238,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
                goto free_irq_vector;
        }
 
-       if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
-               err = -ENOMEM;
-               goto free_irq;
-       }
-
-       cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
-       if (!cpus) {
-               err = -ENOMEM;
-               goto free_mask;
-       }
-       for (i = 0; i < nvec; i++)
-               cpus[i] = cpumask_local_spread(i, gc->numa_node);
-
        for (i = 0; i < nvec; i++) {
-               cpumask_set_cpu(cpus[i], req_mask);
                gic = &gc->irq_contexts[i];
                gic->handler = NULL;
                gic->arg = NULL;
@@ -1269,17 +1253,16 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
                irq = pci_irq_vector(pdev, i);
                if (irq < 0) {
                        err = irq;
-                       goto free_mask;
+                       goto free_irq;
                }
 
                err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
                if (err)
-                       goto free_mask;
-               irq_set_affinity_and_hint(irq, req_mask);
-               cpumask_clear(req_mask);
+                       goto free_irq;
+
+               cpu = cpumask_local_spread(i, gc->numa_node);
+               irq_set_affinity_and_hint(irq, cpumask_of(cpu));
        }
-       free_cpumask_var(req_mask);
-       kfree(cpus);
 
        err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
        if (err)
@@ -1290,13 +1273,12 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 
        return 0;
 
-free_mask:
-       free_cpumask_var(req_mask);
-       kfree(cpus);
 free_irq:
        for (j = i - 1; j >= 0; j--) {
                irq = pci_irq_vector(pdev, j);
                gic = &gc->irq_contexts[j];
+
+               irq_update_affinity_hint(irq, NULL);
                free_irq(irq, gic);
        }
 
@@ -1324,6 +1306,9 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
                        continue;
 
                gic = &gc->irq_contexts[i];
+
+               /* Need to clear the hint before free_irq */
+               irq_update_affinity_hint(irq, NULL);
                free_irq(irq, gic);
        }
 
index 7c0897e..ee05240 100644
@@ -605,6 +605,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
                flow_rule_match_control(rule, &match);
        }
 
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+               struct flow_match_vlan match;
+
+               flow_rule_match_vlan(rule, &match);
+               filter->key_type = OCELOT_VCAP_KEY_ANY;
+               filter->vlan.vid.value = match.key->vlan_id;
+               filter->vlan.vid.mask = match.mask->vlan_id;
+               filter->vlan.pcp.value[0] = match.key->vlan_priority;
+               filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
+               match_protocol = false;
+       }
+
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_match_eth_addrs match;
 
@@ -737,18 +749,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
                match_protocol = false;
        }
 
-       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
-               struct flow_match_vlan match;
-
-               flow_rule_match_vlan(rule, &match);
-               filter->key_type = OCELOT_VCAP_KEY_ANY;
-               filter->vlan.vid.value = match.key->vlan_id;
-               filter->vlan.vid.mask = match.mask->vlan_id;
-               filter->vlan.pcp.value[0] = match.key->vlan_priority;
-               filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
-               match_protocol = false;
-       }
-
 finished_key_parsing:
        if (match_protocol && proto != ETH_P_ALL) {
                if (filter->block_id == VCAP_ES0) {
index 1a82f10..2180ae9 100644 (file)
@@ -335,8 +335,8 @@ static void
 ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
 {
        trap->key_type = OCELOT_VCAP_KEY_IPV6;
-       trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
-       trap->key.ipv4.proto.mask[0] = 0xff;
+       trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+       trap->key.ipv6.proto.mask[0] = 0xff;
        trap->key.ipv6.dport.value = PTP_EV_PORT;
        trap->key.ipv6.dport.mask = 0xffff;
 }
@@ -355,8 +355,8 @@ static void
 ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
 {
        trap->key_type = OCELOT_VCAP_KEY_IPV6;
-       trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
-       trap->key.ipv4.proto.mask[0] = 0xff;
+       trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+       trap->key.ipv6.proto.mask[0] = 0xff;
        trap->key.ipv6.dport.value = PTP_GEN_PORT;
        trap->key.ipv6.dport.mask = 0xffff;
 }
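The two trap-key hunks above fix IPv6 PTP traps that were being filled in through the union's ipv4 member. A small, self-contained user-space demo of why writing the wrong union member is fragile; the struct layouts here are invented for illustration and do not match the real ocelot VCAP structures.

/* Build: cc -o vcap_union vcap_union.c */
#include <stdint.h>
#include <stdio.h>

struct key_ipv4 { uint8_t proto_value; uint8_t proto_mask; uint16_t dport; };
struct key_ipv6 { uint16_t dport; uint8_t proto_value; uint8_t proto_mask; };

union vcap_key {
	struct key_ipv4 ipv4;
	struct key_ipv6 ipv6;
};

int main(void)
{
	union vcap_key key = { 0 };

	/* Writing the IPv4 member of an IPv6 key only "works" if the two
	 * layouts happen to coincide; here they do not. */
	key.ipv4.proto_value = 17;	/* IPPROTO_UDP */
	printf("ipv6.proto_value seen as %u (expected 17)\n",
	       key.ipv6.proto_value);

	/* The fix above writes the member that matches key_type. */
	key.ipv6.proto_value = 17;
	printf("ipv6.proto_value now %u\n", key.ipv6.proto_value);
	return 0;
}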
index a4a89ef..cc97b3d 100644 (file)
@@ -293,35 +293,131 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
        }
 }
 
-static const u16 nfp_eth_media_table[] = {
-       [NFP_MEDIA_1000BASE_CX]         = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
-       [NFP_MEDIA_1000BASE_KX]         = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
-       [NFP_MEDIA_10GBASE_KX4]         = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
-       [NFP_MEDIA_10GBASE_KR]          = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
-       [NFP_MEDIA_10GBASE_CX4]         = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
-       [NFP_MEDIA_10GBASE_CR]          = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
-       [NFP_MEDIA_10GBASE_SR]          = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
-       [NFP_MEDIA_10GBASE_ER]          = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
-       [NFP_MEDIA_25GBASE_KR]          = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
-       [NFP_MEDIA_25GBASE_KR_S]        = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
-       [NFP_MEDIA_25GBASE_CR]          = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
-       [NFP_MEDIA_25GBASE_CR_S]        = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
-       [NFP_MEDIA_25GBASE_SR]          = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
-       [NFP_MEDIA_40GBASE_CR4]         = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
-       [NFP_MEDIA_40GBASE_KR4]         = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
-       [NFP_MEDIA_40GBASE_SR4]         = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
-       [NFP_MEDIA_40GBASE_LR4]         = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
-       [NFP_MEDIA_50GBASE_KR]          = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
-       [NFP_MEDIA_50GBASE_SR]          = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
-       [NFP_MEDIA_50GBASE_CR]          = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
-       [NFP_MEDIA_50GBASE_LR]          = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
-       [NFP_MEDIA_50GBASE_ER]          = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
-       [NFP_MEDIA_50GBASE_FR]          = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
-       [NFP_MEDIA_100GBASE_KR4]        = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
-       [NFP_MEDIA_100GBASE_SR4]        = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
-       [NFP_MEDIA_100GBASE_CR4]        = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
-       [NFP_MEDIA_100GBASE_KP4]        = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
-       [NFP_MEDIA_100GBASE_CR10]       = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+static const struct nfp_eth_media_link_mode {
+       u16 ethtool_link_mode;
+       u16 speed;
+} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
+       [NFP_MEDIA_1000BASE_CX] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+               .speed                  = NFP_SPEED_1G,
+       },
+       [NFP_MEDIA_1000BASE_KX] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+               .speed                  = NFP_SPEED_1G,
+       },
+       [NFP_MEDIA_10GBASE_KX4] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+               .speed                  = NFP_SPEED_10G,
+       },
+       [NFP_MEDIA_10GBASE_KR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+               .speed                  = NFP_SPEED_10G,
+       },
+       [NFP_MEDIA_10GBASE_CX4] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+               .speed                  = NFP_SPEED_10G,
+       },
+       [NFP_MEDIA_10GBASE_CR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+               .speed                  = NFP_SPEED_10G,
+       },
+       [NFP_MEDIA_10GBASE_SR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+               .speed                  = NFP_SPEED_10G,
+       },
+       [NFP_MEDIA_10GBASE_ER] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+               .speed                  = NFP_SPEED_10G,
+       },
+       [NFP_MEDIA_25GBASE_KR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+               .speed                  = NFP_SPEED_25G,
+       },
+       [NFP_MEDIA_25GBASE_KR_S] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+               .speed                  = NFP_SPEED_25G,
+       },
+       [NFP_MEDIA_25GBASE_CR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+               .speed                  = NFP_SPEED_25G,
+       },
+       [NFP_MEDIA_25GBASE_CR_S] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+               .speed                  = NFP_SPEED_25G,
+       },
+       [NFP_MEDIA_25GBASE_SR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+               .speed                  = NFP_SPEED_25G,
+       },
+       [NFP_MEDIA_40GBASE_CR4] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+               .speed                  = NFP_SPEED_40G,
+       },
+       [NFP_MEDIA_40GBASE_KR4] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+               .speed                  = NFP_SPEED_40G,
+       },
+       [NFP_MEDIA_40GBASE_SR4] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+               .speed                  = NFP_SPEED_40G,
+       },
+       [NFP_MEDIA_40GBASE_LR4] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+               .speed                  = NFP_SPEED_40G,
+       },
+       [NFP_MEDIA_50GBASE_KR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+               .speed                  = NFP_SPEED_50G,
+       },
+       [NFP_MEDIA_50GBASE_SR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+               .speed                  = NFP_SPEED_50G,
+       },
+       [NFP_MEDIA_50GBASE_CR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+               .speed                  = NFP_SPEED_50G,
+       },
+       [NFP_MEDIA_50GBASE_LR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+               .speed                  = NFP_SPEED_50G,
+       },
+       [NFP_MEDIA_50GBASE_ER] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+               .speed                  = NFP_SPEED_50G,
+       },
+       [NFP_MEDIA_50GBASE_FR] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+               .speed                  = NFP_SPEED_50G,
+       },
+       [NFP_MEDIA_100GBASE_KR4] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+               .speed                  = NFP_SPEED_100G,
+       },
+       [NFP_MEDIA_100GBASE_SR4] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+               .speed                  = NFP_SPEED_100G,
+       },
+       [NFP_MEDIA_100GBASE_CR4] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+               .speed                  = NFP_SPEED_100G,
+       },
+       [NFP_MEDIA_100GBASE_KP4] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+               .speed                  = NFP_SPEED_100G,
+       },
+       [NFP_MEDIA_100GBASE_CR10] = {
+               .ethtool_link_mode      = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+               .speed                  = NFP_SPEED_100G,
+       },
+};
+
+static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
+       [NFP_SPEED_1G]          = SPEED_1000,
+       [NFP_SPEED_10G]         = SPEED_10000,
+       [NFP_SPEED_25G]         = SPEED_25000,
+       [NFP_SPEED_40G]         = SPEED_40000,
+       [NFP_SPEED_50G]         = SPEED_50000,
+       [NFP_SPEED_100G]        = SPEED_100000,
 };
 
 static void nfp_add_media_link_mode(struct nfp_port *port,
@@ -334,8 +430,12 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
        };
        struct nfp_cpp *cpp = port->app->cpp;
 
-       if (nfp_eth_read_media(cpp, &ethm))
+       if (nfp_eth_read_media(cpp, &ethm)) {
+               bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
                return;
+       }
+
+       bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
 
        for (u32 i = 0; i < 2; i++) {
                supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
@@ -344,20 +444,26 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
 
        for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
                if (i < 64) {
-                       if (supported_modes[0] & BIT_ULL(i))
-                               __set_bit(nfp_eth_media_table[i],
+                       if (supported_modes[0] & BIT_ULL(i)) {
+                               __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
                                          cmd->link_modes.supported);
+                               __set_bit(nfp_eth_media_table[i].speed,
+                                         port->speed_bitmap);
+                       }
 
                        if (advertised_modes[0] & BIT_ULL(i))
-                               __set_bit(nfp_eth_media_table[i],
+                               __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
                                          cmd->link_modes.advertising);
                } else {
-                       if (supported_modes[1] & BIT_ULL(i - 64))
-                               __set_bit(nfp_eth_media_table[i],
+                       if (supported_modes[1] & BIT_ULL(i - 64)) {
+                               __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
                                          cmd->link_modes.supported);
+                               __set_bit(nfp_eth_media_table[i].speed,
+                                         port->speed_bitmap);
+                       }
 
                        if (advertised_modes[1] & BIT_ULL(i - 64))
-                               __set_bit(nfp_eth_media_table[i],
+                               __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
                                          cmd->link_modes.advertising);
                }
        }
@@ -468,6 +574,22 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
 
        if (cmd->base.speed != SPEED_UNKNOWN) {
                u32 speed = cmd->base.speed / eth_port->lanes;
+               bool is_supported = false;
+
+               for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
+                       if (cmd->base.speed == nfp_eth_speed_map[i] &&
+                           test_bit(i, port->speed_bitmap)) {
+                               is_supported = true;
+                               break;
+                       }
+               }
+
+               if (!is_supported) {
+                       netdev_err(netdev, "Speed %u is not supported.\n",
+                                  cmd->base.speed);
+                       err = -EINVAL;
+                       goto err_bad_set;
+               }
 
                if (req_aneg) {
                        netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
index f8cd157..9c04f9f 100644 (file)
@@ -38,6 +38,16 @@ enum nfp_port_flags {
        NFP_PORT_CHANGED = 0,
 };
 
+enum {
+       NFP_SPEED_1G,
+       NFP_SPEED_10G,
+       NFP_SPEED_25G,
+       NFP_SPEED_40G,
+       NFP_SPEED_50G,
+       NFP_SPEED_100G,
+       NFP_SUP_SPEED_NUMBER
+};
+
 /**
  * struct nfp_port - structure representing NFP port
  * @netdev:    backpointer to associated netdev
@@ -52,6 +62,7 @@ enum nfp_port_flags {
  * @eth_forced:        for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
  * @eth_port:  for %NFP_PORT_PHYS_PORT translated ETH Table port entry
  * @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available
+ * @speed_bitmap:      for %NFP_PORT_PHYS_PORT supported speed bitmap
  * @pf_id:     for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
  * @vf_id:     for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
  * @pf_split:  for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
@@ -78,6 +89,7 @@ struct nfp_port {
                        bool eth_forced;
                        struct nfp_eth_table_port *eth_port;
                        u8 __iomem *eth_stats;
+                       DECLARE_BITMAP(speed_bitmap, NFP_SUP_SPEED_NUMBER);
                };
                /* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
                struct {
index 626b911..d911f4f 100644 (file)
@@ -708,9 +708,16 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
                q->lif->index, q->name, q->hw_type, q->hw_index,
                q->head_idx, ring_doorbell);
 
-       if (ring_doorbell)
+       if (ring_doorbell) {
                ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
                                 q->dbval | q->head_idx);
+
+               q->dbell_jiffies = jiffies;
+
+               if (q_to_qcq(q)->napi_qcq)
+                       mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
+                                 jiffies + IONIC_NAPI_DEADLINE);
+       }
 }
 
 static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
index 2a1d7b9..bce3ca3 100644 (file)
 #define IONIC_DEV_INFO_REG_COUNT       32
 #define IONIC_DEV_CMD_REG_COUNT                32
 
+#define IONIC_NAPI_DEADLINE            (HZ / 200)      /* 5ms */
+#define IONIC_ADMIN_DOORBELL_DEADLINE  (HZ / 2)        /* 500ms */
+#define IONIC_TX_DOORBELL_DEADLINE     (HZ / 100)      /* 10ms */
+#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100)      /* 10ms */
+#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5)        /* 5s */
+
 struct ionic_dev_bar {
        void __iomem *vaddr;
        phys_addr_t bus_addr;
@@ -216,6 +222,8 @@ struct ionic_queue {
        struct ionic_lif *lif;
        struct ionic_desc_info *info;
        u64 dbval;
+       unsigned long dbell_deadline;
+       unsigned long dbell_jiffies;
        u16 head_idx;
        u16 tail_idx;
        unsigned int index;
@@ -361,4 +369,8 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
 int ionic_heartbeat_check(struct ionic *ionic);
 bool ionic_is_fw_running(struct ionic_dev *idev);
 
+bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
+bool ionic_txq_poke_doorbell(struct ionic_queue *q);
+bool ionic_rxq_poke_doorbell(struct ionic_queue *q);
+
 #endif /* _IONIC_DEV_H_ */
index 4dd16c4..63a78a9 100644 (file)
@@ -16,6 +16,7 @@
 
 #include "ionic.h"
 #include "ionic_bus.h"
+#include "ionic_dev.h"
 #include "ionic_lif.h"
 #include "ionic_txrx.h"
 #include "ionic_ethtool.h"
@@ -200,6 +201,13 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
        }
 }
 
+static void ionic_napi_deadline(struct timer_list *timer)
+{
+       struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
+
+       napi_schedule(&qcq->napi);
+}
+
 static irqreturn_t ionic_isr(int irq, void *data)
 {
        struct napi_struct *napi = data;
@@ -269,6 +277,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
                        .oper = IONIC_Q_ENABLE,
                },
        };
+       int ret;
 
        idev = &lif->ionic->idev;
        dev = lif->ionic->dev;
@@ -276,16 +285,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
        dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
                ctx.cmd.q_control.index, ctx.cmd.q_control.type);
 
+       if (qcq->flags & IONIC_QCQ_F_INTR)
+               ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
+
+       ret = ionic_adminq_post_wait(lif, &ctx);
+       if (ret)
+               return ret;
+
+       if (qcq->napi.poll)
+               napi_enable(&qcq->napi);
+
        if (qcq->flags & IONIC_QCQ_F_INTR) {
                irq_set_affinity_hint(qcq->intr.vector,
                                      &qcq->intr.affinity_mask);
-               napi_enable(&qcq->napi);
-               ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
                ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
                                IONIC_INTR_MASK_CLEAR);
        }
 
-       return ionic_adminq_post_wait(lif, &ctx);
+       return 0;
 }
 
 static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
@@ -316,6 +333,7 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
                synchronize_irq(qcq->intr.vector);
                irq_set_affinity_hint(qcq->intr.vector, NULL);
                napi_disable(&qcq->napi);
+               del_timer_sync(&qcq->napi_deadline);
        }
 
        /* If there was a previous fw communication error, don't bother with
@@ -451,6 +469,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
 
        n_qcq->intr.vector = src_qcq->intr.vector;
        n_qcq->intr.index = src_qcq->intr.index;
+       n_qcq->napi_qcq = src_qcq->napi_qcq;
 }
 
 static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -564,13 +583,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
        }
 
        if (flags & IONIC_QCQ_F_NOTIFYQ) {
-               int q_size, cq_size;
+               int q_size;
 
-               /* q & cq need to be contiguous in case of notifyq */
+               /* q & cq need to be contiguous in NotifyQ, so alloc it all in q
+                * and don't alloc cq.  We leave new->cq_size and new->cq_base
+                * as 0 to be sure we don't try to free it later.
+                */
                q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
-               cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
-
-               new->q_size = PAGE_SIZE + q_size + cq_size;
+               new->q_size = PAGE_SIZE + q_size +
+                             ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
                new->q_base = dma_alloc_coherent(dev, new->q_size,
                                                 &new->q_base_pa, GFP_KERNEL);
                if (!new->q_base) {
@@ -773,8 +794,14 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
        dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
        dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
 
-       if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+       q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
+       q->dbell_jiffies = jiffies;
+
+       if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
                netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
+               qcq->napi_qcq = qcq;
+               timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+       }
 
        qcq->flags |= IONIC_QCQ_F_INITED;
 
@@ -828,11 +855,17 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
        dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
        dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
 
+       q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
+       q->dbell_jiffies = jiffies;
+
        if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
                netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
        else
                netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
 
+       qcq->napi_qcq = qcq;
+       timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+
        qcq->flags |= IONIC_QCQ_F_INITED;
 
        return 0;
@@ -1150,6 +1183,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
        struct ionic_dev *idev = &lif->ionic->idev;
        unsigned long irqflags;
        unsigned int flags = 0;
+       bool resched = false;
        int rx_work = 0;
        int tx_work = 0;
        int n_work = 0;
@@ -1187,6 +1221,16 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
                ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
        }
 
+       if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
+               resched = true;
+       if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
+               resched = true;
+       if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
+               resched = true;
+       if (resched)
+               mod_timer(&lif->adminqcq->napi_deadline,
+                         jiffies + IONIC_NAPI_DEADLINE);
+
        return work_done;
 }
 
@@ -3245,8 +3289,14 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
        dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
        dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
 
+       q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
+       q->dbell_jiffies = jiffies;
+
        netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
 
+       qcq->napi_qcq = qcq;
+       timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+
        napi_enable(&qcq->napi);
 
        if (qcq->flags & IONIC_QCQ_F_INTR)
index a53984b..7345198 100644 (file)
@@ -74,8 +74,10 @@ struct ionic_qcq {
        struct ionic_queue q;
        struct ionic_cq cq;
        struct ionic_intr_info intr;
+       struct timer_list napi_deadline;
        struct napi_struct napi;
        unsigned int flags;
+       struct ionic_qcq *napi_qcq;
        struct dentry *dentry;
 };
 
index a13530e..08c42b0 100644 (file)
@@ -289,6 +289,35 @@ static void ionic_adminq_cb(struct ionic_queue *q,
        complete_all(&ctx->work);
 }
 
+bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
+{
+       struct ionic_lif *lif = q->lif;
+       unsigned long now, then, dif;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&lif->adminq_lock, irqflags);
+
+       if (q->tail_idx == q->head_idx) {
+               spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+               return false;
+       }
+
+       now = READ_ONCE(jiffies);
+       then = q->dbell_jiffies;
+       dif = now - then;
+
+       if (dif > q->dbell_deadline) {
+               ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+                                q->dbval | q->head_idx);
+
+               q->dbell_jiffies = now;
+       }
+
+       spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+
+       return true;
+}
+
 int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
 {
        struct ionic_desc_info *desc_info;
index 0c39774..f761780 100644 (file)
@@ -22,6 +22,67 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
        ionic_q_post(q, ring_dbell, cb_func, cb_arg);
 }
 
+bool ionic_txq_poke_doorbell(struct ionic_queue *q)
+{
+       unsigned long now, then, dif;
+       struct netdev_queue *netdev_txq;
+       struct net_device *netdev;
+
+       netdev = q->lif->netdev;
+       netdev_txq = netdev_get_tx_queue(netdev, q->index);
+
+       HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());
+
+       if (q->tail_idx == q->head_idx) {
+               HARD_TX_UNLOCK(netdev, netdev_txq);
+               return false;
+       }
+
+       now = READ_ONCE(jiffies);
+       then = q->dbell_jiffies;
+       dif = now - then;
+
+       if (dif > q->dbell_deadline) {
+               ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+                                q->dbval | q->head_idx);
+
+               q->dbell_jiffies = now;
+       }
+
+       HARD_TX_UNLOCK(netdev, netdev_txq);
+
+       return true;
+}
+
+bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
+{
+       unsigned long now, then, dif;
+
+       /* no lock, called from rx napi or txrx napi, nothing else can fill */
+
+       if (q->tail_idx == q->head_idx)
+               return false;
+
+       now = READ_ONCE(jiffies);
+       then = q->dbell_jiffies;
+       dif = now - then;
+
+       if (dif > q->dbell_deadline) {
+               ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+                                q->dbval | q->head_idx);
+
+               q->dbell_jiffies = now;
+
+               dif = 2 * q->dbell_deadline;
+               if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
+                       dif = IONIC_RX_MAX_DOORBELL_DEADLINE;
+
+               q->dbell_deadline = dif;
+       }
+
+       return true;
+}
+
 static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
 {
        return netdev_get_tx_queue(q->lif->netdev, q->index);
@@ -424,6 +485,12 @@ void ionic_rx_fill(struct ionic_queue *q)
 
        ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
                         q->dbval | q->head_idx);
+
+       q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
+       q->dbell_jiffies = jiffies;
+
+       mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
+                 jiffies + IONIC_NAPI_DEADLINE);
 }
 
 void ionic_rx_empty(struct ionic_queue *q)
@@ -511,6 +578,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
                                   work_done, flags);
        }
 
+       if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
+               mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
        return work_done;
 }
 
@@ -544,23 +614,29 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
                                   work_done, flags);
        }
 
+       if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
+               mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
        return work_done;
 }
 
 int ionic_txrx_napi(struct napi_struct *napi, int budget)
 {
-       struct ionic_qcq *qcq = napi_to_qcq(napi);
+       struct ionic_qcq *rxqcq = napi_to_qcq(napi);
        struct ionic_cq *rxcq = napi_to_cq(napi);
        unsigned int qi = rxcq->bound_q->index;
+       struct ionic_qcq *txqcq;
        struct ionic_dev *idev;
        struct ionic_lif *lif;
        struct ionic_cq *txcq;
+       bool resched = false;
        u32 rx_work_done = 0;
        u32 tx_work_done = 0;
        u32 flags = 0;
 
        lif = rxcq->bound_q->lif;
        idev = &lif->ionic->idev;
+       txqcq = lif->txqcqs[qi];
        txcq = &lif->txqcqs[qi]->cq;
 
        tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
@@ -572,7 +648,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
        ionic_rx_fill(rxcq->bound_q);
 
        if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
-               ionic_dim_update(qcq, 0);
+               ionic_dim_update(rxqcq, 0);
                flags |= IONIC_INTR_CRED_UNMASK;
                rxcq->bound_intr->rearm_count++;
        }
@@ -583,6 +659,13 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
                                   tx_work_done + rx_work_done, flags);
        }
 
+       if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
+               resched = true;
+       if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
+               resched = true;
+       if (resched)
+               mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
        return rx_work_done;
 }
 
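The ionic hunks above add a per-queue doorbell deadline: when a napi poll finds no completed work but the queue still has posted descriptors, the doorbell is rung again once enough time has passed and the napi_deadline timer is re-armed. A minimal user-space model of that check, using the same wrap-safe unsigned time arithmetic that jiffies relies on; names and values are illustrative only.

/* Build: cc -o poke poke.c */
#include <stdbool.h>
#include <stdio.h>

struct queue {
	unsigned long dbell_jiffies;
	unsigned long dbell_deadline;
	unsigned int head_idx, tail_idx;
};

static bool poke_doorbell(struct queue *q, unsigned long now)
{
	if (q->tail_idx == q->head_idx)	/* nothing posted, nothing to poke */
		return false;

	/* unsigned subtraction keeps this correct across counter wrap */
	if (now - q->dbell_jiffies > q->dbell_deadline) {
		/* ring_doorbell(q) would go here */
		q->dbell_jiffies = now;
	}
	return true;	/* caller re-arms its napi_deadline timer */
}

int main(void)
{
	struct queue q = { .dbell_jiffies = 100, .dbell_deadline = 5,
			   .head_idx = 3, .tail_idx = 1 };

	printf("t=103 -> %d (not yet past deadline)\n", poke_doorbell(&q, 103));
	printf("t=110 -> %d (rings and restamps)\n", poke_doorbell(&q, 110));
	return 0;
}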
index e02d1e3..79f4e13 100644 (file)
@@ -1034,7 +1034,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
 
        packet->dma_range = kcalloc(page_count,
                                    sizeof(*packet->dma_range),
-                                   GFP_KERNEL);
+                                   GFP_ATOMIC);
        if (!packet->dma_range)
                return -ENOMEM;
 
index 5e41658..a6015cd 100644 (file)
@@ -261,6 +261,8 @@ static struct phy_driver meson_gxl_phy[] = {
                .handle_interrupt = meson_gxl_handle_interrupt,
                .suspend        = genphy_suspend,
                .resume         = genphy_resume,
+               .read_mmd       = genphy_read_mmd_unsupported,
+               .write_mmd      = genphy_write_mmd_unsupported,
        }, {
                PHY_ID_MATCH_EXACT(0x01803301),
                .name           = "Meson G12A Internal PHY",
index 09cc65c..4d2519c 100644 (file)
@@ -1812,10 +1812,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
 
        ret = phy_attach_direct(pl->netdev, phy_dev, flags,
                                pl->link_interface);
-       if (ret) {
-               phy_device_free(phy_dev);
+       phy_device_free(phy_dev);
+       if (ret)
                return ret;
-       }
 
        ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
        if (ret)
index 2c82fbc..7a2b009 100644 (file)
@@ -57,9 +57,7 @@
 static inline int
 pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
 {
-       return usbnet_read_cmd(dev, req,
-                               USB_DIR_IN | USB_TYPE_VENDOR |
-                               USB_RECIP_DEVICE,
+       return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                                val, index, NULL, 0);
 }
 
index 79d9312..77b06d5 100644 (file)
@@ -102,6 +102,25 @@ config NVDIMM_KEYS
        depends on ENCRYPTED_KEYS
        depends on (LIBNVDIMM=ENCRYPTED_KEYS) || LIBNVDIMM=m
 
+config NVDIMM_KMSAN
+       bool
+       depends on KMSAN
+       help
+         KMSAN, and other memory debug facilities, increase the size of
+         'struct page' to contain extra metadata. This collides with
+         the NVDIMM capability to store a potentially
+         larger-than-"System RAM" size 'struct page' array in a
+         reservation of persistent memory rather than limited /
+         precious DRAM. However, that reservation needs to persist for
+         the life of the given NVDIMM namespace. If you are using KMSAN
+         to debug an issue unrelated to NVDIMMs or DAX then say N to this
+         option. Otherwise, say Y but understand that any namespaces
+         (with the page array stored in pmem) created with this build of
+         the kernel will permanently reserve and strand excess
+         capacity compared to the CONFIG_KMSAN=n case.
+
+         Select N if unsure.
+
 config NVDIMM_TEST_BUILD
        tristate "Build the unit test core"
        depends on m
index 85ca5b4..ec52196 100644 (file)
@@ -652,7 +652,7 @@ void devm_namespace_disable(struct device *dev,
                struct nd_namespace_common *ndns);
 #if IS_ENABLED(CONFIG_ND_CLAIM)
 /* max struct page size independent of kernel config */
-#define MAX_STRUCT_PAGE_SIZE 128
+#define MAX_STRUCT_PAGE_SIZE 64
 int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
 #else
 static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
index 61af072..af7d930 100644 (file)
@@ -13,6 +13,8 @@
 #include "pfn.h"
 #include "nd.h"
 
+static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);
+
 static void nd_pfn_release(struct device *dev)
 {
        struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -758,12 +760,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
                return -ENXIO;
        }
 
-       /*
-        * Note, we use 64 here for the standard size of struct page,
-        * debugging options may cause it to be larger in which case the
-        * implementation will limit the pfns advertised through
-        * ->direct_access() to those that are included in the memmap.
-        */
        start = nsio->res.start;
        size = resource_size(&nsio->res);
        npfns = PHYS_PFN(size - SZ_8K);
@@ -782,20 +778,33 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        }
        end_trunc = start + size - ALIGN_DOWN(start + size, align);
        if (nd_pfn->mode == PFN_MODE_PMEM) {
+               unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;
+
                /*
                 * The altmap should be padded out to the block size used
                 * when populating the vmemmap. This *should* be equal to
                 * PMD_SIZE for most architectures.
                 *
-                * Also make sure size of struct page is less than 128. We
-                * want to make sure we use large enough size here so that
-                * we don't have a dynamic reserve space depending on
-                * struct page size. But we also want to make sure we notice
-                * when we end up adding new elements to struct page.
+                * Also make sure size of struct page is less than
+                * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
+                * face of production kernel configurations that reduce the
+                * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
+                * kernel configurations that increase the 'struct page' size
+                * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
+                * for continuing with the capacity that will be wasted when
+                * reverting to a production kernel configuration. Otherwise,
+                * those configurations are blocked by default.
                 */
-               BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
-               offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
-                       - start;
+               if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
+                       if (page_struct_override)
+                               page_map_size = sizeof(struct page) * npfns;
+                       else {
+                               dev_err(&nd_pfn->dev,
+                                       "Memory debug options prevent using pmem for the page map\n");
+                               return -EINVAL;
+                       }
+               }
+               offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K, align) - start;
        else
@@ -818,7 +827,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        pfn_sb->version_minor = cpu_to_le16(4);
        pfn_sb->end_trunc = cpu_to_le32(end_trunc);
        pfn_sb->align = cpu_to_le32(nd_pfn->align);
-       pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
+       if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
+               pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
+       else
+               pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
        pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);
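The nd_pfn changes above size the pmem-resident memmap from MAX_STRUCT_PAGE_SIZE (now 64 bytes) unless a KMSAN-sized 'struct page' forces a larger reservation. A quick, illustrative calculation of how much capacity the reservation consumes for a hypothetical 16 GiB namespace at the two sizes; the numbers are rough and only meant to show the "stranded capacity" concern from the Kconfig help text.

/* Build: cc -o pfn_reserve pfn_reserve.c */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long long npfns = 4ULL << 20;		/* 16 GiB of 4K pages */
	unsigned long long align = 2ULL << 20;		/* PMD_SIZE */
	unsigned long long start = 0, sz_8k = 8192;

	/* 64 bytes is the new MAX_STRUCT_PAGE_SIZE; 128 models a debug build */
	for (unsigned int page_sz = 64; page_sz <= 128; page_sz += 64) {
		unsigned long long map = (unsigned long long)page_sz * npfns;
		unsigned long long off = ALIGN_UP(start + sz_8k + map, align) - start;

		printf("struct page %3u bytes -> %llu MiB reserved for the memmap\n",
		       page_sz, off >> 20);
	}
	return 0;
}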
index b57630d..bdb9749 100644 (file)
@@ -45,7 +45,7 @@ struct nvme_dhchap_queue_context {
        int sess_key_len;
 };
 
-struct workqueue_struct *nvme_auth_wq;
+static struct workqueue_struct *nvme_auth_wq;
 
 #define nvme_auth_flags_from_qid(qid) \
        (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
index c34ac33..67763e5 100644 (file)
@@ -965,8 +965,19 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
        }
 
        of_dma_range_parser_init(&parser, node);
-       for_each_of_range(&parser, &range)
+       for_each_of_range(&parser, &range) {
+               if (range.cpu_addr == OF_BAD_ADDR) {
+                       pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+                              range.bus_addr, node);
+                       continue;
+               }
                num_ranges++;
+       }
+
+       if (!num_ranges) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
        if (!r) {
@@ -975,18 +986,16 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
        }
 
        /*
-        * Record all info in the generic DMA ranges array for struct device.
+        * Record all info in the generic DMA ranges array for struct device,
+        * returning an error if we don't find any parsable ranges.
         */
        *map = r;
        of_dma_range_parser_init(&parser, node);
        for_each_of_range(&parser, &range) {
                pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
                         range.bus_addr, range.cpu_addr, range.size);
-               if (range.cpu_addr == OF_BAD_ADDR) {
-                       pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
-                              range.bus_addr, node);
+               if (range.cpu_addr == OF_BAD_ADDR)
                        continue;
-               }
                r->cpu_start = range.cpu_addr;
                r->dma_start = range.bus_addr;
                r->size = range.size;
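The of_dma_get_range() change above counts only the ranges that translate to a CPU address, bails out with -EINVAL when none do, and then fills the map from the same filtered set. A self-contained user-space sketch of that two-pass pattern; the types and names are simplified stand-ins.

/* Build: cc -o ranges ranges.c */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define BAD_ADDR ((unsigned long long)-1)

struct range { unsigned long long cpu_addr, bus_addr, size; };

/* pass 1: count usable entries; pass 2: copy only those */
static int build_map(const struct range *in, int n, struct range **out)
{
	int usable = 0, j = 0;

	for (int i = 0; i < n; i++)
		if (in[i].cpu_addr != BAD_ADDR)
			usable++;
	if (!usable)
		return -EINVAL;

	*out = calloc(usable + 1, sizeof(**out));	/* +1 empty terminator */
	if (!*out)
		return -ENOMEM;

	for (int i = 0; i < n; i++)
		if (in[i].cpu_addr != BAD_ADDR)
			(*out)[j++] = in[i];
	return usable;
}

int main(void)
{
	struct range in[] = { { BAD_ADDR, 0x0, 0x1000 },
			      { 0x80000000, 0x0, 0x1000 } };
	struct range *map;
	int ret = build_map(in, 2, &map);

	printf("usable ranges: %d\n", ret);
	if (ret > 0)
		free(map);
	return 0;
}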
index 81c8c22..b3878a9 100644 (file)
@@ -525,6 +525,7 @@ static int __init of_platform_default_populate_init(void)
        if (IS_ENABLED(CONFIG_PPC)) {
                struct device_node *boot_display = NULL;
                struct platform_device *dev;
+               int display_number = 0;
                int ret;
 
                /* Check if we have a MacOS display without a node spec */
@@ -555,16 +556,23 @@ static int __init of_platform_default_populate_init(void)
                        if (!of_get_property(node, "linux,opened", NULL) ||
                            !of_get_property(node, "linux,boot-display", NULL))
                                continue;
-                       dev = of_platform_device_create(node, "of-display", NULL);
+                       dev = of_platform_device_create(node, "of-display.0", NULL);
+                       of_node_put(node);
                        if (WARN_ON(!dev))
                                return -ENOMEM;
                        boot_display = node;
+                       display_number++;
                        break;
                }
                for_each_node_by_type(node, "display") {
+                       char buf[14];
+                       const char *of_display_format = "of-display.%d";
+
                        if (!of_get_property(node, "linux,opened", NULL) || node == boot_display)
                                continue;
-                       of_platform_device_create(node, "of-display", NULL);
+                       ret = snprintf(buf, sizeof(buf), of_display_format, display_number++);
+                       if (ret < sizeof(buf))
+                               of_platform_device_create(node, buf, NULL);
                }
 
        } else {
index fba9548..5641786 100644 (file)
@@ -1665,7 +1665,6 @@ int pci_save_state(struct pci_dev *dev)
                return i;
 
        pci_save_ltr_state(dev);
-       pci_save_aspm_l1ss_state(dev);
        pci_save_dpc_state(dev);
        pci_save_aer_state(dev);
        pci_save_ptm_state(dev);
@@ -1772,7 +1771,6 @@ void pci_restore_state(struct pci_dev *dev)
         * LTR itself (in the PCIe capability).
         */
        pci_restore_ltr_state(dev);
-       pci_restore_aspm_l1ss_state(dev);
 
        pci_restore_pcie_state(dev);
        pci_restore_pasid_state(dev);
@@ -3465,11 +3463,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
        if (error)
                pci_err(dev, "unable to allocate suspend buffer for LTR\n");
 
-       error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
-                                           2 * sizeof(u32));
-       if (error)
-               pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
-
        pci_allocate_vc_save_buffers(dev);
 }
 
index 9ed3b55..9049d07 100644 (file)
@@ -566,14 +566,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
 void pcie_aspm_init_link_state(struct pci_dev *pdev);
 void pcie_aspm_exit_link_state(struct pci_dev *pdev);
 void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-void pci_save_aspm_l1ss_state(struct pci_dev *dev);
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
 #else
 static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
 static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
 static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
-static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
-static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
 #endif
 
 #ifdef CONFIG_PCIE_ECRC
index 53a1fa3..4b41845 100644 (file)
@@ -470,31 +470,6 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
        pci_write_config_dword(pdev, pos, val);
 }
 
-static void aspm_program_l1ss(struct pci_dev *dev, u32 ctl1, u32 ctl2)
-{
-       u16 l1ss = dev->l1ss;
-       u32 l1_2_enable;
-
-       /*
-        * Per PCIe r6.0, sec 5.5.4, T_POWER_ON in PCI_L1SS_CTL2 must be
-        * programmed prior to setting the L1.2 enable bits in PCI_L1SS_CTL1.
-        */
-       pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);
-
-       /*
-        * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD in
-        * PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
-        * enable bits, even though they're all in PCI_L1SS_CTL1.
-        */
-       l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
-       ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
-
-       pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
-       if (l1_2_enable)
-               pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1,
-                                      ctl1 | l1_2_enable);
-}
-
 /* Calculate L1.2 PM substate timing parameters */
 static void aspm_calc_l1ss_info(struct pcie_link_state *link,
                                u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -504,6 +479,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
        u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
        u32 ctl1 = 0, ctl2 = 0;
        u32 pctl1, pctl2, cctl1, cctl2;
+       u32 pl1_2_enables, cl1_2_enables;
 
        if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
                return;
@@ -552,21 +528,39 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
            ctl2 == pctl2 && ctl2 == cctl2)
                return;
 
-       pctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
-                  PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
-                  PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
-       pctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
-                         PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
-                         PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
-       aspm_program_l1ss(parent, pctl1, ctl2);
-
-       cctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
-                  PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
-                  PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
-       cctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
-                         PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
-                         PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
-       aspm_program_l1ss(child, cctl1, ctl2);
+       /* Disable L1.2 while updating.  See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
+       pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+       cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+
+       if (pl1_2_enables || cl1_2_enables) {
+               pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+                                       PCI_L1SS_CTL1_L1_2_MASK, 0);
+               pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+                                       PCI_L1SS_CTL1_L1_2_MASK, 0);
+       }
+
+       /* Program T_POWER_ON times in both ports */
+       pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
+       pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+
+       /* Program Common_Mode_Restore_Time in upstream device */
+       pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+                               PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+
+       /* Program LTR_L1.2_THRESHOLD time in both ports */
+       pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+                               PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+                               PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+       pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+                               PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+                               PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+
+       if (pl1_2_enables || cl1_2_enables) {
+               pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
+                                       pl1_2_enables);
+               pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
+                                       cl1_2_enables);
+       }
 }
 
 static void aspm_l1ss_init(struct pcie_link_state *link)
@@ -757,43 +751,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
                                PCI_L1SS_CTL1_L1SS_MASK, val);
 }
 
-void pci_save_aspm_l1ss_state(struct pci_dev *dev)
-{
-       struct pci_cap_saved_state *save_state;
-       u16 l1ss = dev->l1ss;
-       u32 *cap;
-
-       if (!l1ss)
-               return;
-
-       save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
-       if (!save_state)
-               return;
-
-       cap = (u32 *)&save_state->cap.data[0];
-       pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL2, cap++);
-       pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL1, cap++);
-}
-
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
-{
-       struct pci_cap_saved_state *save_state;
-       u32 *cap, ctl1, ctl2;
-       u16 l1ss = dev->l1ss;
-
-       if (!l1ss)
-               return;
-
-       save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
-       if (!save_state)
-               return;
-
-       cap = (u32 *)&save_state->cap.data[0];
-       ctl2 = *cap++;
-       ctl1 = *cap;
-       aspm_program_l1ss(dev, ctl1, ctl2);
-}
-
 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
 {
        pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
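The aspm.c rework above replaces the save/restore helpers with in-place reprogramming: the L1.2 enable bits are cleared in both ports first, the timing fields are rewritten with clear-and-set read-modify-write accesses, and the enables are restored last. A toy user-space model of that ordering; the masks and values are invented and the real PCI_L1SS_CTL1 layout is not reproduced here.

/* Build: cc -o l1ss_order l1ss_order.c */
#include <stdint.h>
#include <stdio.h>

#define L1_2_MASK	0x0000000f	/* made-up field masks */
#define CM_RESTORE	0x0000ff00
#define LTR_THRESH	0xffff0000

static uint32_t ctl1 = 0x1234560f;	/* stand-in for an L1SS control register */

/* read-modify-write: clear the named field, then OR in the new value */
static void clear_and_set(uint32_t clear, uint32_t set)
{
	ctl1 = (ctl1 & ~clear) | set;
	printf("ctl1 = 0x%08x\n", ctl1);
}

int main(void)
{
	uint32_t enables = ctl1 & L1_2_MASK;

	clear_and_set(L1_2_MASK, 0);		/* 1: disable L1.2 while updating */
	clear_and_set(CM_RESTORE, 0x00004200);	/* 2: rewrite timing fields */
	clear_and_set(LTR_THRESH, 0x00ab0000);	/* 3: rewrite threshold */
	clear_and_set(0, enables);		/* 4: restore the enables last */
	return 0;
}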
index 3945612..9c6ee46 100644 (file)
@@ -93,10 +93,19 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
 static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
                                   const struct aspeed_sig_expr *expr)
 {
+       int ret;
+
        pr_debug("Disabling signal %s for %s\n", expr->signal,
                 expr->function);
 
-       return aspeed_sig_expr_set(ctx, expr, false);
+       ret = aspeed_sig_expr_eval(ctx, expr, true);
+       if (ret < 0)
+               return ret;
+
+       if (ret)
+               return aspeed_sig_expr_set(ctx, expr, false);
+
+       return 0;
 }
 
 /**
@@ -114,7 +123,7 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
        int ret = 0;
 
        if (!exprs)
-               return true;
+               return -EINVAL;
 
        while (*exprs && !ret) {
                ret = aspeed_sig_expr_disable(ctx, *exprs);
index cc3aaba..e49f271 100644 (file)
@@ -1709,6 +1709,12 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
 EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data);
 
 #ifdef CONFIG_PM_SLEEP
+static bool __intel_gpio_is_direct_irq(u32 value)
+{
+       return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+              (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO);
+}
+
 static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
 {
        const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
@@ -1742,8 +1748,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
         * See https://bugzilla.kernel.org/show_bug.cgi?id=214749.
         */
        value = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
-       if ((value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
-           (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO))
+       if (__intel_gpio_is_direct_irq(value))
                return true;
 
        return false;
@@ -1873,7 +1878,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
        for (i = 0; i < pctrl->soc->npins; i++) {
                const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
 
-               if (!intel_pinctrl_should_save(pctrl, desc->number))
+               if (!(intel_pinctrl_should_save(pctrl, desc->number) ||
+                     /*
+                      * If the firmware mangled the register contents too much,
+                      * check the saved value for the Direct IRQ mode.
+                      */
+                     __intel_gpio_is_direct_irq(pads[i].padcfg0)))
                        continue;
 
                intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
index 89557c7..09c4dce 100644 (file)
@@ -659,7 +659,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
        PIN_FIELD_BASE(10, 10, 4, 0x010, 0x10, 9, 3),
        PIN_FIELD_BASE(11, 11, 4, 0x000, 0x10, 24, 3),
        PIN_FIELD_BASE(12, 12, 4, 0x010, 0x10, 12, 3),
-       PIN_FIELD_BASE(13, 13, 4, 0x010, 0x10, 27, 3),
+       PIN_FIELD_BASE(13, 13, 4, 0x000, 0x10, 27, 3),
        PIN_FIELD_BASE(14, 14, 4, 0x010, 0x10, 15, 3),
        PIN_FIELD_BASE(15, 15, 4, 0x010, 0x10, 0, 3),
        PIN_FIELD_BASE(16, 16, 4, 0x010, 0x10, 18, 3),
@@ -708,7 +708,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
        PIN_FIELD_BASE(78, 78, 3, 0x000, 0x10, 15, 3),
        PIN_FIELD_BASE(79, 79, 3, 0x000, 0x10, 18, 3),
        PIN_FIELD_BASE(80, 80, 3, 0x000, 0x10, 21, 3),
-       PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 28, 3),
+       PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 24, 3),
        PIN_FIELD_BASE(82, 82, 3, 0x000, 0x10, 27, 3),
        PIN_FIELD_BASE(83, 83, 3, 0x010, 0x10, 0, 3),
        PIN_FIELD_BASE(84, 84, 3, 0x010, 0x10, 3, 3),
index 9bc6e39..32c3eda 100644 (file)
@@ -365,6 +365,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
 
                        } else {
                                debounce_enable = "  ∅";
+                               time = 0;
                        }
                        snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
                        seq_printf(s, "debounce %s (🕑 %sus)| ", debounce_enable, debounce_value);
index 99c3745..1909237 100644 (file)
@@ -372,6 +372,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
        if (!pcs->fmask)
                return 0;
        function = pinmux_generic_get_function(pctldev, fselector);
+       if (!function)
+               return -EINVAL;
        func = function->data;
        if (!func)
                return -EINVAL;
index c3c8c34..e22d03c 100644 (file)
@@ -105,7 +105,7 @@ static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
 static const char * const swr_tx_clk_groups[] = { "gpio0" };
 static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
 static const char * const swr_rx_clk_groups[] = { "gpio3" };
-static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
 static const char * const dmic1_clk_groups[] = { "gpio6" };
 static const char * const dmic1_data_groups[] = { "gpio7" };
 static const char * const dmic2_clk_groups[] = { "gpio8" };
index 99edddf..c3bfb6c 100644 (file)
@@ -366,7 +366,7 @@ static void dw_spi_irq_setup(struct dw_spi *dws)
         * will be adjusted at the final stage of the IRQ-based SPI transfer
         * execution so not to lose the leftover of the incoming data.
         */
-       level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
+       level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
        dw_writel(dws, DW_SPI_TXFTLR, level);
        dw_writel(dws, DW_SPI_RXFTLR, level - 1);
 
index 1935ca6..a1ea093 100644 (file)
@@ -90,9 +90,21 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
 /*-------------------------------------------------------------------------*/
 
 static ssize_t
+spidev_sync_unlocked(struct spi_device *spi, struct spi_message *message)
+{
+       ssize_t status;
+
+       status = spi_sync(spi, message);
+       if (status == 0)
+               status = message->actual_length;
+
+       return status;
+}
+
+static ssize_t
 spidev_sync(struct spidev_data *spidev, struct spi_message *message)
 {
-       int status;
+       ssize_t status;
        struct spi_device *spi;
 
        mutex_lock(&spidev->spi_lock);
@@ -101,12 +113,10 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
        if (spi == NULL)
                status = -ESHUTDOWN;
        else
-               status = spi_sync(spi, message);
-
-       if (status == 0)
-               status = message->actual_length;
+               status = spidev_sync_unlocked(spi, message);
 
        mutex_unlock(&spidev->spi_lock);
+
        return status;
 }
 
@@ -294,7 +304,7 @@ static int spidev_message(struct spidev_data *spidev,
                spi_message_add_tail(k_tmp, &msg);
        }
 
-       status = spidev_sync(spidev, &msg);
+       status = spidev_sync_unlocked(spidev->spi, &msg);
        if (status < 0)
                goto done;
 
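The spidev hunk splits the transfer path into a locked wrapper and an unlocked core, so that a caller already holding spidev->spi_lock does not take the same mutex again. A small pthread-based user-space analogue of that locked/unlocked split; the function names are stand-ins.

/* Build: cc -pthread -o sync_split sync_split.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* core operation, caller must already hold "lock" */
static int do_sync_unlocked(const char *msg)
{
	printf("syncing: %s\n", msg);
	return 0;
}

/* convenience wrapper for callers that do not hold the lock */
static int do_sync(const char *msg)
{
	int ret;

	pthread_mutex_lock(&lock);
	ret = do_sync_unlocked(msg);
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	/* a path that already holds the lock must use the unlocked
	 * variant, otherwise it would deadlock on a non-recursive mutex */
	pthread_mutex_lock(&lock);
	do_sync_unlocked("from locked context");
	pthread_mutex_unlock(&lock);

	do_sync("from unlocked context");
	return 0;
}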
index 1960916..e60a276 100644 (file)
@@ -1197,17 +1197,17 @@ static int nvidia_set_fbinfo(struct fb_info *info)
        return nvidiafb_check_var(&info->var, info);
 }
 
-static u32 nvidia_get_chipset(struct fb_info *info)
+static u32 nvidia_get_chipset(struct pci_dev *pci_dev,
+                             volatile u32 __iomem *REGS)
 {
-       struct nvidia_par *par = info->par;
-       u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
+       u32 id = (pci_dev->vendor << 16) | pci_dev->device;
 
        printk(KERN_INFO PFX "Device ID: %x \n", id);
 
        if ((id & 0xfff0) == 0x00f0 ||
            (id & 0xfff0) == 0x02e0) {
                /* pci-e */
-               id = NV_RD32(par->REGS, 0x1800);
+               id = NV_RD32(REGS, 0x1800);
 
                if ((id & 0x0000ffff) == 0x000010DE)
                        id = 0x10DE0000 | (id >> 16);
@@ -1220,12 +1220,11 @@ static u32 nvidia_get_chipset(struct fb_info *info)
        return id;
 }
 
-static u32 nvidia_get_arch(struct fb_info *info)
+static u32 nvidia_get_arch(u32 Chipset)
 {
-       struct nvidia_par *par = info->par;
        u32 arch = 0;
 
-       switch (par->Chipset & 0x0ff0) {
+       switch (Chipset & 0x0ff0) {
        case 0x0100:            /* GeForce 256 */
        case 0x0110:            /* GeForce2 MX */
        case 0x0150:            /* GeForce2 */
@@ -1278,16 +1277,44 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
        struct fb_info *info;
        unsigned short cmd;
        int ret;
+       volatile u32 __iomem *REGS;
+       int Chipset;
+       u32 Architecture;
 
        NVTRACE_ENTER();
        assert(pd != NULL);
 
+       if (pci_enable_device(pd)) {
+               printk(KERN_ERR PFX "cannot enable PCI device\n");
+               return -ENODEV;
+       }
+
+       /* enable IO and mem if not already done */
+       pci_read_config_word(pd, PCI_COMMAND, &cmd);
+       cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+       pci_write_config_word(pd, PCI_COMMAND, cmd);
+
+       nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
+       nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
+
+       REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
+       if (!REGS) {
+               printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
+               return -ENODEV;
+       }
+
+       Chipset = nvidia_get_chipset(pd, REGS);
+       Architecture = nvidia_get_arch(Chipset);
+       if (Architecture == 0) {
+               printk(KERN_ERR PFX "unknown NV_ARCH\n");
+               goto err_out;
+       }
+
        ret = aperture_remove_conflicting_pci_devices(pd, "nvidiafb");
        if (ret)
-               return ret;
+               goto err_out;
 
        info = framebuffer_alloc(sizeof(struct nvidia_par), &pd->dev);
-
        if (!info)
                goto err_out;
 
@@ -1298,11 +1325,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
        if (info->pixmap.addr == NULL)
                goto err_out_kfree;
 
-       if (pci_enable_device(pd)) {
-               printk(KERN_ERR PFX "cannot enable PCI device\n");
-               goto err_out_enable;
-       }
-
        if (pci_request_regions(pd, "nvidiafb")) {
                printk(KERN_ERR PFX "cannot request PCI regions\n");
                goto err_out_enable;
@@ -1318,34 +1340,17 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
        par->paneltweak = paneltweak;
        par->reverse_i2c = reverse_i2c;
 
-       /* enable IO and mem if not already done */
-       pci_read_config_word(pd, PCI_COMMAND, &cmd);
-       cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
-       pci_write_config_word(pd, PCI_COMMAND, cmd);
-
-       nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
        nvidiafb_fix.smem_start = pci_resource_start(pd, 1);
-       nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
-
-       par->REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
 
-       if (!par->REGS) {
-               printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
-               goto err_out_free_base0;
-       }
+       par->REGS = REGS;
 
-       par->Chipset = nvidia_get_chipset(info);
-       par->Architecture = nvidia_get_arch(info);
-
-       if (par->Architecture == 0) {
-               printk(KERN_ERR PFX "unknown NV_ARCH\n");
-               goto err_out_arch;
-       }
+       par->Chipset = Chipset;
+       par->Architecture = Architecture;
 
        sprintf(nvidiafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
 
        if (NVCommonSetup(info))
-               goto err_out_arch;
+               goto err_out_free_base0;
 
        par->FbAddress = nvidiafb_fix.smem_start;
        par->FbMapSize = par->RamAmountKBytes * 1024;
@@ -1401,7 +1406,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
                goto err_out_iounmap_fb;
        }
 
-
        printk(KERN_INFO PFX
               "PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n",
               info->fix.id,
@@ -1415,15 +1419,14 @@ err_out_iounmap_fb:
 err_out_free_base1:
        fb_destroy_modedb(info->monspecs.modedb);
        nvidia_delete_i2c_busses(par);
-err_out_arch:
-       iounmap(par->REGS);
- err_out_free_base0:
+err_out_free_base0:
        pci_release_regions(pd);
 err_out_enable:
        kfree(info->pixmap.addr);
 err_out_kfree:
        framebuffer_release(info);
 err_out:
+       iounmap(REGS);
        return -ENODEV;
 }
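Note on the nvidiafb hunks above: probe now enables the device, ioremaps the registers and detects the chipset/architecture before allocating the fb_info, and the error labels are rewired so each one undoes exactly what has been set up so far (with iounmap() done last at err_out). A generic, standalone sketch of the goto-unwind idiom this relies on, with hypothetical resources rather than the nvidiafb code:

#include <stdlib.h>

/* hypothetical final step that can fail */
static int register_device(void *info) { return info ? 0 : -1; }

static int probe_example(void)
{
        void *regs, *info;

        regs = malloc(64);               /* stands in for ioremap() */
        if (!regs)
                goto err_out;

        info = malloc(128);              /* stands in for framebuffer_alloc() */
        if (!info)
                goto err_unmap;

        if (register_device(info) < 0)
                goto err_free_info;

        return 0;                        /* success: nothing to unwind */

err_free_info:
        free(info);
err_unmap:
        free(regs);
err_out:
        /* each label releases only what was acquired before the jump,
         * in reverse order of acquisition */
        return -1;
}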
 
index 6a2cf75..ff4b1d5 100644 (file)
@@ -1426,12 +1426,20 @@ static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bi
        u32 bio_size = 0;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;
+       int i;
 
        bio_for_each_segment_all(bvec, bio, iter_all)
                bio_size += bvec->bv_len;
 
-       bitmap_set(rbio->error_bitmap, total_sector_nr,
-                  bio_size >> rbio->bioc->fs_info->sectorsize_bits);
+       /*
+        * Since we can have multiple bios touching the error_bitmap, we cannot
+        * call bitmap_set() without protection.
+        *
+        * Instead use set_bit() for each bit, as set_bit() itself is atomic.
+        */
+       for (i = total_sector_nr; i < total_sector_nr +
+            (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
+               set_bit(i, rbio->error_bitmap);
 }
 
 /* Verify the data sectors at read time. */
@@ -1886,7 +1894,7 @@ pstripe:
                sector->uptodate = 1;
        }
        if (failb >= 0) {
-               ret = verify_one_sector(rbio, faila, sector_nr);
+               ret = verify_one_sector(rbio, failb, sector_nr);
                if (ret < 0)
                        goto cleanup;
 
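Note on the raid56 hunks above: the first replaces one bitmap_set() with per-bit set_bit() calls because several bios can update rbio->error_bitmap concurrently and bitmap_set() is not atomic; the second makes the recovery path verify failb rather than faila. A small userspace analogue of the atomic-per-bit idea, using C11 atomics (illustrative only, not btrfs code):

#include <stdatomic.h>
#include <stddef.h>

#define BITS_PER_WORD (sizeof(unsigned long) * 8)

/* Atomically set bits [start, start + count) in a word-array bitmap.
 * Safe against concurrent callers, unlike a plain read-modify-write
 * over whole words. */
static void bitmap_set_atomic(_Atomic unsigned long *map,
                              size_t start, size_t count)
{
        for (size_t i = start; i < start + count; i++)
                atomic_fetch_or(&map[i / BITS_PER_WORD],
                                1UL << (i % BITS_PER_WORD));
}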
index e65e6b6..d50182b 100644 (file)
@@ -8073,10 +8073,10 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
        /*
         * Check that we don't overflow at later allocations, we request
         * clone_sources_count + 1 items, and compare to unsigned long inside
-        * access_ok.
+        * access_ok. Also set an upper limit for allocation size so this can't
+        * easily exhaust memory. Max number of clone sources is about 200K.
         */
-       if (arg->clone_sources_count >
-           ULONG_MAX / sizeof(struct clone_root) - 1) {
+       if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
                ret = -EINVAL;
                goto out;
        }
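Note on the send hunk above: besides guarding against arithmetic overflow, the check now caps the allocation that clone_sources_count can drive at 8 MiB, so a hostile ioctl argument cannot pin large amounts of kernel memory. The general shape of that guard, as a hedged userspace sketch with generic names:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct clone_entry { uint64_t root; uint64_t ino; };

#define MAX_ALLOC_BYTES (8u << 20)       /* cap analogous to SZ_8M */

/* Validate a user-supplied element count before sizing a buffer. */
static void *alloc_user_array(uint64_t count)
{
        if (count > MAX_ALLOC_BYTES / sizeof(struct clone_entry)) {
                errno = EINVAL;
                return NULL;
        }
        /* count + 1 cannot overflow after the check above */
        return calloc(count + 1, sizeof(struct clone_entry));
}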
index bcfef75..4cdadf3 100644 (file)
@@ -1600,7 +1600,7 @@ again:
        if (ret < 0)
                goto out;
 
-       while (1) {
+       while (search_start < search_end) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
@@ -1623,6 +1623,9 @@ again:
                if (key.type != BTRFS_DEV_EXTENT_KEY)
                        goto next;
 
+               if (key.offset > search_end)
+                       break;
+
                if (key.offset > search_start) {
                        hole_size = key.offset - search_start;
                        dev_extent_hole_check(device, &search_start, &hole_size,
@@ -1683,6 +1686,7 @@ next:
        else
                ret = 0;
 
+       ASSERT(max_hole_start + max_hole_size <= search_end);
 out:
        btrfs_free_path(path);
        *start = max_hole_start;
index 01a13de..da7bb91 100644 (file)
@@ -63,7 +63,7 @@ struct list_head *zlib_alloc_workspace(unsigned int level)
 
        workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
                        zlib_inflate_workspacesize());
-       workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
+       workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL);
        workspace->level = level;
        workspace->buf = NULL;
        /*
index e163f58..27a245d 100644 (file)
@@ -3685,6 +3685,12 @@ static void handle_session(struct ceph_mds_session *session,
                break;
 
        case CEPH_SESSION_FLUSHMSG:
+               /* flush cap releases */
+               spin_lock(&session->s_cap_lock);
+               if (session->s_num_cap_releases)
+                       ceph_flush_cap_releases(mdsc, session);
+               spin_unlock(&session->s_cap_lock);
+
                send_flushmsg_ack(mdsc, session, seq);
                break;
 
index 22dfc1f..b8d1cba 100644 (file)
@@ -3889,7 +3889,7 @@ uncached_fill_pages(struct TCP_Server_Info *server,
                rdata->got_bytes += result;
        }
 
-       return rdata->got_bytes > 0 && result != -ECONNABORTED ?
+       return result != -ECONNABORTED && rdata->got_bytes > 0 ?
                                                rdata->got_bytes : result;
 }
 
@@ -4665,7 +4665,7 @@ readpages_fill_pages(struct TCP_Server_Info *server,
                rdata->got_bytes += result;
        }
 
-       return rdata->got_bytes > 0 && result != -ECONNABORTED ?
+       return result != -ECONNABORTED && rdata->got_bytes > 0 ?
                                                rdata->got_bytes : result;
 }
 
index de78bde..a25ecec 100644 (file)
@@ -838,6 +838,30 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
        }
 }
 
+int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
+{
+       if (cprm->to_skip) {
+               if (!__dump_skip(cprm, cprm->to_skip))
+                       return 0;
+               cprm->to_skip = 0;
+       }
+       return __dump_emit(cprm, addr, nr);
+}
+EXPORT_SYMBOL(dump_emit);
+
+void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
+{
+       cprm->to_skip = pos - cprm->pos;
+}
+EXPORT_SYMBOL(dump_skip_to);
+
+void dump_skip(struct coredump_params *cprm, size_t nr)
+{
+       cprm->to_skip += nr;
+}
+EXPORT_SYMBOL(dump_skip);
+
+#ifdef CONFIG_ELF_CORE
 static int dump_emit_page(struct coredump_params *cprm, struct page *page)
 {
        struct bio_vec bvec = {
@@ -871,30 +895,6 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
        return 1;
 }
 
-int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
-{
-       if (cprm->to_skip) {
-               if (!__dump_skip(cprm, cprm->to_skip))
-                       return 0;
-               cprm->to_skip = 0;
-       }
-       return __dump_emit(cprm, addr, nr);
-}
-EXPORT_SYMBOL(dump_emit);
-
-void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
-{
-       cprm->to_skip = pos - cprm->pos;
-}
-EXPORT_SYMBOL(dump_skip_to);
-
-void dump_skip(struct coredump_params *cprm, size_t nr)
-{
-       cprm->to_skip += nr;
-}
-EXPORT_SYMBOL(dump_skip);
-
-#ifdef CONFIG_ELF_CORE
 int dump_user_range(struct coredump_params *cprm, unsigned long start,
                    unsigned long len)
 {
index 4fc8018..1220d18 100644 (file)
@@ -127,11 +127,6 @@ struct drm_client_buffer {
        struct drm_client_dev *client;
 
        /**
-        * @handle: Buffer handle
-        */
-       u32 handle;
-
-       /**
         * @pitch: Buffer pitch
         */
        u32 pitch;
index 76ef2e4..333c1fe 100644 (file)
@@ -573,6 +573,14 @@ struct mlx5_debugfs_entries {
        struct dentry *lag_debugfs;
 };
 
+enum mlx5_func_type {
+       MLX5_PF,
+       MLX5_VF,
+       MLX5_SF,
+       MLX5_HOST_PF,
+       MLX5_FUNC_TYPE_NUM,
+};
+
 struct mlx5_ft_pool;
 struct mlx5_priv {
        /* IRQ table valid only for real pci devices PF or VF */
@@ -583,11 +591,10 @@ struct mlx5_priv {
        struct mlx5_nb          pg_nb;
        struct workqueue_struct *pg_wq;
        struct xarray           page_root_xa;
-       u32                     fw_pages;
        atomic_t                reg_pages;
        struct list_head        free_list;
-       u32                     vfs_pages;
-       u32                     host_pf_pages;
+       u32                     fw_pages;
+       u32                     page_counters[MLX5_FUNC_TYPE_NUM];
        u32                     fw_pages_alloc_failed;
        u32                     give_pages_dropped;
        u32                     reclaim_pages_discard;
index 0512fde..7b158fc 100644 (file)
@@ -64,6 +64,7 @@ struct drm_virtgpu_map {
        __u32 pad;
 };
 
+/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
 struct drm_virtgpu_execbuffer {
        __u32 flags;
        __u32 size;
index 874a923..283dec7 100644 (file)
@@ -18,6 +18,7 @@
 #ifndef _UAPI_LINUX_IP_H
 #define _UAPI_LINUX_IP_H
 #include <linux/types.h>
+#include <linux/stddef.h>
 #include <asm/byteorder.h>
 
 #define IPTOS_TOS_MASK         0x1E
index 81f4243..53326df 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/libc-compat.h>
 #include <linux/types.h>
+#include <linux/stddef.h>
 #include <linux/in6.h>
 #include <asm/byteorder.h>
 
index 205dc9e..ca826bd 100644 (file)
@@ -1205,12 +1205,13 @@ void rebuild_sched_domains(void)
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
+ * @new_cpus: the temp variable for the new effective_cpus mask
  *
  * Iterate through each task of @cs updating its cpus_allowed to the
  * effective cpuset's.  As this function is called with cpuset_rwsem held,
  * cpuset membership stays stable.
  */
-static void update_tasks_cpumask(struct cpuset *cs)
+static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
 {
        struct css_task_iter it;
        struct task_struct *task;
@@ -1224,7 +1225,10 @@ static void update_tasks_cpumask(struct cpuset *cs)
                if (top_cs && (task->flags & PF_KTHREAD) &&
                    kthread_is_per_cpu(task))
                        continue;
-               set_cpus_allowed_ptr(task, cs->effective_cpus);
+
+               cpumask_and(new_cpus, cs->effective_cpus,
+                           task_cpu_possible_mask(task));
+               set_cpus_allowed_ptr(task, new_cpus);
        }
        css_task_iter_end(&it);
 }
@@ -1509,7 +1513,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
        spin_unlock_irq(&callback_lock);
 
        if (adding || deleting)
-               update_tasks_cpumask(parent);
+               update_tasks_cpumask(parent, tmp->new_cpus);
 
        /*
         * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
@@ -1661,7 +1665,7 @@ update_parent_subparts:
                WARN_ON(!is_in_v2_mode() &&
                        !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
 
-               update_tasks_cpumask(cp);
+               update_tasks_cpumask(cp, tmp->new_cpus);
 
                /*
                 * On legacy hierarchy, if the effective cpumask of any non-
@@ -2309,7 +2313,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
                }
        }
 
-       update_tasks_cpumask(parent);
+       update_tasks_cpumask(parent, tmpmask.new_cpus);
 
        if (parent->child_ecpus_count)
                update_sibling_cpumasks(parent, cs, &tmpmask);
@@ -3348,7 +3352,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
         * as the tasks will be migrated to an ancestor.
         */
        if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
-               update_tasks_cpumask(cs);
+               update_tasks_cpumask(cs, new_cpus);
        if (mems_updated && !nodes_empty(cs->mems_allowed))
                update_tasks_nodemask(cs);
 
@@ -3385,7 +3389,7 @@ hotplug_update_tasks(struct cpuset *cs,
        spin_unlock_irq(&callback_lock);
 
        if (cpus_updated)
-               update_tasks_cpumask(cs);
+               update_tasks_cpumask(cs, new_cpus);
        if (mems_updated)
                update_tasks_nodemask(cs);
 }
@@ -3692,15 +3696,38 @@ void __init cpuset_init_smp(void)
  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
  * attached to the specified @tsk.  Guaranteed to return some non-empty
  * subset of cpu_online_mask, even if this means going outside the
- * tasks cpuset.
+ * tasks cpuset, except when the task is in the top cpuset.
  **/
 
 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
        unsigned long flags;
+       struct cpuset *cs;
 
        spin_lock_irqsave(&callback_lock, flags);
-       guarantee_online_cpus(tsk, pmask);
+       rcu_read_lock();
+
+       cs = task_cs(tsk);
+       if (cs != &top_cpuset)
+               guarantee_online_cpus(tsk, pmask);
+       /*
+        * Tasks in the top cpuset won't get updates to their cpumasks
+        * when a hotplug online/offline event happens. So we include all
+        * offline cpus in the allowed cpu list.
+        */
+       if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
+               const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
+
+               /*
+                * We first exclude cpus allocated to partitions. If there is no
+                * allowable online cpu left, we fall back to all possible cpus.
+                */
+               cpumask_andnot(pmask, possible_mask, top_cpuset.subparts_cpus);
+               if (!cpumask_intersects(pmask, cpu_online_mask))
+                       cpumask_copy(pmask, possible_mask);
+       }
+
+       rcu_read_unlock();
        spin_unlock_irqrestore(&callback_lock, flags);
 }
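Note on the cpuset hunks above: update_tasks_cpumask() now receives a scratch mask and intersects the cpuset's effective_cpus with task_cpu_possible_mask() before applying it, and cpuset_cpus_allowed() falls back to the possible mask when nothing usable remains online. A toy sketch of that intersect-then-fall-back logic on a 64-bit mask (illustrative types and names only):

#include <stdint.h>

/* Tiny stand-in for a cpumask: one bit per CPU, up to 64 CPUs. */
typedef uint64_t mask_t;

/* Prefer the intersection of what the cgroup allows and what the task
 * can physically run on; if that leaves nothing online, fall back to
 * the full possible mask, mirroring the fallback in the hunk above. */
static mask_t pick_allowed(mask_t effective, mask_t possible, mask_t online)
{
        mask_t m = effective & possible;

        if (!(m & online))
                m = possible;
        return m;
}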
 
index 010cf4e..728f434 100644 (file)
@@ -901,8 +901,9 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
                 * then we need to wake the new top waiter up to try
                 * to get the lock.
                 */
-               if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
-                       wake_up_state(waiter->task, waiter->wake_state);
+               top_waiter = rt_mutex_top_waiter(lock);
+               if (prerequeue_top_waiter != top_waiter)
+                       wake_up_state(top_waiter->task, top_waiter->wake_state);
                raw_spin_unlock_irq(&lock->wait_lock);
                return 0;
        }
index 78ed5f1..c9e40f6 100644 (file)
@@ -9148,9 +9148,6 @@ buffer_percent_write(struct file *filp, const char __user *ubuf,
        if (val > 100)
                return -EINVAL;
 
-       if (!val)
-               val = 1;
-
        tr->buffer_percent = val;
 
        (*ppos)++;
index 685e30e..d036c78 100644 (file)
@@ -1640,13 +1640,7 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
        end = PFN_DOWN(base + size);
 
        for (; cursor < end; cursor++) {
-               /*
-                * Reserved pages are always initialized by the end of
-                * memblock_free_all() (by memmap_init() and, if deferred
-                * initialization is enabled, memmap_init_reserved_pages()), so
-                * these pages can be released directly to the buddy allocator.
-                */
-               __free_pages_core(pfn_to_page(cursor), 0);
+               memblock_free_pages(pfn_to_page(cursor), cursor, 0);
                totalram_pages_inc();
        }
 }
index 0745aed..3bb3484 100644 (file)
@@ -5631,9 +5631,12 @@ EXPORT_SYMBOL(get_zeroed_page);
  */
 void __free_pages(struct page *page, unsigned int order)
 {
+       /* get PageHead before we drop reference */
+       int head = PageHead(page);
+
        if (put_page_testzero(page))
                free_the_page(page, order);
-       else if (!PageHead(page))
+       else if (!head)
                while (order-- > 0)
                        free_the_page(page + (1 << order), order);
 }
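Note on the __free_pages() hunk above: PageHead() is sampled before put_page_testzero() because, once the reference is dropped, another CPU may free and reuse the page, so testing the flag afterwards can read a stale struct page. A minimal refcount analogue in C11 (hypothetical object, not mm code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
        atomic_int refcount;
        bool is_compound;        /* stands in for PageHead() */
};

static void release(struct obj *o)
{
        /* Read any flag we still need *before* dropping the reference:
         * after the decrement another holder may free and reuse *o. */
        bool compound = o->is_compound;

        if (atomic_fetch_sub(&o->refcount, 1) == 1) {
                free(o);
                return;
        }
        if (!compound) {
                /* ... tail-specific cleanup that must not touch *o ... */
        }
}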
index f33c473..ca4ad6c 100644 (file)
@@ -165,6 +165,46 @@ static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb)
         * leaving this function.
         */
        ecu = j1939_ecu_get_by_name_locked(priv, name);
+
+       if (ecu && ecu->addr == skcb->addr.sa) {
+               /* The ISO 11783-5 standard, in "4.5.2 - Address claim
+                * requirements", states:
+                *   d) No CF shall begin, or resume, transmission on the
+                *      network until 250 ms after it has successfully claimed
+                *      an address except when responding to a request for
+                *      address-claimed.
+                *
+                * But "Figure 6" and "Figure 7" in "4.5.4.2 - Address-claim
+                * prioritization" show that the CF begins the transmission
+                * after 250 ms from the first AC (address-claimed) message
+                * even if it sends another AC message during that time window
+                * to resolve the address contention with another CF.
+                *
+                * As stated in "4.4.2.3 - Address-claimed message":
+                *   In order to successfully claim an address, the CF sending
+                *   an address claimed message shall not receive a contending
+                *   claim from another CF for at least 250 ms.
+                *
+                * As stated in "4.4.3.2 - NAME management (NM) message":
+                *   1) A commanding CF can
+                *      d) request that a CF with a specified NAME transmit
+                *         the address-claimed message with its current NAME.
+                *   2) A target CF shall
+                *      d) send an address-claimed message in response to a
+                *         request for a matching NAME
+                *
+                * Taking the above arguments into account, the 250 ms wait is
+                * requested only during network initialization.
+                *
+                * Do not restart the timer on AC message if both the NAME and
+                * the address match and so if the address has already been
+                * claimed (timer has expired) or the AC message has been sent
+                * to resolve the contention with another CF (timer is still
+                * running).
+                */
+               goto out_ecu_put;
+       }
+
        if (!ecu && j1939_address_is_unicast(skcb->addr.sa))
                ecu = j1939_ecu_create_locked(priv, name);
 
index 032d6d0..909a10e 100644 (file)
@@ -9979,7 +9979,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
                goto err_xa_alloc;
 
        devlink->netdevice_nb.notifier_call = devlink_netdevice_event;
-       ret = register_netdevice_notifier_net(net, &devlink->netdevice_nb);
+       ret = register_netdevice_notifier(&devlink->netdevice_nb);
        if (ret)
                goto err_register_netdevice_notifier;
 
@@ -10171,8 +10171,7 @@ void devlink_free(struct devlink *devlink)
        xa_destroy(&devlink->snapshot_ids);
        xa_destroy(&devlink->ports);
 
-       WARN_ON_ONCE(unregister_netdevice_notifier_net(devlink_net(devlink),
-                                                      &devlink->netdevice_nb));
+       WARN_ON_ONCE(unregister_netdevice_notifier(&devlink->netdevice_nb));
 
        xa_erase(&devlinks, devlink->index);
 
@@ -10503,6 +10502,8 @@ static int devlink_netdevice_event(struct notifier_block *nb,
                break;
        case NETDEV_REGISTER:
        case NETDEV_CHANGENAME:
+               if (devlink_net(devlink) != dev_net(netdev))
+                       return NOTIFY_OK;
                /* Set the netdev on top of previously set type. Note this
                 * event happens also during net namespace change so here
                 * we take into account netdev pointer appearing in this
@@ -10512,6 +10513,8 @@ static int devlink_netdevice_event(struct notifier_block *nb,
                                        netdev);
                break;
        case NETDEV_UNREGISTER:
+               if (devlink_net(devlink) != dev_net(netdev))
+                       return NOTIFY_OK;
                /* Clear netdev pointer, but not the type. This event happens
                 * also during net namespace change so we need to clear
                 * pointer to netdev that is going to another net namespace.
index f00a79f..4edd217 100644 (file)
@@ -269,7 +269,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
                            (n->nud_state == NUD_NOARP) ||
                            (tbl->is_multicast &&
                             tbl->is_multicast(n->primary_key)) ||
-                           time_after(tref, n->updated))
+                           !time_in_range(n->updated, tref, jiffies))
                                remove = true;
                        write_unlock(&n->lock);
 
@@ -289,7 +289,17 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 
 static void neigh_add_timer(struct neighbour *n, unsigned long when)
 {
+       /* Use safe distance from the jiffies - LONG_MAX point while timer
+        * is running in DELAY/PROBE state but still show to user space
+        * large times in the past.
+        */
+       unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
+
        neigh_hold(n);
+       if (!time_in_range(n->confirmed, mint, jiffies))
+               n->confirmed = mint;
+       if (time_before(n->used, n->confirmed))
+               n->used = n->confirmed;
        if (unlikely(mod_timer(&n->timer, when))) {
                printk("NEIGH: BUG, double timer add, state is %x\n",
                       n->nud_state);
@@ -1001,12 +1011,14 @@ static void neigh_periodic_work(struct work_struct *work)
                                goto next_elt;
                        }
 
-                       if (time_before(n->used, n->confirmed))
+                       if (time_before(n->used, n->confirmed) &&
+                           time_is_before_eq_jiffies(n->confirmed))
                                n->used = n->confirmed;
 
                        if (refcount_read(&n->refcnt) == 1 &&
                            (state == NUD_FAILED ||
-                            time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
+                            !time_in_range_open(jiffies, n->used,
+                                                n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
                                *np = n->next;
                                neigh_mark_dead(n);
                                write_unlock(&n->lock);
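Note on the neighbour hunks above: several ageing checks move from time_after()/time_before() to time_in_range() variants so that confirmed/updated stamps far in the past (or pushed ahead of jiffies) cannot wrap around and look recent. The underlying wraparound-safe comparison is plain unsigned modular arithmetic; a standalone sketch whose names are made up but whose semantics mirror the jiffies macros:

#include <stdbool.h>

/* jiffies-style comparison on an unsigned counter that wraps:
 * a is "after" b when the signed difference is positive. */
static bool ticks_after(unsigned long a, unsigned long b)
{
        return (long)(a - b) > 0;
}

/* true iff t lies in the closed interval [start, end], even when the
 * counter has wrapped between start and end. */
static bool ticks_in_range(unsigned long t, unsigned long start,
                           unsigned long end)
{
        return !ticks_after(start, t) && !ticks_after(t, end);
}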
index f954d58..6f27c24 100644 (file)
@@ -1531,6 +1531,8 @@ set_sndbuf:
                        ret = -EINVAL;
                        break;
                }
+               if ((u8)val == SOCK_TXREHASH_DEFAULT)
+                       val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
                /* Paired with READ_ONCE() in tcp_rtx_synack() */
                WRITE_ONCE(sk->sk_txrehash, (u8)val);
                break;
@@ -3451,7 +3453,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        sk->sk_pacing_rate = ~0UL;
        WRITE_ONCE(sk->sk_pacing_shift, 10);
        sk->sk_incoming_cpu = -1;
-       sk->sk_txrehash = SOCK_TXREHASH_DEFAULT;
 
        sk_rx_queue_clear(sk);
        /*
index 6c0ec27..cf11f10 100644 (file)
@@ -347,6 +347,7 @@ lookup_protocol:
        sk->sk_destruct    = inet_sock_destruct;
        sk->sk_protocol    = protocol;
        sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+       sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
 
        inet->uc_ttl    = -1;
        inet->mc_loop   = 1;
index d1f8375..f2c43f6 100644 (file)
@@ -1225,9 +1225,6 @@ int inet_csk_listen_start(struct sock *sk)
        sk->sk_ack_backlog = 0;
        inet_csk_delack_init(sk);
 
-       if (sk->sk_txrehash == SOCK_TXREHASH_DEFAULT)
-               sk->sk_txrehash = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
-
        /* There is race window here: we announce ourselves listening,
         * but this transition is still not validated by get_port().
         * It is OK, because this socket enters to hash table only
index fee9163..8479347 100644 (file)
@@ -222,6 +222,7 @@ lookup_protocol:
        np->pmtudisc    = IPV6_PMTUDISC_WANT;
        np->repflow     = net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_ESTABLISHED;
        sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
+       sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
 
        /* Init the ipv4 part of the socket since we can have sockets
         * using v6 API for ipv4.
index 2ea7eae..10fe977 100644 (file)
@@ -998,8 +998,8 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
 {
        int addrlen = sizeof(struct sockaddr_in);
        struct sockaddr_storage addr;
-       struct mptcp_sock *msk;
        struct socket *ssock;
+       struct sock *newsk;
        int backlog = 1024;
        int err;
 
@@ -1008,11 +1008,13 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
        if (err)
                return err;
 
-       msk = mptcp_sk(entry->lsk->sk);
-       if (!msk)
+       newsk = entry->lsk->sk;
+       if (!newsk)
                return -EINVAL;
 
-       ssock = __mptcp_nmpc_socket(msk);
+       lock_sock(newsk);
+       ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
+       release_sock(newsk);
        if (!ssock)
                return -EINVAL;
 
index 8cd6cc6..bc6c1f6 100644 (file)
@@ -2897,6 +2897,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
        struct mptcp_subflow_context *subflow;
        struct mptcp_sock *msk = mptcp_sk(sk);
        bool do_cancel_work = false;
+       int subflows_alive = 0;
 
        sk->sk_shutdown = SHUTDOWN_MASK;
 
@@ -2922,6 +2923,8 @@ cleanup:
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                bool slow = lock_sock_fast_nested(ssk);
 
+               subflows_alive += ssk->sk_state != TCP_CLOSE;
+
                /* since the close timeout takes precedence on the fail one,
                 * cancel the latter
                 */
@@ -2937,6 +2940,12 @@ cleanup:
        }
        sock_orphan(sk);
 
+       /* all the subflows are closed, only timeout can change the msk
+        * state, let's not keep resources busy for no reason
+        */
+       if (subflows_alive == 0)
+               inet_sk_state_store(sk, TCP_CLOSE);
+
        sock_hold(sk);
        pr_debug("msk=%p state=%d", sk, sk->sk_state);
        if (msk->token)
index d4b1e6e..7f2c372 100644 (file)
@@ -760,14 +760,21 @@ static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname,
 static int mptcp_setsockopt_first_sf_only(struct mptcp_sock *msk, int level, int optname,
                                          sockptr_t optval, unsigned int optlen)
 {
+       struct sock *sk = (struct sock *)msk;
        struct socket *sock;
+       int ret = -EINVAL;
 
        /* Limit to first subflow, before the connection establishment */
+       lock_sock(sk);
        sock = __mptcp_nmpc_socket(msk);
        if (!sock)
-               return -EINVAL;
+               goto unlock;
 
-       return tcp_setsockopt(sock->sk, level, optname, optval, optlen);
+       ret = tcp_setsockopt(sock->sk, level, optname, optval, optlen);
+
+unlock:
+       release_sock(sk);
+       return ret;
 }
 
 static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
index ec54413..32904c7 100644 (file)
@@ -1399,6 +1399,7 @@ void __mptcp_error_report(struct sock *sk)
        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                int err = sock_error(ssk);
+               int ssk_state;
 
                if (!err)
                        continue;
@@ -1409,7 +1410,14 @@ void __mptcp_error_report(struct sock *sk)
                if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
                        continue;
 
-               inet_sk_state_store(sk, inet_sk_state_load(ssk));
+               /* We need to propagate only transition to CLOSE state.
+                * Orphaned socket will see such state change via
+                * subflow_sched_work_if_closed() and that path will properly
+                * destroy the msk as needed.
+                */
+               ssk_state = inet_sk_state_load(ssk);
+               if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
+                       inet_sk_state_store(sk, ssk_state);
                sk->sk_err = -err;
 
                /* This barrier is coupled with smp_rmb() in mptcp_poll() */
@@ -1679,7 +1687,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
        if (err)
                return err;
 
-       lock_sock(sf->sk);
+       lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
 
        /* the newly created socket has to be in the same cgroup as its parent */
        mptcp_attach_cgroup(sk, sf->sk);
index b47e4f0..c19c935 100644 (file)
@@ -104,9 +104,9 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs,
        spin_lock_irqsave(&q->lock, flags);
        head = &q->zcookie_head;
        if (!list_empty(head)) {
-               info = list_entry(head, struct rds_msg_zcopy_info,
-                                 rs_zcookie_next);
-               if (info && rds_zcookie_add(info, cookie)) {
+               info = list_first_entry(head, struct rds_msg_zcopy_info,
+                                       rs_zcookie_next);
+               if (rds_zcookie_add(info, cookie)) {
                        spin_unlock_irqrestore(&q->lock, flags);
                        kfree(rds_info_from_znotifier(znotif));
                        /* caller invokes rds_wake_sk_sleep() */
index cc28e41..92f2975 100644 (file)
@@ -433,7 +433,7 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
                while (m) {
                        unsigned int prio = ffz(~m);
 
-                       if (WARN_ON_ONCE(prio > ARRAY_SIZE(p->inner.clprio)))
+                       if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
                                break;
                        m &= ~(1 << prio);
 
index a0f62fa..8cbf45a 100644 (file)
@@ -5,6 +5,7 @@
  * Based on code and translator idea by: Florian Westphal <fw@strlen.de>
  */
 #include <linux/compat.h>
+#include <linux/nospec.h>
 #include <linux/xfrm.h>
 #include <net/xfrm.h>
 
@@ -302,7 +303,7 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
        nla_for_each_attr(nla, attrs, len, remaining) {
                int err;
 
-               switch (type) {
+               switch (nlh_src->nlmsg_type) {
                case XFRM_MSG_NEWSPDINFO:
                        err = xfrm_nla_cpy(dst, nla, nla_len(nla));
                        break;
@@ -437,6 +438,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
                NL_SET_ERR_MSG(extack, "Bad attribute");
                return -EOPNOTSUPP;
        }
+       type = array_index_nospec(type, XFRMA_MAX + 1);
        if (nla_len(nla) < compat_policy[type].len) {
                NL_SET_ERR_MSG(extack, "Attribute bad length");
                return -EOPNOTSUPP;
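Note on the xfrm compat hunks above: besides keying the switch on nlh_src->nlmsg_type, the 32-bit path clamps the attribute type with array_index_nospec() after the bounds check, so a mispredicted branch cannot speculatively index compat_policy[] out of range (Spectre v1). A portable approximation of that clamp follows; the in-kernel helper builds the mask with arch-specific, provably branch-free code, so this only shows the idea:

#include <stddef.h>

/* Clamp idx to [0, size) in a data-dependent way so a mispredicted
 * "idx < size" check cannot be used to read past the array. */
static size_t index_nospec(size_t idx, size_t size)
{
        /* all-ones when idx < size, zero otherwise */
        size_t mask = (size_t)0 - (size_t)(idx < size);

        return idx & mask;
}

/* usage: after verifying idx < TABLE_SIZE, do
 *   idx = index_nospec(idx, TABLE_SIZE);
 * before dereferencing table[idx]. */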
index c06e54a..436d296 100644 (file)
@@ -279,8 +279,7 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
                goto out;
 
        if (x->props.flags & XFRM_STATE_DECAP_DSCP)
-               ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
-                              ipipv6_hdr(skb));
+               ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
        if (!(x->props.flags & XFRM_STATE_NOECN))
                ipip6_ecn_decapsulate(skb);
 
index 1f99dc4..35279c2 100644 (file)
@@ -310,6 +310,52 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->mark = 0;
 }
 
+static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
+                      int encap_type, unsigned short family)
+{
+       struct sec_path *sp;
+
+       sp = skb_sec_path(skb);
+       if (sp && (sp->len || sp->olen) &&
+           !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+               goto discard;
+
+       XFRM_SPI_SKB_CB(skb)->family = family;
+       if (family == AF_INET) {
+               XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+       } else {
+               XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
+               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
+       }
+
+       return xfrm_input(skb, nexthdr, spi, encap_type);
+discard:
+       kfree_skb(skb);
+       return 0;
+}
+
+static int xfrmi4_rcv(struct sk_buff *skb)
+{
+       return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
+}
+
+static int xfrmi6_rcv(struct sk_buff *skb)
+{
+       return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
+                          0, 0, AF_INET6);
+}
+
+static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+{
+       return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
+}
+
+static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+{
+       return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
+}
+
 static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
 {
        const struct xfrm_mode *inner_mode;
@@ -945,8 +991,8 @@ static struct pernet_operations xfrmi_net_ops = {
 };
 
 static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
-       .handler        =       xfrm6_rcv,
-       .input_handler  =       xfrm_input,
+       .handler        =       xfrmi6_rcv,
+       .input_handler  =       xfrmi6_input,
        .cb_handler     =       xfrmi_rcv_cb,
        .err_handler    =       xfrmi6_err,
        .priority       =       10,
@@ -996,8 +1042,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
 #endif
 
 static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
-       .handler        =       xfrm4_rcv,
-       .input_handler  =       xfrm_input,
+       .handler        =       xfrmi4_rcv,
+       .input_handler  =       xfrmi4_input,
        .cb_handler     =       xfrmi_rcv_cb,
        .err_handler    =       xfrmi4_err,
        .priority       =       10,
index e9eb82c..5c61ec0 100644 (file)
@@ -336,7 +336,7 @@ static void xfrm_policy_timer(struct timer_list *t)
        }
        if (xp->lft.hard_use_expires_seconds) {
                time64_t tmo = xp->lft.hard_use_expires_seconds +
-                       (xp->curlft.use_time ? : xp->curlft.add_time) - now;
+                       (READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
@@ -354,7 +354,7 @@ static void xfrm_policy_timer(struct timer_list *t)
        }
        if (xp->lft.soft_use_expires_seconds) {
                time64_t tmo = xp->lft.soft_use_expires_seconds +
-                       (xp->curlft.use_time ? : xp->curlft.add_time) - now;
+                       (READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
                if (tmo <= 0) {
                        warn = 1;
                        tmo = XFRM_KM_TIMEOUT;
@@ -3661,7 +3661,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                return 1;
        }
 
-       pol->curlft.use_time = ktime_get_real_seconds();
+       /* This lockless write can happen from different cpus. */
+       WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
 
        pols[0] = pol;
        npols++;
@@ -3676,7 +3677,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                                xfrm_pol_put(pols[0]);
                                return 0;
                        }
-                       pols[1]->curlft.use_time = ktime_get_real_seconds();
+                       /* This write can happen from different cpus. */
+                       WRITE_ONCE(pols[1]->curlft.use_time,
+                                  ktime_get_real_seconds());
                        npols++;
                }
        }
@@ -3742,6 +3745,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                        goto reject;
                }
 
+               if (if_id)
+                       secpath_reset(skb);
+
                xfrm_pols_put(pols, npols);
                return 1;
        }
index 89c731f..00afe83 100644 (file)
@@ -577,7 +577,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
        if (x->km.state == XFRM_STATE_EXPIRED)
                goto expired;
        if (x->lft.hard_add_expires_seconds) {
-               long tmo = x->lft.hard_add_expires_seconds +
+               time64_t tmo = x->lft.hard_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0) {
                        if (x->xflags & XFRM_SOFT_EXPIRE) {
@@ -594,8 +594,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
                        next = tmo;
        }
        if (x->lft.hard_use_expires_seconds) {
-               long tmo = x->lft.hard_use_expires_seconds +
-                       (x->curlft.use_time ? : now) - now;
+               time64_t tmo = x->lft.hard_use_expires_seconds +
+                       (READ_ONCE(x->curlft.use_time) ? : now) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
@@ -604,7 +604,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
        if (x->km.dying)
                goto resched;
        if (x->lft.soft_add_expires_seconds) {
-               long tmo = x->lft.soft_add_expires_seconds +
+               time64_t tmo = x->lft.soft_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0) {
                        warn = 1;
@@ -616,8 +616,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
                }
        }
        if (x->lft.soft_use_expires_seconds) {
-               long tmo = x->lft.soft_use_expires_seconds +
-                       (x->curlft.use_time ? : now) - now;
+               time64_t tmo = x->lft.soft_use_expires_seconds +
+                       (READ_ONCE(x->curlft.use_time) ? : now) - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
@@ -1906,7 +1906,7 @@ out:
 
                hrtimer_start(&x1->mtimer, ktime_set(1, 0),
                              HRTIMER_MODE_REL_SOFT);
-               if (x1->curlft.use_time)
+               if (READ_ONCE(x1->curlft.use_time))
                        xfrm_state_check_expire(x1);
 
                if (x->props.smark.m || x->props.smark.v || x->if_id) {
@@ -1940,8 +1940,8 @@ int xfrm_state_check_expire(struct xfrm_state *x)
 {
        xfrm_dev_state_update_curlft(x);
 
-       if (!x->curlft.use_time)
-               x->curlft.use_time = ktime_get_real_seconds();
+       if (!READ_ONCE(x->curlft.use_time))
+               WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
 
        if (x->curlft.bytes >= x->lft.hard_byte_limit ||
            x->curlft.packets >= x->lft.hard_packet_limit) {
index db9518d..1134a49 100644 (file)
@@ -9423,6 +9423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
@@ -9433,6 +9434,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
@@ -9480,6 +9486,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+       SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
        SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
        SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
@@ -9523,6 +9530,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP),
        SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
        SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+       SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
@@ -9701,6 +9709,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
        SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
        SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+       SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
        SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
        SND_PCI_QUIRK(0x1d05, 0x1100, "TongFang GKxNRxx", ALC269_FIXUP_NO_SHUTUP),
index d3f58a3..b5b0d43 100644 (file)
@@ -493,12 +493,11 @@ int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
                dev_dbg(chip->card->dev,
                        "CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
                            *r_needed, *r_freed);
-               for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
-                       for (i = 0; i != chip->rmh.stat_len; ++i)
-                               dev_dbg(chip->card->dev,
-                                       "  stat[%d]: %x, %x\n", i,
-                                           chip->rmh.stat[i],
-                                           chip->rmh.stat[i] & MASK_DATA_SIZE);
+               for (i = 0; i < MAX_STREAM_BUFFER && i < chip->rmh.stat_len;
+                    ++i) {
+                       dev_dbg(chip->card->dev, "  stat[%d]: %x, %x\n", i,
+                               chip->rmh.stat[i],
+                               chip->rmh.stat[i] & MASK_DATA_SIZE);
                }
        }
 
index 9ddf6a3..28a0565 100644 (file)
@@ -729,14 +729,16 @@ static int es8326_probe(struct snd_soc_component *component)
        }
        dev_dbg(component->dev, "jack-pol %x", es8326->jack_pol);
 
-       ret = device_property_read_u8(component->dev, "everest,interrupt-src", &es8326->jack_pol);
+       ret = device_property_read_u8(component->dev, "everest,interrupt-src",
+                                     &es8326->interrupt_src);
        if (ret != 0) {
                dev_dbg(component->dev, "interrupt-src return %d", ret);
                es8326->interrupt_src = ES8326_HP_DET_SRC_PIN9;
        }
        dev_dbg(component->dev, "interrupt-src %x", es8326->interrupt_src);
 
-       ret = device_property_read_u8(component->dev, "everest,interrupt-clk", &es8326->jack_pol);
+       ret = device_property_read_u8(component->dev, "everest,interrupt-clk",
+                                     &es8326->interrupt_clk);
        if (ret != 0) {
                dev_dbg(component->dev, "interrupt-clk return %d", ret);
                es8326->interrupt_clk = 0x45;
index 3f981a9..c54ecf3 100644 (file)
@@ -167,7 +167,7 @@ static int rt715_sdca_read_prop(struct sdw_slave *slave)
        }
 
        /* set the timeout values */
-       prop->clk_stop_timeout = 20;
+       prop->clk_stop_timeout = 200;
 
        return 0;
 }
index beb4ec6..4e38eb7 100644 (file)
@@ -154,6 +154,7 @@ static const uint32_t tas5805m_volume[] = {
 #define TAS5805M_VOLUME_MIN    0
 
 struct tas5805m_priv {
+       struct i2c_client               *i2c;
        struct regulator                *pvdd;
        struct gpio_desc                *gpio_pdn_n;
 
@@ -165,6 +166,9 @@ struct tas5805m_priv {
        int                             vol[2];
        bool                            is_powered;
        bool                            is_muted;
+
+       struct work_struct              work;
+       struct mutex                    lock;
 };
 
 static void set_dsp_scale(struct regmap *rm, int offset, int vol)
@@ -181,13 +185,11 @@ static void set_dsp_scale(struct regmap *rm, int offset, int vol)
        regmap_bulk_write(rm, offset, v, ARRAY_SIZE(v));
 }
 
-static void tas5805m_refresh(struct snd_soc_component *component)
+static void tas5805m_refresh(struct tas5805m_priv *tas5805m)
 {
-       struct tas5805m_priv *tas5805m =
-               snd_soc_component_get_drvdata(component);
        struct regmap *rm = tas5805m->regmap;
 
-       dev_dbg(component->dev, "refresh: is_muted=%d, vol=%d/%d\n",
+       dev_dbg(&tas5805m->i2c->dev, "refresh: is_muted=%d, vol=%d/%d\n",
                tas5805m->is_muted, tas5805m->vol[0], tas5805m->vol[1]);
 
        regmap_write(rm, REG_PAGE, 0x00);
@@ -201,6 +203,9 @@ static void tas5805m_refresh(struct snd_soc_component *component)
        set_dsp_scale(rm, 0x24, tas5805m->vol[0]);
        set_dsp_scale(rm, 0x28, tas5805m->vol[1]);
 
+       regmap_write(rm, REG_PAGE, 0x00);
+       regmap_write(rm, REG_BOOK, 0x00);
+
        /* Set/clear digital soft-mute */
        regmap_write(rm, REG_DEVICE_CTRL_2,
                (tas5805m->is_muted ? DCTRL2_MUTE : 0) |
@@ -226,8 +231,11 @@ static int tas5805m_vol_get(struct snd_kcontrol *kcontrol,
        struct tas5805m_priv *tas5805m =
                snd_soc_component_get_drvdata(component);
 
+       mutex_lock(&tas5805m->lock);
        ucontrol->value.integer.value[0] = tas5805m->vol[0];
        ucontrol->value.integer.value[1] = tas5805m->vol[1];
+       mutex_unlock(&tas5805m->lock);
+
        return 0;
 }
 
@@ -243,11 +251,13 @@ static int tas5805m_vol_put(struct snd_kcontrol *kcontrol,
                snd_soc_kcontrol_component(kcontrol);
        struct tas5805m_priv *tas5805m =
                snd_soc_component_get_drvdata(component);
+       int ret = 0;
 
        if (!(volume_is_valid(ucontrol->value.integer.value[0]) &&
              volume_is_valid(ucontrol->value.integer.value[1])))
                return -EINVAL;
 
+       mutex_lock(&tas5805m->lock);
        if (tas5805m->vol[0] != ucontrol->value.integer.value[0] ||
            tas5805m->vol[1] != ucontrol->value.integer.value[1]) {
                tas5805m->vol[0] = ucontrol->value.integer.value[0];
@@ -256,11 +266,12 @@ static int tas5805m_vol_put(struct snd_kcontrol *kcontrol,
                        tas5805m->vol[0], tas5805m->vol[1],
                        tas5805m->is_powered);
                if (tas5805m->is_powered)
-                       tas5805m_refresh(component);
-               return 1;
+                       tas5805m_refresh(tas5805m);
+               ret = 1;
        }
+       mutex_unlock(&tas5805m->lock);
 
-       return 0;
+       return ret;
 }
 
 static const struct snd_kcontrol_new tas5805m_snd_controls[] = {
@@ -294,54 +305,83 @@ static int tas5805m_trigger(struct snd_pcm_substream *substream, int cmd,
        struct snd_soc_component *component = dai->component;
        struct tas5805m_priv *tas5805m =
                snd_soc_component_get_drvdata(component);
-       struct regmap *rm = tas5805m->regmap;
-       unsigned int chan, global1, global2;
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               dev_dbg(component->dev, "DSP startup\n");
-
-               /* We mustn't issue any I2C transactions until the I2S
-                * clock is stable. Furthermore, we must allow a 5ms
-                * delay after the first set of register writes to
-                * allow the DSP to boot before configuring it.
-                */
-               usleep_range(5000, 10000);
-               send_cfg(rm, dsp_cfg_preboot,
-                       ARRAY_SIZE(dsp_cfg_preboot));
-               usleep_range(5000, 15000);
-               send_cfg(rm, tas5805m->dsp_cfg_data,
-                       tas5805m->dsp_cfg_len);
-
-               tas5805m->is_powered = true;
-               tas5805m_refresh(component);
+               dev_dbg(component->dev, "clock start\n");
+               schedule_work(&tas5805m->work);
                break;
 
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               dev_dbg(component->dev, "DSP shutdown\n");
+               break;
 
-               tas5805m->is_powered = false;
+       default:
+               return -EINVAL;
+       }
 
-               regmap_write(rm, REG_PAGE, 0x00);
-               regmap_write(rm, REG_BOOK, 0x00);
+       return 0;
+}
 
-               regmap_read(rm, REG_CHAN_FAULT, &chan);
-               regmap_read(rm, REG_GLOBAL_FAULT1, &global1);
-               regmap_read(rm, REG_GLOBAL_FAULT2, &global2);
+static void do_work(struct work_struct *work)
+{
+       struct tas5805m_priv *tas5805m =
+              container_of(work, struct tas5805m_priv, work);
+       struct regmap *rm = tas5805m->regmap;
 
-               dev_dbg(component->dev,
-                       "fault regs: CHAN=%02x, GLOBAL1=%02x, GLOBAL2=%02x\n",
-                       chan, global1, global2);
+       dev_dbg(&tas5805m->i2c->dev, "DSP startup\n");
 
-               regmap_write(rm, REG_DEVICE_CTRL_2, DCTRL2_MODE_HIZ);
-               break;
+       mutex_lock(&tas5805m->lock);
+       /* We mustn't issue any I2C transactions until the I2S
+        * clock is stable. Furthermore, we must allow a 5ms
+        * delay after the first set of register writes to
+        * allow the DSP to boot before configuring it.
+        */
+       usleep_range(5000, 10000);
+       send_cfg(rm, dsp_cfg_preboot, ARRAY_SIZE(dsp_cfg_preboot));
+       usleep_range(5000, 15000);
+       send_cfg(rm, tas5805m->dsp_cfg_data, tas5805m->dsp_cfg_len);
+
+       tas5805m->is_powered = true;
+       tas5805m_refresh(tas5805m);
+       mutex_unlock(&tas5805m->lock);
+}
 
-       default:
-               return -EINVAL;
+static int tas5805m_dac_event(struct snd_soc_dapm_widget *w,
+                             struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+       struct tas5805m_priv *tas5805m =
+               snd_soc_component_get_drvdata(component);
+       struct regmap *rm = tas5805m->regmap;
+
+       if (event & SND_SOC_DAPM_PRE_PMD) {
+               unsigned int chan, global1, global2;
+
+               dev_dbg(component->dev, "DSP shutdown\n");
+               cancel_work_sync(&tas5805m->work);
+
+               mutex_lock(&tas5805m->lock);
+               if (tas5805m->is_powered) {
+                       tas5805m->is_powered = false;
+
+                       regmap_write(rm, REG_PAGE, 0x00);
+                       regmap_write(rm, REG_BOOK, 0x00);
+
+                       regmap_read(rm, REG_CHAN_FAULT, &chan);
+                       regmap_read(rm, REG_GLOBAL_FAULT1, &global1);
+                       regmap_read(rm, REG_GLOBAL_FAULT2, &global2);
+
+                       dev_dbg(component->dev, "fault regs: CHAN=%02x, "
+                               "GLOBAL1=%02x, GLOBAL2=%02x\n",
+                               chan, global1, global2);
+
+                       regmap_write(rm, REG_DEVICE_CTRL_2, DCTRL2_MODE_HIZ);
+               }
+               mutex_unlock(&tas5805m->lock);
        }
 
        return 0;
@@ -354,7 +394,8 @@ static const struct snd_soc_dapm_route tas5805m_audio_map[] = {
 
 static const struct snd_soc_dapm_widget tas5805m_dapm_widgets[] = {
        SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
-       SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0,
+               tas5805m_dac_event, SND_SOC_DAPM_PRE_PMD),
        SND_SOC_DAPM_OUTPUT("OUT")
 };
 
@@ -375,11 +416,14 @@ static int tas5805m_mute(struct snd_soc_dai *dai, int mute, int direction)
        struct tas5805m_priv *tas5805m =
                snd_soc_component_get_drvdata(component);
 
+       mutex_lock(&tas5805m->lock);
        dev_dbg(component->dev, "set mute=%d (is_powered=%d)\n",
                mute, tas5805m->is_powered);
+
        tas5805m->is_muted = mute;
        if (tas5805m->is_powered)
-               tas5805m_refresh(component);
+               tas5805m_refresh(tas5805m);
+       mutex_unlock(&tas5805m->lock);
 
        return 0;
 }
@@ -434,6 +478,7 @@ static int tas5805m_i2c_probe(struct i2c_client *i2c)
        if (!tas5805m)
                return -ENOMEM;
 
+       tas5805m->i2c = i2c;
        tas5805m->pvdd = devm_regulator_get(dev, "pvdd");
        if (IS_ERR(tas5805m->pvdd)) {
                dev_err(dev, "failed to get pvdd supply: %ld\n",
@@ -507,6 +552,9 @@ static int tas5805m_i2c_probe(struct i2c_client *i2c)
        gpiod_set_value(tas5805m->gpio_pdn_n, 1);
        usleep_range(10000, 15000);
 
+       INIT_WORK(&tas5805m->work, do_work);
+       mutex_init(&tas5805m->lock);
+
        /* Don't register through devm. We need to be able to unregister
         * the component prior to deasserting PDN#
         */
@@ -527,6 +575,7 @@ static void tas5805m_i2c_remove(struct i2c_client *i2c)
        struct device *dev = &i2c->dev;
        struct tas5805m_priv *tas5805m = dev_get_drvdata(dev);
 
+       cancel_work_sync(&tas5805m->work);
        snd_soc_unregister_component(dev);
        gpiod_set_value(tas5805m->gpio_pdn_n, 0);
        usleep_range(10000, 15000);
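
The hunk above moves the tas5805m DSP bring-up out of the fast audio path and into a worker serialised by a mutex, queued at probe time and cancelled before unregistering. Below is a minimal, build-only sketch of that deferred-start pattern against the generic kernel workqueue API; it is not the driver code, and every name (demo_priv, demo_start, and so on) is hypothetical.

/*
 * Illustrative sketch only, not part of the commit above: heavy device
 * setup is queued from a context that must not sleep and executed later
 * in a worker, with a mutex serialising it against other callbacks.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct work_struct work;
	struct mutex lock;
	bool is_powered;
};

static void demo_do_work(struct work_struct *work)
{
	/* Recover the containing private struct from the work item. */
	struct demo_priv *priv = container_of(work, struct demo_priv, work);

	mutex_lock(&priv->lock);
	/* Slow setup (sleeps, long register sequences) is safe here,
	 * because the worker runs in process context. */
	priv->is_powered = true;
	mutex_unlock(&priv->lock);
}

static void demo_init(struct demo_priv *priv)
{
	INIT_WORK(&priv->work, demo_do_work);
	mutex_init(&priv->lock);
}

static void demo_start(struct demo_priv *priv)
{
	schedule_work(&priv->work);	/* safe to call from atomic context */
}

static void demo_stop(struct demo_priv *priv)
{
	cancel_work_sync(&priv->work);	/* wait for a running worker to finish */
}
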
index 1c9be8a..35a52c3 100644 (file)
@@ -1141,6 +1141,7 @@ static int fsl_sai_check_version(struct device *dev)
 
        sai->verid.version = val &
                (FSL_SAI_VERID_MAJOR_MASK | FSL_SAI_VERID_MINOR_MASK);
+       sai->verid.version >>= FSL_SAI_VERID_MINOR_SHIFT;
        sai->verid.feature = val & FSL_SAI_VERID_FEATURE_MASK;
 
        ret = regmap_read(sai->regmap, FSL_SAI_PARAM, &val);
index c3be24b..a79a2fb 100644 (file)
@@ -1401,13 +1401,17 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 
        template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
        kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
-       if (!kc)
+       if (!kc) {
+               ret = -ENOMEM;
                goto hdr_err;
+       }
 
        kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
                                     GFP_KERNEL);
-       if (!kcontrol_type)
+       if (!kcontrol_type) {
+               ret = -ENOMEM;
                goto hdr_err;
+       }
 
        for (i = 0; i < le32_to_cpu(w->num_kcontrols); i++) {
                control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
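
The topology fix above makes sure ret actually carries -ENOMEM when either allocation fails, rather than whatever value it last held, before jumping to the shared error label. The following standalone C sketch shows the same record-then-goto-cleanup shape; names are hypothetical and plain calloc/free stand in for the devm allocators used by the kernel code.

/* Illustrative only: set the error code before taking the common cleanup path. */
#include <stdio.h>
#include <stdlib.h>

static int build_widget(size_t num_controls)
{
	int ret = 0;
	int *kc = NULL;
	unsigned int *types = NULL;

	kc = calloc(num_controls, sizeof(*kc));
	if (!kc) {
		ret = -1;	/* record the failure... */
		goto err;	/* ...before jumping to the shared cleanup label */
	}

	types = calloc(num_controls, sizeof(*types));
	if (!types) {
		ret = -1;
		goto err;
	}

	/* ... use kc and types ... */

err:
	free(types);
	free(kc);
	return ret;
}

int main(void)
{
	printf("%d\n", build_widget(4));
	return 0;
}
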
index 6bd2888..d5ccd4d 100644 (file)
@@ -318,7 +318,6 @@ static irqreturn_t acp_irq_thread(int irq, void *context)
 {
        struct snd_sof_dev *sdev = context;
        const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
-       unsigned int base = desc->dsp_intr_base;
        unsigned int val, count = ACP_HW_SEM_RETRY_COUNT;
 
        val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat);
@@ -328,28 +327,20 @@ static irqreturn_t acp_irq_thread(int irq, void *context)
                return IRQ_HANDLED;
        }
 
-       val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
-       if (val & ACP_DSP_TO_HOST_IRQ) {
-               while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
-                       /* Wait until acquired HW Semaphore lock or timeout */
-                       count--;
-                       if (!count) {
-                               dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
-                               return IRQ_NONE;
-                       }
+       while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
+               /* Wait until acquired HW Semaphore lock or timeout */
+               count--;
+               if (!count) {
+                       dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
+                       return IRQ_NONE;
                }
-
-               sof_ops(sdev)->irq_thread(irq, sdev);
-               val |= ACP_DSP_TO_HOST_IRQ;
-               snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET, val);
-
-               /* Unlock or Release HW Semaphore */
-               snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
-
-               return IRQ_HANDLED;
        }
 
-       return IRQ_NONE;
+       sof_ops(sdev)->irq_thread(irq, sdev);
+       /* Unlock or Release HW Semaphore */
+       snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
+
+       return IRQ_HANDLED;
 };
 
 static irqreturn_t acp_irq_handler(int irq, void *dev_id)
@@ -360,8 +351,11 @@ static irqreturn_t acp_irq_handler(int irq, void *dev_id)
        unsigned int val;
 
        val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
-       if (val)
+       if (val) {
+               val |= ACP_DSP_TO_HOST_IRQ;
+               snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET, val);
                return IRQ_WAKE_THREAD;
+       }
 
        return IRQ_NONE;
 }
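
The ACP rework above splits the interrupt path: the hard-IRQ handler now acknowledges the DSP-to-host status bit and returns IRQ_WAKE_THREAD, while the threaded handler takes the hardware semaphore and dispatches the message. Here is a generic, build-only sketch of that threaded-IRQ split; the register layout, status bit, and names are invented for illustration and are not the SOF driver's.

/* Illustrative sketch of a hard-IRQ ack plus threaded bottom half. */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

struct demo_dev {
	void __iomem *regs;	/* hypothetical MMIO region */
};

#define DEMO_STAT	0x00	/* hypothetical status register */
#define DEMO_IRQ_BIT	0x01	/* hypothetical write-1-to-clear bit */

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	struct demo_dev *dd = dev_id;
	u32 val = readl(dd->regs + DEMO_STAT);

	if (!(val & DEMO_IRQ_BIT))
		return IRQ_NONE;

	/* Acknowledge in hard-IRQ context so the line stops asserting... */
	writel(val | DEMO_IRQ_BIT, dd->regs + DEMO_STAT);
	/* ...then defer the slow, sleep-capable work to the thread. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
	/* May sleep here: take mutexes, poll a hardware semaphore, etc. */
	return IRQ_HANDLED;
}

/* Both halves are registered together, e.g.:
 * request_threaded_irq(irq, demo_irq_handler, demo_irq_thread,
 *                      IRQF_SHARED, "demo", dd);
 */
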
index 8056422..0d6b82a 100644 (file)
@@ -349,6 +349,9 @@ int
 snd_emux_xg_control(struct snd_emux_port *port, struct snd_midi_channel *chan,
                    int param)
 {
+       if (param >= ARRAY_SIZE(chan->control))
+               return -EINVAL;
+
        return send_converted_effect(xg_effects, ARRAY_SIZE(xg_effects),
                                     port, chan, param,
                                     chan->control[param],
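
The emux change above rejects an out-of-range XG parameter before it is used to index chan->control[]. A standalone C sketch of the same bounds-check-before-indexing shape follows; the array size and names are hypothetical.

/* Illustrative only: validate the index before reading the array. */
#include <stdio.h>

#define NUM_CONTROLS 128

static int get_control(const int *control, int param)
{
	if (param < 0 || param >= NUM_CONTROLS)
		return -1;	/* reject, like returning -EINVAL */

	return control[param];
}

int main(void)
{
	int control[NUM_CONTROLS] = { 0 };

	printf("%d\n", get_control(control, 7));	/* valid index */
	printf("%d\n", get_control(control, 500));	/* rejected */
	return 0;
}
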
index 85973e5..fdb7f5d 100644 (file)
@@ -15,10 +15,6 @@ bool mirrored_kernelcore = false;
 
 struct page {};
 
-void __free_pages_core(struct page *page, unsigned int order)
-{
-}
-
 void memblock_free_pages(struct page *page, unsigned long pfn,
                         unsigned int order)
 {
index 9c79bbc..aff0a59 100755 (executable)
@@ -246,7 +246,7 @@ test_vlan_ingress_modify()
        bridge vlan add dev $swp2 vid 300
 
        tc filter add dev $swp1 ingress chain $(IS1 2) pref 3 \
-               protocol 802.1Q flower skip_sw vlan_id 200 \
+               protocol 802.1Q flower skip_sw vlan_id 200 src_mac $h1_mac \
                action vlan modify id 300 \
                action goto chain $(IS2 0 0)
 
index 1c4f866..3d8e4eb 100755 (executable)
@@ -914,14 +914,14 @@ sysctl_set()
        local value=$1; shift
 
        SYSCTL_ORIG[$key]=$(sysctl -n $key)
-       sysctl -qw $key=$value
+       sysctl -qw $key="$value"
 }
 
 sysctl_restore()
 {
        local key=$1; shift
 
-       sysctl -qw $key=${SYSCTL_ORIG["$key"]}
+       sysctl -qw $key="${SYSCTL_ORIG[$key]}"
 }
 
 forwarding_enable()
index d11d3d5..079f8f4 100755 (executable)
@@ -498,6 +498,12 @@ kill_events_pids()
        kill_wait $evts_ns2_pid
 }
 
+kill_tests_wait()
+{
+       kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1)
+       wait
+}
+
 pm_nl_set_limits()
 {
        local ns=$1
@@ -1694,6 +1700,7 @@ chk_subflow_nr()
        local subflow_nr=$3
        local cnt1
        local cnt2
+       local dump_stats
 
        if [ -n "${need_title}" ]; then
                printf "%03u %-36s %s" "${TEST_COUNT}" "${TEST_NAME}" "${msg}"
@@ -1711,7 +1718,12 @@ chk_subflow_nr()
                echo "[ ok ]"
        fi
 
-       [ "${dump_stats}" = 1 ] && ( ss -N $ns1 -tOni ; ss -N $ns1 -tOni | grep token; ip -n $ns1 mptcp endpoint )
+       if [ "${dump_stats}" = 1 ]; then
+               ss -N $ns1 -tOni
+               ss -N $ns1 -tOni | grep token
+               ip -n $ns1 mptcp endpoint
+               dump_stats
+       fi
 }
 
 chk_link_usage()
@@ -3049,7 +3061,7 @@ endpoint_tests()
                pm_nl_set_limits $ns1 2 2
                pm_nl_set_limits $ns2 2 2
                pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow &
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow 2>/dev/null &
 
                wait_mpj $ns1
                pm_nl_check_endpoint 1 "creation" \
@@ -3062,14 +3074,14 @@ endpoint_tests()
                pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
                pm_nl_check_endpoint 0 "modif is allowed" \
                        $ns2 10.0.2.2 id 1 flags signal
-               wait
+               kill_tests_wait
        fi
 
        if reset "delete and re-add"; then
                pm_nl_set_limits $ns1 1 1
                pm_nl_set_limits $ns2 1 1
                pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
-               run_tests $ns1 $ns2 10.0.1.1 4 0 0 slow &
+               run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
 
                wait_mpj $ns2
                pm_nl_del_endpoint $ns2 2 10.0.2.2
@@ -3079,7 +3091,7 @@ endpoint_tests()
                pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
                wait_mpj $ns2
                chk_subflow_nr "" "after re-add" 2
-               wait
+               kill_tests_wait
        fi
 }
 
index 704997f..8c3ac0a 100755 (executable)
@@ -293,19 +293,11 @@ setup-vm() {
        elif [[ -n $vtype && $vtype == "vnifilterg" ]]; then
           # Add per vni group config with 'bridge vni' api
           if [ -n "$group" ]; then
-             if [ "$family" == "v4" ]; then
-                if [ $mcast -eq 1 ]; then
-                   bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group $group
-                else
-                   bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote $group
-                fi
-             else
-                if [ $mcast -eq 1 ]; then
-                   bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group6 $group
-                else
-                   bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote6 $group
-                fi
-             fi
+               if [ $mcast -eq 1 ]; then
+                       bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group $group
+               else
+                       bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote $group
+               fi
           fi
        fi
        done