OSDN Git Service

Merge branches 'pm-sleep', 'pm-domains', 'pm-opp' and 'powercap'
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 26 Nov 2019 09:27:49 +0000 (10:27 +0100)
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 26 Nov 2019 09:27:49 +0000 (10:27 +0100)
* pm-sleep:
  PM / wakeirq: remove unnecessary parentheses
  PM / core: Clean up some function headers in power.h
  PM / hibernate: memory_bm_find_bit(): Tighten node optimisation

* pm-domains:
  PM / Domains: Convert to dev_to_genpd_safe() in genpd_syscore_switch()
  mmc: tmio: Avoid boilerplate code in ->runtime_suspend()
  PM / Domains: Implement the ->start() callback for genpd
  PM / Domains: Introduce dev_pm_domain_start()

* pm-opp:
  PM / OPP: Support adjusting OPP voltages at runtime

* powercap:
  powercap/intel_rapl: add support for Cometlake desktop
  powercap/intel_rapl: add support for CometLake Mobile

1128 files changed:
.mailmap
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/hw-vuln/index.rst
Documentation/admin-guide/hw-vuln/multihit.rst [new file with mode: 0644]
Documentation/admin-guide/hw-vuln/tsx_async_abort.rst [new file with mode: 0644]
Documentation/admin-guide/kernel-parameters.txt
Documentation/arm64/silicon-errata.rst
Documentation/devicetree/bindings/arm/omap/omap.txt
Documentation/devicetree/bindings/arm/rockchip.yaml
Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
Documentation/devicetree/bindings/devfreq/exynos-bus.txt
Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
Documentation/devicetree/bindings/riscv/cpus.yaml
Documentation/networking/device_drivers/intel/e100.rst
Documentation/networking/device_drivers/intel/e1000.rst
Documentation/networking/device_drivers/intel/e1000e.rst
Documentation/networking/device_drivers/intel/fm10k.rst
Documentation/networking/device_drivers/intel/i40e.rst
Documentation/networking/device_drivers/intel/iavf.rst
Documentation/networking/device_drivers/intel/ice.rst
Documentation/networking/device_drivers/intel/igb.rst
Documentation/networking/device_drivers/intel/igbvf.rst
Documentation/networking/device_drivers/intel/ixgbe.rst
Documentation/networking/device_drivers/intel/ixgbevf.rst
Documentation/networking/device_drivers/pensando/ionic.rst
Documentation/networking/ip-sysctl.txt
Documentation/networking/tls-offload.rst
Documentation/x86/index.rst
Documentation/x86/tsx_async_abort.rst [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/hsdk_defconfig
arch/arc/kernel/perf_event.c
arch/arm/boot/dts/am3517.dtsi
arch/arm/boot/dts/am3517_mt_ventoux.dts
arch/arm/boot/dts/am3874-iceboard.dts
arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi
arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
arch/arm/boot/dts/imx6-logicpd-som.dtsi
arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
arch/arm/boot/dts/imx7s.dtsi
arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/omap3-beagle-xm.dts
arch/arm/boot/dts/omap3-beagle.dts
arch/arm/boot/dts/omap3-cm-t3530.dts
arch/arm/boot/dts/omap3-cm-t3730.dts
arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
arch/arm/boot/dts/omap3-devkit8000.dts
arch/arm/boot/dts/omap3-gta04.dtsi
arch/arm/boot/dts/omap3-ha-lcd.dts
arch/arm/boot/dts/omap3-ha.dts
arch/arm/boot/dts/omap3-igep0020-rev-f.dts
arch/arm/boot/dts/omap3-igep0020.dts
arch/arm/boot/dts/omap3-igep0030-rev-g.dts
arch/arm/boot/dts/omap3-igep0030.dts
arch/arm/boot/dts/omap3-ldp.dts
arch/arm/boot/dts/omap3-lilly-a83x.dtsi
arch/arm/boot/dts/omap3-lilly-dbb056.dts
arch/arm/boot/dts/omap3-n9.dts
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/omap3-n950.dts
arch/arm/boot/dts/omap3-overo-storm-alto35.dts
arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts
arch/arm/boot/dts/omap3-overo-storm-gallop43.dts
arch/arm/boot/dts/omap3-overo-storm-palo35.dts
arch/arm/boot/dts/omap3-overo-storm-palo43.dts
arch/arm/boot/dts/omap3-overo-storm-summit.dts
arch/arm/boot/dts/omap3-overo-storm-tobi.dts
arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts
arch/arm/boot/dts/omap3-pandora-1ghz.dts
arch/arm/boot/dts/omap3-sbc-t3530.dts
arch/arm/boot/dts/omap3-sbc-t3730.dts
arch/arm/boot/dts/omap3-sniper.dts
arch/arm/boot/dts/omap3-thunder.dts
arch/arm/boot/dts/omap3-zoom3.dts
arch/arm/boot/dts/omap3430-sdp.dts
arch/arm/boot/dts/omap34xx.dtsi
arch/arm/boot/dts/omap36xx.dtsi
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/omap4-panda-common.dtsi
arch/arm/boot/dts/omap4-sdp.dts
arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap54xx-clocks.dtsi
arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
arch/arm/boot/dts/stm32mp157c-ev1.dts
arch/arm/boot/dts/stm32mp157c.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
arch/arm/boot/dts/vf610-zii-scu4-aib.dts
arch/arm/configs/davinci_all_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/include/asm/domain.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/head-common.S
arch/arm/kernel/head-nommu.S
arch/arm/mach-davinci/dm365.c
arch/arm/mach-imx/cpuidle-imx6q.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-sunxi/mc_smp.c
arch/arm/mach-tegra/cpuidle-tegra20.c
arch/arm/mm/alignment.c
arch/arm/mm/proc-v7m.S
arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi
arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
arch/arm64/boot/dts/freescale/imx8mm.dtsi
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
arch/arm64/boot/dts/freescale/imx8mq.dtsi
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/vdso/vsyscall.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kvm/sys_regs.c
arch/mips/bcm63xx/prom.c
arch/mips/include/asm/bmips.h
arch/mips/include/asm/vdso/gettimeofday.h
arch/mips/include/asm/vdso/vsyscall.h
arch/mips/kernel/smp-bmips.c
arch/mips/mm/tlbex.c
arch/mips/sgi-ip27/Kconfig
arch/mips/sgi-ip27/ip27-init.c
arch/mips/sgi-ip27/ip27-memory.c
arch/parisc/kernel/entry.S
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/elf.h
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/prom_init_check.sh
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/kvm/book3s_xive.h
arch/powerpc/kvm/book3s_xive_native.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/powernv/eeh-powernv.c
arch/powerpc/platforms/powernv/smp.c
arch/riscv/include/asm/bug.h
arch/riscv/include/asm/io.h
arch/riscv/include/asm/irq.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/switch_to.h
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/head.h [new file with mode: 0644]
arch/riscv/kernel/irq.c
arch/riscv/kernel/module-sections.c
arch/riscv/kernel/process.c
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/reset.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/signal.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/smpboot.c
arch/riscv/kernel/syscall_table.c
arch/riscv/kernel/time.c
arch/riscv/kernel/traps.c
arch/riscv/kernel/vdso.c
arch/riscv/mm/context.c
arch/riscv/mm/fault.c
arch/riscv/mm/init.c
arch/riscv/mm/sifive_l2_cache.c
arch/s390/boot/startup.c
arch/s390/include/asm/unwind.h
arch/s390/kernel/idle.c
arch/s390/kernel/machine_kexec_reloc.c
arch/s390/kernel/unwind_bc.c
arch/s390/mm/cmm.c
arch/sparc/vdso/Makefile
arch/um/drivers/ubd_kern.c
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/events/amd/ibs.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/vmware.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/resctrl/ctrlmondata.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/cpu/tsx.c [new file with mode: 0644]
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/tsc.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/nested.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/xen/enlighten_pv.c
block/bfq-iosched.c
block/bio.c
block/blk-cgroup.c
block/blk-iocost.c
drivers/acpi/nfit/core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/acpi/processor_thermal.c
drivers/amba/bus.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/ata/libahci_platform.c
drivers/base/cpu.c
drivers/base/memory.c
drivers/base/power/common.c
drivers/base/power/domain.c
drivers/base/power/qos.c
drivers/block/drbd/drbd_main.c
drivers/block/nbd.c
drivers/block/rbd.c
drivers/block/rsxx/core.c
drivers/bus/ti-sysc.c
drivers/char/hw_random/core.c
drivers/char/random.c
drivers/clk/at91/clk-main.c
drivers/clk/at91/sam9x60.c
drivers/clk/at91/sckc.c
drivers/clk/clk-ast2600.c
drivers/clk/imx/clk-imx8mm.c
drivers/clk/imx/clk-imx8mn.c
drivers/clk/meson/g12a.c
drivers/clk/meson/gxbb.c
drivers/clk/samsung/clk-exynos5420.c
drivers/clk/samsung/clk-exynos5433.c
drivers/clk/sunxi-ng/ccu-sun9i-a80.c
drivers/clk/sunxi/clk-sunxi.c
drivers/clk/ti/clk-dra7-atl.c
drivers/clk/ti/clkctrl.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/timer-mediatek.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/arm_big_little.c [deleted file]
drivers/cpufreq/arm_big_little.h [deleted file]
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/imx-cpufreq-dt.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
drivers/cpufreq/s3c64xx-cpufreq.c
drivers/cpufreq/scpi-cpufreq.c
drivers/cpufreq/sun50i-cpufreq-nvmem.c
drivers/cpufreq/ti-cpufreq.c
drivers/cpufreq/vexpress-spc-cpufreq.c
drivers/cpuidle/cpuidle-haltpoll.c
drivers/cpuidle/cpuidle-powernv.c
drivers/cpuidle/cpuidle.c
drivers/cpuidle/driver.c
drivers/cpuidle/governor.c
drivers/cpuidle/governors/haltpoll.c
drivers/cpuidle/governors/ladder.c
drivers/cpuidle/governors/menu.c
drivers/cpuidle/governors/teo.c
drivers/cpuidle/poll_state.c
drivers/cpuidle/sysfs.c
drivers/crypto/chelsio/chtls/chtls_cm.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/devfreq/devfreq.c
drivers/devfreq/event/exynos-ppmu.c
drivers/devfreq/governor.h
drivers/devfreq/tegra30-devfreq.c
drivers/dma/imx-sdma.c
drivers/dma/qcom/bam_dma.c
drivers/dma/sprd-dma.c
drivers/dma/tegra210-adma.c
drivers/dma/ti/cppi41.c
drivers/dma/xilinx/xilinx_dma.c
drivers/edac/ghes_edac.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/arm32-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/test/efi_test.c
drivers/firmware/efi/tpm.c
drivers/gpio/gpio-merrifield.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/display/dc/calcs/Makefile
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
drivers/gpu/drm/amd/display/dc/dcn20/Makefile
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/Makefile
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
drivers/gpu/drm/amd/display/dc/dsc/Makefile
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_self_refresh_helper.c
drivers/gpu/drm/etnaviv/etnaviv_dump.c
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/i915/display/intel_crt.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.h
drivers/gpu/drm/i915/display/intel_hdmi.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_context_types.h
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_mocs.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_getparam.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_pm.h
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/v3d/v3d_gem.c
drivers/hid/hid-axff.c
drivers/hid/hid-core.c
drivers/hid/hid-dr.c
drivers/hid/hid-emsff.c
drivers/hid/hid-gaff.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-holtekff.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg2ff.c
drivers/hid/hid-lg3ff.c
drivers/hid/hid-lg4ff.c
drivers/hid/hid-lgff.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-microsoft.c
drivers/hid/hid-prodikeys.c
drivers/hid/hid-sony.c
drivers/hid/hid-tmff.c
drivers/hid/hid-zpff.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
drivers/hid/wacom.h
drivers/hid/wacom_wac.c
drivers/hwmon/ina3221.c
drivers/hwmon/nct7904.c
drivers/hwtracing/intel_th/gth.c
drivers/hwtracing/intel_th/msu.c
drivers/hwtracing/intel_th/pci.c
drivers/i2c/busses/i2c-aspeed.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/i2c/i2c-core-acpi.c
drivers/i2c/i2c-core-of.c
drivers/iio/adc/stm32-adc.c
drivers/iio/imu/adis16480.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
drivers/iio/proximity/srf04.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/device.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/netlink.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/tid_rdma.h
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/hns/hns_roce_hem.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_srq.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/sw/siw/siw_qp.c
drivers/infiniband/sw/siw/siw_verbs.c
drivers/input/ff-memless.c
drivers/input/mouse/synaptics.c
drivers/input/rmi4/rmi_f11.c
drivers/input/rmi4/rmi_f12.c
drivers/input/rmi4/rmi_f54.c
drivers/input/touchscreen/cyttsp4_core.c
drivers/input/touchscreen/st1232.c
drivers/interconnect/core.c
drivers/interconnect/qcom/qcs404.c
drivers/interconnect/qcom/sdm845.c
drivers/iommu/amd_iommu_quirks.c
drivers/iommu/intel-iommu.c
drivers/iommu/ipmmu-vmsa.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-sifive-plic.c
drivers/isdn/capi/capi.c
drivers/macintosh/windfarm_cpufreq_clamp.c
drivers/mfd/mt6397-core.c
drivers/mmc/host/cqhci.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/tmio_mmc.h
drivers/mmc/host/tmio_mmc_core.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can.h
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/rx-offload.c
drivers/net/can/slcan.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/mcba_usb.c
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/usb_8dev.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/mv88e6xxx/ptp.c
drivers/net/dsa/sja1105/Kconfig
drivers/net/ethernet/arc/emac_rockchip.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/cirrus/ep93xx_eth.c
drivers/net/ethernet/cortina/gemini.c
drivers/net/ethernet/cortina/gemini.h
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
drivers/net/ethernet/freescale/dpaa2/dprtc.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/google/gve/gve_tx.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/marvell/mvneta_bm.h
drivers/net/ethernet/marvell/octeontx2/af/cgx.h
drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
drivers/net/ethernet/marvell/octeontx2/af/common.h
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
drivers/net/ethernet/marvell/octeontx2/af/npc.h
drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/microchip/lan743x_ptp.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot.h
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_main.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/ravb_ptp.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac5.h
drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
drivers/net/fjes/fjes_main.c
drivers/net/hamradio/bpqether.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/netdevsim/dev.c
drivers/net/phy/dp83640.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/phylink.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/slip/slip.c
drivers/net/team/team.c
drivers/net/usb/ax88172a.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wimax/i2400m/op-rfkill.c
drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intersil/hostap/hostap_hw.c
drivers/net/wireless/mediatek/mt76/Makefile
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
drivers/net/wireless/mediatek/mt76/pci.c [new file with mode: 0644]
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/ps.c
drivers/net/wireless/virt_wifi.c
drivers/nfc/fdp/i2c.c
drivers/nfc/nxp-nci/i2c.c
drivers/nfc/st21nfca/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/of/of_reserved_mem.c
drivers/of/unittest.c
drivers/opp/core.c
drivers/opp/of.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/aspeed/pinmux-aspeed.h
drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
drivers/pinctrl/bcm/pinctrl-ns2-mux.c
drivers/pinctrl/berlin/pinctrl-as370.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinctrl-stmfx.c
drivers/powercap/intel_rapl_common.c
drivers/ptp/ptp_chardev.c
drivers/pwm/core.c
drivers/pwm/pwm-bcm-iproc.c
drivers/regulator/core.c
drivers/regulator/da9062-regulator.c
drivers/regulator/fixed.c
drivers/regulator/lochnagar-regulator.c
drivers/regulator/of_regulator.c
drivers/regulator/pfuze100-regulator.c
drivers/regulator/qcom-rpmh-regulator.c
drivers/regulator/ti-abb-regulator.c
drivers/reset/core.c
drivers/s390/crypto/zcrypt_api.c
drivers/scsi/Kconfig
drivers/scsi/ch.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/hpsa.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/sd_zbc.c
drivers/scsi/sni_53c710.c
drivers/scsi/ufs/ufs_bsg.c
drivers/soc/imx/gpc.c
drivers/soc/imx/soc-imx-scu.c
drivers/soundwire/Kconfig
drivers/soundwire/intel.c
drivers/soundwire/slave.c
drivers/staging/wlan-ng/cfg80211.c
drivers/target/iscsi/cxgbit/cxgbit_cm.c
drivers/target/target_core_device.c
drivers/thermal/cpu_cooling.c
drivers/thunderbolt/nhi_ops.c
drivers/thunderbolt/switch.c
drivers/tty/serial/8250/8250_men_mcb.c
drivers/usb/cdns3/core.c
drivers/usb/cdns3/gadget.c
drivers/usb/cdns3/host-export.h
drivers/usb/cdns3/host.c
drivers/usb/class/usblp.c
drivers/usb/core/config.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/fsl_udc_core.c
drivers/usb/gadget/udc/lpc32xx_udc.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/xhci-debugfs.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/misc/ldusb.c
drivers/usb/misc/legousbtower.c
drivers/usb/mtu3/mtu3_core.c
drivers/usb/renesas_usbhs/common.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/whiteheat.c
drivers/usb/serial/whiteheat.h
drivers/usb/storage/scsiglue.c
drivers/usb/storage/uas.c
drivers/usb/usbip/vhci_tx.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vringh.c
drivers/video/fbdev/c2p_core.h
drivers/virtio/virtio_ring.c
drivers/watchdog/bd70528_wdt.c
drivers/watchdog/cpwd.c
drivers/watchdog/imx_sc_wdt.c
drivers/watchdog/meson_gxbb_wdt.c
drivers/watchdog/pm8916_wdt.c
fs/afs/dir.c
fs/aio.c
fs/autofs/expire.c
fs/btrfs/block-group.c
fs/btrfs/ctree.h
fs/btrfs/delalloc-space.c
fs/btrfs/disk-io.c
fs/btrfs/file.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/relocation.c
fs/btrfs/space-info.c
fs/btrfs/tree-checker.c
fs/btrfs/volumes.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/super.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/smb1ops.c
fs/cifs/smb2file.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.h
fs/cifs/transport.c
fs/configfs/symlink.c
fs/dax.c
fs/ecryptfs/inode.c
fs/exportfs/expfs.c
fs/fs-writeback.c
fs/fuse/Makefile
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/fuse/virtio_fs.c
fs/gfs2/ops_fstype.c
fs/io_uring.c
fs/namespace.c
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/nfs4proc.c
fs/ocfs2/file.c
include/acpi/processor.h
include/asm-generic/vdso/vsyscall.h
include/drm/drm_gem_shmem_helper.h
include/drm/drm_self_refresh_helper.h
include/dt-bindings/pmu/exynos_ppmu.h [new file with mode: 0644]
include/linux/bpf.h
include/linux/can/core.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/cpuidle.h
include/linux/dynamic_debug.h
include/linux/efi.h
include/linux/export.h
include/linux/filter.h
include/linux/gfp.h
include/linux/idr.h
include/linux/if_macvlan.h
include/linux/if_team.h
include/linux/if_vlan.h
include/linux/intel-iommu.h
include/linux/kvm_host.h
include/linux/memory.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/netdevice.h
include/linux/page-flags.h
include/linux/perf_event.h
include/linux/platform_data/dma-imx-sdma.h
include/linux/pm.h
include/linux/pm_domain.h
include/linux/pm_opp.h
include/linux/pm_qos.h
include/linux/radix-tree.h
include/linux/reset-controller.h
include/linux/reset.h
include/linux/security.h
include/linux/skbuff.h
include/linux/skmsg.h
include/linux/socket.h
include/linux/sunrpc/bc_xprt.h
include/linux/sysfs.h
include/linux/virtio_vsock.h
include/net/bonding.h
include/net/busy_poll.h
include/net/devlink.h
include/net/flow_dissector.h
include/net/fq.h
include/net/fq_impl.h
include/net/hwbm.h
include/net/ip.h
include/net/ip_vs.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netfilter/nf_tables.h
include/net/sch_generic.h
include/net/sock.h
include/net/tls.h
include/net/vxlan.h
include/rdma/ib_verbs.h
include/sound/simple_card_utils.h
include/trace/events/btrfs.h
include/trace/events/tcp.h
include/uapi/linux/can.h
include/uapi/linux/can/bcm.h
include/uapi/linux/can/error.h
include/uapi/linux/can/gw.h
include/uapi/linux/can/j1939.h
include/uapi/linux/can/netlink.h
include/uapi/linux/can/raw.h
include/uapi/linux/can/vxcan.h
include/uapi/linux/devlink.h
include/uapi/linux/fuse.h
include/uapi/linux/nvme_ioctl.h
include/uapi/linux/ptp_clock.h
include/uapi/linux/sched.h
kernel/audit_watch.c
kernel/bpf/cgroup.c
kernel/bpf/core.c
kernel/bpf/devmap.c
kernel/bpf/syscall.c
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/cpu.c
kernel/events/core.c
kernel/fork.c
kernel/irq/irqdomain.c
kernel/power/qos.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stop_task.c
kernel/sched/topology.c
kernel/signal.c
kernel/stacktrace.c
kernel/time/ntp.c
kernel/time/posix-cpu-timers.c
kernel/time/sched_clock.c
kernel/time/vsyscall.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events_hist.c
lib/Kconfig
lib/dump_stack.c
lib/idr.c
lib/radix-tree.c
lib/test_xarray.c
lib/vdso/gettimeofday.c
lib/xarray.c
lib/xz/xz_dec_lzma2.c
mm/debug.c
mm/hugetlb_cgroup.c
mm/khugepaged.c
mm/madvise.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/mmu_notifier.c
mm/page_alloc.c
mm/page_io.c
mm/slab.h
mm/slub.c
mm/vmstat.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/atm/common.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v_ogm.c
net/batman-adv/hard-interface.c
net/batman-adv/soft-interface.c
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/af_bluetooth.c
net/bridge/br_device.c
net/bridge/netfilter/ebt_dnat.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/caif/caif_socket.c
net/can/af_can.c
net/can/j1939/main.c
net/can/j1939/socket.c
net/can/j1939/transport.c
net/core/datagram.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/devlink.c
net/core/ethtool.c
net/core/flow_dissector.c
net/core/lwt_bpf.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skmsg.c
net/core/sock.c
net/dccp/ipv4.c
net/decnet/af_decnet.c
net/dsa/master.c
net/dsa/slave.c
net/dsa/tag_8021q.c
net/ieee802154/6lowpan/core.c
net/ipv4/datagram.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ipmr.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/addrconf_core.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_gre.c
net/ipv6/route.c
net/ipv6/seg6_local.c
net/ipv6/udp.c
net/l2tp/l2tp_eth.c
net/mac80211/main.c
net/mac80211/sta_info.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_ipmac.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_pe.c
net/netfilter/ipvs/ip_vs_sched.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_offload.c
net/netfilter/nft_bitwise.c
net/netfilter/nft_cmp.c
net/netfilter/nft_payload.c
net/netrom/af_netrom.c
net/nfc/llcp_sock.c
net/nfc/netlink.c
net/openvswitch/datapath.c
net/openvswitch/vport-internal_dev.c
net/phonet/socket.c
net/rds/ib_cm.c
net/rose/af_rose.c
net/rxrpc/ar-internal.h
net/rxrpc/recvmsg.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/sch_generic.c
net/sched/sch_hhf.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_taprio.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc_core.c
net/smc/smc_pnet.c
net/sunrpc/backchannel_rqst.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/backchannel.c
net/tipc/core.c
net/tipc/core.h
net/tipc/socket.c
net/tls/tls_device.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/chan.c
net/wireless/nl80211.c
net/wireless/util.c
net/xdp/xdp_umem.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_state.c
samples/bpf/Makefile
scripts/gdb/linux/symbols.py
scripts/mod/modpost.c
scripts/mod/modpost.h
scripts/nsdeps
scripts/tools-support-relr.sh
security/lockdown/lockdown.c
sound/core/compress_offload.c
sound/core/pcm_lib.c
sound/core/timer.c
sound/firewire/bebob/bebob_focusrite.c
sound/firewire/bebob/bebob_stream.c
sound/hda/hdac_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/hdac_hda.c
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/max98373.c
sound/soc/codecs/msm8916-wcd-analog.c
sound/soc/codecs/msm8916-wcd-digital.c
sound/soc/codecs/rt5651.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/boards/sof_rt5682.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/rockchip/rockchip_max98090.c
sound/soc/samsung/arndale_rt5631.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/dma.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/sof/control.c
sound/soc/sof/debug.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/bdw.c
sound/soc/sof/intel/byt.c
sound/soc/sof/intel/hda-ctrl.c
sound/soc/sof/intel/hda-loader.c
sound/soc/sof/intel/hda-stream.c
sound/soc/sof/intel/hda.c
sound/soc/sof/intel/hda.h
sound/soc/sof/ipc.c
sound/soc/sof/loader.c
sound/soc/sof/pcm.c
sound/soc/sof/topology.c
sound/soc/stm/stm32_sai_sub.c
sound/soc/ti/sdma-pcm.c
sound/usb/endpoint.c
sound/usb/mixer.c
sound/usb/quirks.c
sound/usb/validate.c
tools/arch/x86/include/uapi/asm/svm.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/gpio/Makefile
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/sched.h
tools/perf/builtin-c2c.c
tools/perf/builtin-kmem.c
tools/perf/jvmti/Build
tools/perf/perf-sys.h
tools/perf/util/annotate.c
tools/perf/util/copyfile.c
tools/perf/util/evlist.c
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/trace-event-parse.c
tools/perf/util/trace-event.h
tools/perf/util/util.c
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/test_sysctl.c
tools/testing/selftests/bpf/test_tc_edt.sh
tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/include/x86_64/vmx.h
tools/testing/selftests/kvm/lib/assert.c
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/kvm/x86_64/sync_regs_test.c
tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/l2tp.sh [changed mode: 0644->0755]
tools/testing/selftests/net/reuseport_dualstack.c
tools/testing/selftests/net/tls.c
tools/testing/selftests/ptp/testptp.c
tools/testing/selftests/vm/gup_benchmark.c
tools/usb/usbip/libsrc/usbip_device_driver.c
virt/kvm/arm/pmu.c
virt/kvm/kvm_main.c

index edcac87..fd62192 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -108,6 +108,10 @@ Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
 Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
+Jayachandran C <c.jayachandran@gmail.com> <jayachandranc@netlogicmicro.com>
+Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
+Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
+Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
 Jean Tourrilhes <jt@hpl.hp.com>
 <jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
@@ -196,7 +200,8 @@ Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
 Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
-Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
+Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
+Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
 Peter A Jonsson <pj@ludd.ltu.se>
 Peter Oruba <peter@oruba.de>
 Peter Oruba <peter.oruba@amd.com>
@@ -229,6 +234,7 @@ Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
 Shuah Khan <shuah@kernel.org> <shuahkh@osg.samsung.com>
 Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
+Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
index 06d0931..fc20cde 100644 (file)
@@ -486,6 +486,8 @@ What:               /sys/devices/system/cpu/vulnerabilities
                /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
                /sys/devices/system/cpu/vulnerabilities/l1tf
                /sys/devices/system/cpu/vulnerabilities/mds
+               /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+               /sys/devices/system/cpu/vulnerabilities/itlb_multihit
 Date:          January 2018
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Information about CPU vulnerabilities
index 49311f3..0795e3c 100644 (file)
@@ -12,3 +12,5 @@ are configurable at compile, boot or run time.
    spectre
    l1tf
    mds
+   tsx_async_abort
+   multihit
diff --git a/Documentation/admin-guide/hw-vuln/multihit.rst b/Documentation/admin-guide/hw-vuln/multihit.rst
new file mode 100644 (file)
index 0000000..ba9988d
--- /dev/null
@@ -0,0 +1,163 @@
+iTLB multihit
+=============
+
+iTLB multihit is an erratum where some processors may incur a machine check
+error, possibly resulting in an unrecoverable CPU lockup, when an
+instruction fetch hits multiple entries in the instruction TLB. This can
+occur when the page size is changed along with either the physical address
+or cache type. A malicious guest running on a virtualized system can
+exploit this erratum to perform a denial of service attack.
+
+
+Affected processors
+-------------------
+
+Variations of this erratum are present on most Intel Core and Xeon processor
+models. The erratum is not present on:
+
+   - non-Intel processors
+
+   - Some Atoms (Airmont, Bonnell, Goldmont, GoldmontPlus, Saltwell, Silvermont)
+
+   - Intel processors that have the PSCHANGE_MC_NO bit set in the
+     IA32_ARCH_CAPABILITIES MSR.
+
+
+Related CVEs
+------------
+
+The following CVE entry is related to this issue:
+
+   ==============  =================================================
+   CVE-2018-12207  Machine Check Error Avoidance on Page Size Change
+   ==============  =================================================
+
+
+Problem
+-------
+
+Privileged software, including OS and virtual machine managers (VMM), are in
+charge of memory management. A key component in memory management is the control
+of the page tables. Modern processors use virtual memory, a technique that creates
+the illusion of a very large memory for processors. This virtual space is split
+into pages of a given size. Page tables translate virtual addresses to physical
+addresses.
+
+To reduce latency when performing a virtual to physical address translation,
+processors include a structure, called TLB, that caches recent translations.
+There are separate TLBs for instruction (iTLB) and data (dTLB).
+
+Under this errata, instructions are fetched from a linear address translated
+using a 4 KB translation cached in the iTLB. Privileged software modifies the
+paging structure so that the same linear address is mapped using a large page
+size (2 MB, 4 MB, 1 GB) with a different physical address or memory type.  After the page
+structure modification but before the software invalidates any iTLB entries for
+the linear address, a code fetch that happens on the same linear address may
+cause a machine-check error which can result in a system hang or shutdown.
+
+
+Attack scenarios
+----------------
+
+Attacks against the iTLB multihit erratum can be mounted from malicious
+guests in a virtualized system.
+
+
+iTLB multihit system information
+--------------------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current iTLB
+multihit status of the system: whether the system is vulnerable and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/itlb_multihit
+
+The possible values in this file are:
+
+.. list-table::
+
+     * - Not affected
+       - The processor is not vulnerable.
+     * - KVM: Mitigation: Split huge pages
+       - Software changes mitigate this issue.
+     * - KVM: Vulnerable
+       - The processor is vulnerable, but no mitigation enabled
+
+
+Enumeration of the erratum
+--------------------------------
+
+A new bit has been allocated in the IA32_ARCH_CAPABILITIES (PSCHANGE_MC_NO) msr
+and will be set on CPUs which are mitigated against this issue.
+
+   =======================================   ===========   ===============================
+   IA32_ARCH_CAPABILITIES MSR                Not present   Possibly vulnerable,check model
+   IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO]    '0'           Likely vulnerable,check model
+   IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO]    '1'           Not vulnerable
+   =======================================   ===========   ===============================
+
+
+Mitigation mechanism
+-------------------------
+
+This erratum can be mitigated by restricting the use of large page sizes to
+non-executable pages.  This forces all iTLB entries to be 4K, and removes
+the possibility of multiple hits.
+
+In order to mitigate the vulnerability, KVM initially marks all huge pages
+as non-executable. If the guest attempts to execute in one of those pages,
+the page is broken down into 4K pages, which are then marked executable.
+
+If EPT is disabled or not available on the host, KVM is in control of TLB
+flushes and the problematic situation cannot happen.  However, the shadow
+EPT paging mechanism used by nested virtualization is vulnerable, because
+the nested guest can trigger multiple iTLB hits by modifying its own
+(non-nested) page tables.  For simplicity, KVM will make large pages
+non-executable in all shadow paging modes.
+
+Mitigation control on the kernel command line and KVM - module parameter
+------------------------------------------------------------------------
+
+The KVM hypervisor mitigation mechanism for marking huge pages as
+non-executable can be controlled with a module parameter "nx_huge_pages=".
+The kernel command line allows to control the iTLB multihit mitigations at
+boot time with the option "kvm.nx_huge_pages=".
+
+The valid arguments for these options are:
+
+  ==========  ================================================================
+  force       Mitigation is enabled. In this case, the mitigation implements
+              non-executable huge pages in Linux kernel KVM module. All huge
+              pages in the EPT are marked as non-executable.
+              If a guest attempts to execute in one of those pages, the page is
+              broken down into 4K pages, which are then marked executable.
+
+  off        Mitigation is disabled.
+
+  auto        Enable mitigation only if the platform is affected and the kernel
+              was not booted with the "mitigations=off" command line parameter.
+             This is the default option.
+  ==========  ================================================================
+
+
+Mitigation selection guide
+--------------------------
+
+1. No virtualization in use
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   The system is protected by the kernel unconditionally and no further
+   action is required.
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   If the guest comes from a trusted source, you may assume that the guest will
+   not attempt to maliciously exploit these errata and no further action is
+   required.
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   If the guest comes from an untrusted source, the guest host kernel will need
+   to apply iTLB multihit mitigation via the kernel command line or kvm
+   module parameter.
diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
new file mode 100644 (file)
index 0000000..fddbd75
--- /dev/null
@@ -0,0 +1,276 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+TAA - TSX Asynchronous Abort
+======================================
+
+TAA is a hardware vulnerability that allows unprivileged speculative access to
+data which is available in various CPU internal buffers by using asynchronous
+aborts within an Intel TSX transactional region.
+
+Affected processors
+-------------------
+
+This vulnerability only affects Intel processors that support Intel
+Transactional Synchronization Extensions (TSX) when the TAA_NO bit (bit 8)
+is 0 in the IA32_ARCH_CAPABILITIES MSR.  On processors where the MDS_NO bit
+(bit 5) is 0 in the IA32_ARCH_CAPABILITIES MSR, the existing MDS mitigations
+also mitigate against TAA.
+
+Whether a processor is affected or not can be read out from the TAA
+vulnerability file in sysfs. See :ref:`tsx_async_abort_sys_info`.
+
+Related CVEs
+------------
+
+The following CVE entry is related to this TAA issue:
+
+   ==============  =====  ===================================================
+   CVE-2019-11135  TAA    TSX Asynchronous Abort (TAA) condition on some
+                          microprocessors utilizing speculative execution may
+                          allow an authenticated user to potentially enable
+                          information disclosure via a side channel with
+                          local access.
+   ==============  =====  ===================================================
+
+Problem
+-------
+
+When performing store, load or L1 refill operations, processors write
+data into temporary microarchitectural structures (buffers). The data in
+those buffers can be forwarded to load operations as an optimization.
+
+Intel TSX is an extension to the x86 instruction set architecture that adds
+hardware transactional memory support to improve performance of multi-threaded
+software. TSX lets the processor expose and exploit concurrency hidden in an
+application due to dynamically avoiding unnecessary synchronization.
+
+TSX supports atomic memory transactions that are either committed (success) or
+aborted. During an abort, operations that happened within the transactional region
+are rolled back. An asynchronous abort takes place, among other options, when a
+different thread accesses a cache line that is also used within the transactional
+region when that access might lead to a data race.
+
+Immediately after an uncompleted asynchronous abort, certain speculatively
+executed loads may read data from those internal buffers and pass it to dependent
+operations. This can be then used to infer the value via a cache side channel
+attack.
+
+Because the buffers are potentially shared between Hyper-Threads, cross
+Hyper-Thread attacks are possible.
+
+The victim of a malicious actor does not need to make use of TSX. Only the
+attacker needs to begin a TSX transaction and raise an asynchronous abort
+which in turn potentially leaks data stored in the buffers.
+
+More detailed technical information is available in the TAA specific x86
+architecture section: :ref:`Documentation/x86/tsx_async_abort.rst <tsx_async_abort>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the TAA vulnerability can be implemented from unprivileged
+applications running on hosts or guests.
+
+As for MDS, the attacker has no control over the memory addresses that can
+be leaked. Only the victim is responsible for bringing data to the CPU. As
+a result, the malicious actor has to sample as much data as possible and
+then postprocess it to try to infer any useful information from it.
+
+A potential attacker only has read access to the data. Also, there is no direct
+privilege escalation by using this technique.
+
+
+.. _tsx_async_abort_sys_info:
+
+TAA system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current TAA status
+of mitigated systems. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+
+The possible values in this file are:
+
+.. list-table::
+
+   * - 'Vulnerable'
+     - The CPU is affected by this vulnerability and the microcode and kernel mitigation are not applied.
+   * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+     - The system tries to clear the buffers but the microcode might not support the operation.
+   * - 'Mitigation: Clear CPU buffers'
+     - The microcode has been updated to clear the buffers. TSX is still enabled.
+   * - 'Mitigation: TSX disabled'
+     - TSX is disabled.
+   * - 'Not affected'
+     - The CPU is not affected by this issue.
+
+.. _ucode_needed:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the processor is vulnerable, but the availability of the microcode-based
+mitigation mechanism is not advertised via CPUID the kernel selects a best
+effort mitigation mode.  This mode invokes the mitigation instructions
+without a guarantee that they clear the CPU buffers.
+
+This is done to address virtualization scenarios where the host has the
+microcode update applied, but the hypervisor is not yet updated to expose the
+CPUID to the guest. If the host has updated microcode the protection takes
+effect; otherwise a few CPU cycles are wasted pointlessly.
+
+The state in the tsx_async_abort sysfs file reflects this situation
+accordingly.
+
+
+Mitigation mechanism
+--------------------
+
+The kernel detects the affected CPUs and the presence of the microcode which is
+required. If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default.
+
+
+The mitigation can be controlled at boot time via a kernel command line option.
+See :ref:`taa_mitigation_control_command_line`.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Affected systems where the host has TAA microcode and TAA is mitigated by
+having disabled TSX previously, are not vulnerable regardless of the status
+of the VMs.
+
+In all other cases, if the host either does not have the TAA microcode or
+the kernel is not mitigated, the system might be vulnerable.
+
+
+.. _taa_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows to control the TAA mitigations at boot time with
+the option "tsx_async_abort=". The valid arguments for this option are:
+
+  ============  =============================================================
+  off          This option disables the TAA mitigation on affected platforms.
+                If the system has TSX enabled (see next parameter) and the CPU
+                is affected, the system is vulnerable.
+
+  full         TAA mitigation is enabled. If TSX is enabled, on an affected
+                system it will clear CPU buffers on ring transitions. On
+                systems which are MDS-affected and deploy MDS mitigation,
+                TAA is also mitigated. Specifying this option on those
+                systems will have no effect.
+
+  full,nosmt    The same as tsx_async_abort=full, with SMT disabled on
+                vulnerable CPUs that have TSX enabled. This is the complete
+                mitigation. When TSX is disabled, SMT is not disabled because
+                CPU is not vulnerable to cross-thread TAA attacks.
+  ============  =============================================================
+
+Not specifying this option is equivalent to "tsx_async_abort=full".
+
+The kernel command line also allows to control the TSX feature using the
+parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used
+to control the TSX feature and the enumeration of the TSX feature bits (RTM
+and HLE) in CPUID.
+
+The valid options are:
+
+  ============  =============================================================
+  off          Disables TSX on the system.
+
+                Note that this option takes effect only on newer CPUs which are
+                not vulnerable to MDS, i.e., have MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1
+                and which get the new IA32_TSX_CTRL MSR through a microcode
+                update. This new MSR allows for the reliable deactivation of
+                the TSX functionality.
+
+  on           Enables TSX.
+
+                Although there are mitigations for all known security
+                vulnerabilities, TSX has been known to be an accelerator for
+                several previous speculation-related CVEs, and so there may be
+                unknown security risks associated with leaving it enabled.
+
+  auto         Disables TSX if X86_BUG_TAA is present, otherwise enables TSX
+                on the system.
+  ============  =============================================================
+
+Not specifying this option is equivalent to "tsx=off".
+
+The following combinations of the "tsx_async_abort" and "tsx" are possible. For
+affected platforms tsx=auto is equivalent to tsx=off and the result will be:
+
+  =========  ==========================   =========================================
+  tsx=on     tsx_async_abort=full         The system will use VERW to clear CPU
+                                          buffers. Cross-thread attacks are still
+                                         possible on SMT machines.
+  tsx=on     tsx_async_abort=full,nosmt   As above, cross-thread attacks on SMT
+                                          mitigated.
+  tsx=on     tsx_async_abort=off          The system is vulnerable.
+  tsx=off    tsx_async_abort=full         TSX might be disabled if microcode
+                                          provides a TSX control MSR. If so,
+                                         system is not vulnerable.
+  tsx=off    tsx_async_abort=full,nosmt   Ditto
+  tsx=off    tsx_async_abort=off          ditto
+  =========  ==========================   =========================================
+
+
+For unaffected platforms "tsx=on" and "tsx_async_abort=full" does not clear CPU
+buffers.  For platforms without TSX control (MSR_IA32_ARCH_CAPABILITIES.MDS_NO=0)
+"tsx" command line argument has no effect.
+
+For the affected platforms below table indicates the mitigation status for the
+combinations of CPUID bit MD_CLEAR and IA32_ARCH_CAPABILITIES MSR bits MDS_NO
+and TSX_CTRL_MSR.
+
+  =======  =========  =============  ========================================
+  MDS_NO   MD_CLEAR   TSX_CTRL_MSR   Status
+  =======  =========  =============  ========================================
+    0          0            0        Vulnerable (needs microcode)
+    0          1            0        MDS and TAA mitigated via VERW
+    1          1            0        MDS fixed, TAA vulnerable if TSX enabled
+                                     because MD_CLEAR has no meaning and
+                                     VERW is not guaranteed to clear buffers
+    1          X            1        MDS fixed, TAA can be mitigated by
+                                     VERW or TSX_CTRL_MSR
+  =======  =========  =============  ========================================
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If all user space applications are from a trusted source and do not execute
+untrusted code which is supplied externally, then the mitigation can be
+disabled. The same applies to virtualized environments with trusted guests.
+
+
+2. Untrusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If there are untrusted applications or guests on the system, enabling TSX
+might allow a malicious actor to leak data from the host or from other
+processes running on the same physical core.
+
+If the microcode is available and the TSX is disabled on the host, attacks
+are prevented in a virtualized environment as well, even if the VMs do not
+explicitly enable the mitigation.
+
+
+.. _taa_default_mitigations:
+
+Default mitigations
+-------------------
+
+The kernel's default action for vulnerable processors is:
+
+  - Deploy TSX disable mitigation (tsx_async_abort=full tsx=off).
index a84a83f..8dee8f6 100644 (file)
                        KVM MMU at runtime.
                        Default is 0 (off)
 
+       kvm.nx_huge_pages=
+                       [KVM] Controls the software workaround for the
+                       X86_BUG_ITLB_MULTIHIT bug.
+                       force   : Always deploy workaround.
+                       off     : Never deploy workaround.
+                       auto    : Deploy workaround based on the presence of
+                                 X86_BUG_ITLB_MULTIHIT.
+
+                       Default is 'auto'.
+
+                       If the software workaround is enabled for the host,
+                       guests need not enable it for nested guests.
+
+       kvm.nx_huge_pages_recovery_ratio=
+                       [KVM] Controls how many 4KiB pages are periodically zapped
+                       back to huge pages.  0 disables the recovery, otherwise if
+                       the value is N KVM will zap 1/Nth of the 4KiB pages every
+                       minute.  The default is 60.
+
        kvm-amd.nested= [KVM,AMD] Allow nested virtualization in KVM/SVM.
                        Default is 1 (enabled)
 
                                               ssbd=force-off [ARM64]
                                               l1tf=off [X86]
                                               mds=off [X86]
+                                              tsx_async_abort=off [X86]
+                                              kvm.nx_huge_pages=off [X86]
+
+                               Exceptions:
+                                              This does not have any effect on
+                                              kvm.nx_huge_pages when
+                                              kvm.nx_huge_pages=force.
 
                        auto (default)
                                Mitigate all CPU vulnerabilities, but leave SMT
                                be fully mitigated, even if it means losing SMT.
                                Equivalent to: l1tf=flush,nosmt [X86]
                                               mds=full,nosmt [X86]
+                                              tsx_async_abort=full,nosmt [X86]
 
        mminit_loglevel=
                        [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
                        interruptions from clocksource watchdog are not
                        acceptable).
 
+       tsx=            [X86] Control Transactional Synchronization
+                       Extensions (TSX) feature in Intel processors that
+                       support TSX control.
+
+                       This parameter controls the TSX feature. The options are:
+
+                       on      - Enable TSX on the system. Although there are
+                               mitigations for all known security vulnerabilities,
+                               TSX has been known to be an accelerator for
+                               several previous speculation-related CVEs, and
+                               so there may be unknown security risks associated
+                               with leaving it enabled.
+
+                       off     - Disable TSX on the system. (Note that this
+                               option takes effect only on newer CPUs which are
+                               not vulnerable to MDS, i.e., have
+                               MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and which get
+                               the new IA32_TSX_CTRL MSR through a microcode
+                               update. This new MSR allows for the reliable
+                               deactivation of the TSX functionality.)
+
+                       auto    - Disable TSX if X86_BUG_TAA is present,
+                                 otherwise enable TSX on the system.
+
+                       Not specifying this option is equivalent to tsx=off.
+
+                       See Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+                       for more details.
+
+       tsx_async_abort= [X86,INTEL] Control mitigation for the TSX Async
+                       Abort (TAA) vulnerability.
+
+                       Similar to Micro-architectural Data Sampling (MDS)
+                       certain CPUs that support Transactional
+                       Synchronization Extensions (TSX) are vulnerable to an
+                       exploit against CPU internal buffers which can forward
+                       information to a disclosure gadget under certain
+                       conditions.
+
+                       In vulnerable processors, the speculatively forwarded
+                       data can be used in a cache side channel attack, to
+                       access data to which the attacker does not have direct
+                       access.
+
+                       This parameter controls the TAA mitigation.  The
+                       options are:
+
+                       full       - Enable TAA mitigation on vulnerable CPUs
+                                    if TSX is enabled.
+
+                       full,nosmt - Enable TAA mitigation and disable SMT on
+                                    vulnerable CPUs. If TSX is disabled, SMT
+                                    is not disabled because CPU is not
+                                    vulnerable to cross-thread TAA attacks.
+                       off        - Unconditionally disable TAA mitigation
+
+                       Not specifying this option is equivalent to
+                       tsx_async_abort=full.  On CPUs which are MDS affected
+                       and deploy MDS mitigation, TAA mitigation is not
+                       required and doesn't provide any additional
+                       mitigation.
+
+                       For details see:
+                       Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+
        turbografx.map[2|3]=    [HW,JOY]
                        TurboGraFX parallel port interface
                        Format:
index ab7ed2f..5a09661 100644 (file)
@@ -91,6 +91,11 @@ stable kernels.
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_845719        |
++----------------+-----------------+-----------------+-----------------------------+
+| Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_843419        |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX ITS    | #22375,24313    | CAVIUM_ERRATUM_22375        |
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144        |
@@ -126,7 +131,7 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 +----------------+-----------------+-----------------+-----------------------------+
-| Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
+| Qualcomm Tech. | Kryo/Falkor v1  | E1009           | QCOM_FALKOR_ERRATUM_1009    |
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
 +----------------+-----------------+-----------------+-----------------------------+
index b301f75..e77635c 100644 (file)
@@ -43,7 +43,7 @@ SoC Families:
 
 - OMAP2 generic - defaults to OMAP2420
   compatible = "ti,omap2"
-- OMAP3 generic - defaults to OMAP3430
+- OMAP3 generic
   compatible = "ti,omap3"
 - OMAP4 generic - defaults to OMAP4430
   compatible = "ti,omap4"
@@ -51,6 +51,8 @@ SoC Families:
   compatible = "ti,omap5"
 - DRA7 generic - defaults to DRA742
   compatible = "ti,dra7"
+- AM33x generic
+  compatible = "ti,am33xx"
 - AM43x generic - defaults to AM4372
   compatible = "ti,am43"
 
@@ -63,12 +65,14 @@ SoCs:
 
 - OMAP3430
   compatible = "ti,omap3430", "ti,omap3"
+  legacy: "ti,omap34xx" - please do not use any more
 - AM3517
   compatible = "ti,am3517", "ti,omap3"
 - OMAP3630
-  compatible = "ti,omap36xx", "ti,omap3"
-- AM33xx
-  compatible = "ti,am33xx", "ti,omap3"
+  compatible = "ti,omap3630", "ti,omap3"
+  legacy: "ti,omap36xx" - please do not use any more
+- AM335x
+  compatible = "ti,am33xx"
 
 - OMAP4430
   compatible = "ti,omap4430", "ti,omap4"
@@ -110,19 +114,19 @@ SoCs:
 - AM4372
   compatible = "ti,am4372", "ti,am43"
 
-Boards:
+Boards (incomplete list of examples):
 
 - OMAP3 BeagleBoard : Low cost community board
-  compatible = "ti,omap3-beagle", "ti,omap3"
+  compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
 
 - OMAP3 Tobi with Overo : Commercial expansion board with daughter board
-  compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3"
+  compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"
 
 - OMAP4 SDP : Software Development Board
-  compatible = "ti,omap4-sdp", "ti,omap4430"
+  compatible = "ti,omap4-sdp", "ti,omap4430", "ti,omap4"
 
 - OMAP4 PandaBoard : Low cost community board
-  compatible = "ti,omap4-panda", "ti,omap4430"
+  compatible = "ti,omap4-panda", "ti,omap4430", "ti,omap4"
 
 - OMAP4 DuoVero with Parlor : Commercial expansion board with daughter board
   compatible = "gumstix,omap4-duovero-parlor", "gumstix,omap4-duovero", "ti,omap4430", "ti,omap4";
@@ -134,16 +138,16 @@ Boards:
   compatible = "variscite,var-dvk-om44", "variscite,var-som-om44", "ti,omap4460", "ti,omap4";
 
 - OMAP3 EVM : Software Development Board for OMAP35x, AM/DM37x
-  compatible = "ti,omap3-evm", "ti,omap3"
+  compatible = "ti,omap3-evm", "ti,omap3630", "ti,omap3"
 
 - AM335X EVM : Software Development Board for AM335x
-  compatible = "ti,am335x-evm", "ti,am33xx", "ti,omap3"
+  compatible = "ti,am335x-evm", "ti,am33xx"
 
 - AM335X Bone : Low cost community board
-  compatible = "ti,am335x-bone", "ti,am33xx", "ti,omap3"
+  compatible = "ti,am335x-bone", "ti,am33xx"
 
 - AM3359 ICEv2 : Low cost Industrial Communication Engine EVM.
-  compatible = "ti,am3359-icev2", "ti,am33xx", "ti,omap3"
+  compatible = "ti,am3359-icev2", "ti,am33xx"
 
 - AM335X OrionLXm : Substation Automation Platform
   compatible = "novatech,am335x-lxm", "ti,am33xx"
index c82c5e5..9c7e703 100644 (file)
@@ -496,12 +496,12 @@ properties:
 
       - description: Theobroma Systems RK3368-uQ7 with Haikou baseboard
         items:
-          - const: tsd,rk3368-uq7-haikou
+          - const: tsd,rk3368-lion-haikou
           - const: rockchip,rk3368
 
       - description: Theobroma Systems RK3399-Q7 with Haikou baseboard
         items:
-          - const: tsd,rk3399-q7-haikou
+          - const: tsd,rk3399-puma-haikou
           - const: rockchip,rk3399
 
       - description: Tronsmart Orion R68 Meta
index 0c38e4b..1758051 100644 (file)
@@ -15,12 +15,16 @@ In 'cpus' nodes:
 
 In 'operating-points-v2' table:
 - compatible: Should be
-       - 'operating-points-v2-ti-cpu' for am335x, am43xx, and dra7xx/am57xx SoCs
+       - 'operating-points-v2-ti-cpu' for am335x, am43xx, and dra7xx/am57xx,
+         omap34xx, omap36xx and am3517 SoCs
 - syscon: A phandle pointing to a syscon node representing the control module
          register space of the SoC.
 
 Optional properties:
 --------------------
+- "vdd-supply", "vbb-supply": to define two regulators for dra7xx
+- "cpu0-supply", "vbb-supply": to define two regulators for omap36xx
+
 For each opp entry in 'operating-points-v2' table:
 - opp-supported-hw: Two bitfields indicating:
        1. Which revision of the SoC the OPP is supported by
index 3e36c1d..fb46b49 100644 (file)
@@ -10,14 +10,23 @@ The Exynos PPMU driver uses the devfreq-event class to provide event data
 to various devfreq devices. The devfreq devices would use the event data when
 determining the current state of each IP.
 
-Required properties:
+Required properties for PPMU device:
 - compatible: Should be "samsung,exynos-ppmu" or "samsung,exynos-ppmu-v2".
 - reg: physical base address of each PPMU and length of memory mapped region.
 
-Optional properties:
+Optional properties for PPMU device:
 - clock-names : the name of clock used by the PPMU, "ppmu"
 - clocks : phandles for clock specified in "clock-names" property
 
+Required properties for 'events' child node of PPMU device:
+- event-name : the unique event name among PPMU device
+Optional properties for 'events' child node of PPMU device:
+- event-data-type : Define the type of data which shall be counted
+by the counter. You can check include/dt-bindings/pmu/exynos_ppmu.h for
+all possible types, e.g. count read requests, count write data in bytes,
+etc. This field is optional and when it is missing, the driver code
+will use default data type.
+
 Example1 : PPMUv1 nodes in exynos3250.dtsi are listed below.
 
                ppmu_dmc0: ppmu_dmc0@106a0000 {
@@ -145,3 +154,16 @@ Example3 : PPMUv2 nodes in exynos5433.dtsi are listed below.
                        reg = <0x104d0000 0x2000>;
                        status = "disabled";
                };
+
+Example4 : 'event-data-type' in exynos4412-ppmu-common.dtsi are listed below.
+
+       &ppmu_dmc0 {
+               status = "okay";
+               events {
+                       ppmu_dmc0_3: ppmu-event3-dmc0 {
+                       event-name = "ppmu-event3-dmc0";
+                       event-data-type = <(PPMU_RO_DATA_CNT |
+                                       PPMU_WO_DATA_CNT)>;
+                       };
+               };
+       };
index f8e9464..e71f752 100644 (file)
@@ -50,8 +50,6 @@ Required properties only for passive bus device:
 Optional properties only for parent bus device:
 - exynos,saturation-ratio: the percentage value which is used to calibrate
                        the performance count against total cycle count.
-- exynos,voltage-tolerance: the percentage value for bus voltage tolerance
-                       which is used to calculate the max voltage.
 
 Detailed correlation between sub-blocks and power line according to Exynos SoC:
 - In case of Exynos3250, there are two power line as following:
index 27f38ee..d3e423f 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/arm/allwinner,sun4i-a10-csi.yaml#
+$id: http://devicetree.org/schemas/media/allwinner,sun4i-a10-csi.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Allwinner A10 CMOS Sensor Interface (CSI) Device Tree Bindings
@@ -27,14 +27,12 @@ properties:
   clocks:
     items:
       - description: The CSI interface clock
-      - description: The CSI module clock
       - description: The CSI ISP clock
       - description: The CSI DRAM clock
 
   clock-names:
     items:
       - const: bus
-      - const: mod
       - const: isp
       - const: ram
 
@@ -89,9 +87,8 @@ examples:
         compatible = "allwinner,sun7i-a20-csi0";
         reg = <0x01c09000 0x1000>;
         interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
-        clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI0>,
-                 <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
-        clock-names = "bus", "mod", "isp", "ram";
+        clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
+        clock-names = "bus", "isp", "ram";
         resets = <&ccu RST_CSI0>;
 
         port {
index f83d888..064b7df 100644 (file)
@@ -33,13 +33,13 @@ patternProperties:
           allOf:
             - $ref: "/schemas/types.yaml#/definitions/string"
             - enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
-              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, ESPI,
-              ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1,
-              GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2,
-              GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11, I2C12,
-              I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7,
-              I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC,
-              LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
+              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC,
+              ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0,
+              GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
+              GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11,
+              I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6,
+              I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ,
+              LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
               MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2,
               NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3,
               NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1,
@@ -48,47 +48,45 @@ patternProperties:
               PWM8, PWM9, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
               RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12,
               SALT13, SALT14, SALT15, SALT16, SALT2, SALT3, SALT4, SALT5,
-              SALT6, SALT7, SALT8, SALT9, SD1, SD2, SD3, SD3DAT4, SD3DAT5,
-              SD3DAT6, SD3DAT7, SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO,
-              SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1,
-              SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11,
-              TACH12, TACH13, TACH14, TACH15, TACH2, TACH3, TACH4, TACH5,
-              TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2, THRU3, TXD1,
-              TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13, UART6, UART7,
-              UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
-              WDTRST4, ]
+              SALT6, SALT7, SALT8, SALT9, SD1, SD2, SGPM1, SGPS1, SIOONCTRL,
+              SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
+              SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
+              TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
+              TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
+              THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13,
+              UART6, UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2,
+              WDTRST3, WDTRST4, ]
         groups:
           allOf:
             - $ref: "/schemas/types.yaml#/definitions/string"
             - enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
-              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, ESPI,
-              ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP, GPIT0,
-              GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
-              GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1,
-              I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3,
-              I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6,
-              JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ,
-              MACLINK1, MACLINK2, MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3,
-              MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4,
-              NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1,
-              NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE,
-              PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1, PWM12G0, PWM12G1,
-              PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0, PWM15G1, PWM2, PWM3,
-              PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1, PWM9G0, PWM9G1, QSPI1,
-              QSPI2, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
-              RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10G0, SALT10G1,
-              SALT11G0, SALT11G1, SALT12G0, SALT12G1, SALT13G0, SALT13G1,
-              SALT14G0, SALT14G1, SALT15G0, SALT15G1, SALT16G0, SALT16G1,
-              SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8, SALT9G0,
-              SALT9G1, SD1, SD2, SD3, SD3DAT4, SD3DAT5, SD3DAT6, SD3DAT7,
-              SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD,
-              SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1, SPI1WP, SPI2,
-              SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11, TACH12, TACH13,
-              TACH14, TACH15, TACH2, TACH3, TACH4, TACH5, TACH6, TACH7, TACH8,
-              TACH9, THRU0, THRU1, THRU2, THRU3, TXD1, TXD2, TXD3, TXD4,
-              UART10, UART11, UART12G0, UART12G1, UART13G0, UART13G1, UART6,
-              UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
-              WDTRST4, ]
+              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1,
+              EMMCG4, EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID,
+              FWQSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5,
+              GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5, GPIU6,
+              GPIU7, HVI3C3, HVI3C4, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14,
+              I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9,
+              I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD,
+              LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2, MACLINK3, MACLINK4,
+              MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1,
+              NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2,
+              NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4,
+              OSCCLK, PEWAKE, PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1,
+              PWM12G0, PWM12G1, PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0,
+              PWM15G1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1,
+              PWM9G0, PWM9G1, QSPI1, QSPI2, RGMII1, RGMII2, RGMII3, RGMII4,
+              RMII1, RMII2, RMII3, RMII4, RXD1, RXD2, RXD3, RXD4, SALT1,
+              SALT10G0, SALT10G1, SALT11G0, SALT11G1, SALT12G0, SALT12G1,
+              SALT13G0, SALT13G1, SALT14G0, SALT14G1, SALT15G0, SALT15G1,
+              SALT16G0, SALT16G1, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7,
+              SALT8, SALT9G0, SALT9G1, SD1, SD2, SD3, SGPM1, SGPS1, SIOONCTRL,
+              SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
+              SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
+              TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
+              TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
+              THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12G0,
+              UART12G1, UART13G0, UART13G1, UART6, UART7, UART8, UART9, VB,
+              VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4, ]
 
 required:
   - compatible
index a78150c..f324169 100644 (file)
@@ -30,8 +30,8 @@ if:
 properties:
   compatible:
     enum:
-      - const: regulator-fixed
-      - const: regulator-fixed-clock
+      - regulator-fixed
+      - regulator-fixed-clock
 
   regulator-name: true
 
index b261a30..04819ad 100644 (file)
@@ -24,15 +24,17 @@ description: |
 
 properties:
   compatible:
-    items:
-      - enum:
-          - sifive,rocket0
-          - sifive,e5
-          - sifive,e51
-          - sifive,u54-mc
-          - sifive,u54
-          - sifive,u5
-      - const: riscv
+    oneOf:
+      - items:
+          - enum:
+              - sifive,rocket0
+              - sifive,e5
+              - sifive,e51
+              - sifive,u54-mc
+              - sifive,u54
+              - sifive,u5
+          - const: riscv
+      - const: riscv    # Simulator only
     description:
       Identifies that the hart uses the RISC-V instruction set
       and identifies the type of the hart.
@@ -66,12 +68,8 @@ properties:
       insensitive, letters in the riscv,isa string must be all
       lowercase to simplify parsing.
 
-  timebase-frequency:
-    type: integer
-    minimum: 1
-    description:
-      Specifies the clock frequency of the system timer in Hz.
-      This value is common to all harts on a single system image.
+  # RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
+  timebase-frequency: false
 
   interrupt-controller:
     type: object
@@ -93,7 +91,6 @@ properties:
 
 required:
   - riscv,isa
-  - timebase-frequency
   - interrupt-controller
 
 examples:
index 2b9f488..caf023c 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==============================================================
-Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
-==============================================================
+=============================================================
+Linux Base Driver for the Intel(R) PRO/100 Family of Adapters
+=============================================================
 
 June 1, 2018
 
@@ -21,7 +21,7 @@ Contents
 In This Release
 ===============
 
-This file describes the Linux* Base Driver for the Intel(R) PRO/100 Family of
+This file describes the Linux Base Driver for the Intel(R) PRO/100 Family of
 Adapters. This driver includes support for Itanium(R)2-based systems.
 
 For questions related to hardware requirements, refer to the documentation
@@ -138,9 +138,9 @@ version 1.6 or later is required for this functionality.
 The latest release of ethtool can be found from
 https://www.kernel.org/pub/software/network/ethtool/
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is provided through the ethtool* utility.  For instructions on
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is provided through the ethtool utility.  For instructions on
 enabling WoL with ethtool, refer to the ethtool man page.  WoL will be
 enabled on the system during the next shut down or reboot.  For this
 driver version, in order to enable WoL, the e100 driver must be loaded
index 956560b..4aaae0f 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===========================================================
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
+==========================================================
+Linux Base Driver for Intel(R) Ethernet Network Connection
+==========================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 1999 - 2013 Intel Corporation.
@@ -438,10 +438,10 @@ ethtool
   The latest release of ethtool can be found from
   https://www.kernel.org/pub/software/network/ethtool/
 
-Enabling Wake on LAN* (WoL)
----------------------------
+Enabling Wake on LAN (WoL)
+--------------------------
 
-  WoL is configured through the ethtool* utility.
+  WoL is configured through the ethtool utility.
 
   WoL will be enabled on the system during the next shut down or reboot.
   For this driver version, in order to enable WoL, the e1000 driver must be
index 01999f0..f49cd37 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-======================================================
-Linux* Driver for Intel(R) Ethernet Network Connection
-======================================================
+=====================================================
+Linux Driver for Intel(R) Ethernet Network Connection
+=====================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 2008-2018 Intel Corporation.
@@ -338,7 +338,7 @@ and higher cannot be forced. Use the autonegotiation advertising setting to
 manually set devices for 1 Gbps and higher.
 
 Speed, duplex, and autonegotiation advertising are configured through the
-ethtool* utility.
+ethtool utility.
 
 Caution: Only experienced network administrators should force speed and duplex
 or change autonegotiation advertising manually. The settings at the switch must
@@ -351,9 +351,9 @@ will not attempt to auto-negotiate with its link partner since those adapters
 operate only in full duplex and only at their native speed.
 
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is configured through the ethtool* utility.
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is configured through the ethtool utility.
 
 WoL will be enabled on the system during the next shut down or reboot. For
 this driver version, in order to enable WoL, the e1000e driver must be loaded
index ac3269e..4d279e6 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==============================================================
-Linux* Base Driver for Intel(R) Ethernet Multi-host Controller
-==============================================================
+=============================================================
+Linux Base Driver for Intel(R) Ethernet Multi-host Controller
+=============================================================
 
 August 20, 2018
 Copyright(c) 2015-2018 Intel Corporation.
@@ -120,8 +120,8 @@ rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 m|v|t|s|d|f|n|r
 Known Issues/Troubleshooting
 ============================
 
-Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS under Linux KVM
----------------------------------------------------------------------------------------
+Enabling SR-IOV in a 64-bit Microsoft Windows Server 2012/R2 guest OS under Linux KVM
+-------------------------------------------------------------------------------------
 KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
 includes traditional PCIe devices, as well as SR-IOV-capable devices based on
 the Intel Ethernet Controller XL710.
index 848fd38..8a9b185 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==================================================================
-Linux* Base Driver for the Intel(R) Ethernet Controller 700 Series
-==================================================================
+=================================================================
+Linux Base Driver for the Intel(R) Ethernet Controller 700 Series
+=================================================================
 
 Intel 40 Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -384,7 +384,7 @@ NOTE: You cannot set the speed for devices based on the Intel(R) Ethernet
 Network Adapter XXV710 based devices.
 
 Speed, duplex, and autonegotiation advertising are configured through the
-ethtool* utility.
+ethtool utility.
 
 Caution: Only experienced network administrators should force speed and duplex
 or change autonegotiation advertising manually. The settings at the switch must
index cfc0884..84ac7e7 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==================================================================
-Linux* Base Driver for Intel(R) Ethernet Adaptive Virtual Function
-==================================================================
+=================================================================
+Linux Base Driver for Intel(R) Ethernet Adaptive Virtual Function
+=================================================================
 
 Intel Ethernet Adaptive Virtual Function Linux driver.
 Copyright(c) 2013-2018 Intel Corporation.
@@ -19,7 +19,7 @@ Contents
 Overview
 ========
 
-This file describes the iavf Linux* Base Driver. This driver was formerly
+This file describes the iavf Linux Base Driver. This driver was formerly
 called i40evf.
 
 The iavf driver supports the below mentioned virtual function devices and
index c220aa2..ee43ea5 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===================================================================
-Linux* Base Driver for the Intel(R) Ethernet Connection E800 Series
-===================================================================
+==================================================================
+Linux Base Driver for the Intel(R) Ethernet Connection E800 Series
+==================================================================
 
 Intel ice Linux driver.
 Copyright(c) 2018 Intel Corporation.
index fc8cfaa..87e560f 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===========================================================
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
+==========================================================
+Linux Base Driver for Intel(R) Ethernet Network Connection
+==========================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -129,9 +129,9 @@ version is required for this functionality. Download it at:
 https://www.kernel.org/pub/software/network/ethtool/
 
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is configured through the ethtool* utility.
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is configured through the ethtool utility.
 
 WoL will be enabled on the system during the next shut down or reboot. For
 this driver version, in order to enable WoL, the igb driver must be loaded
index 9cddabe..557fc02 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-============================================================
-Linux* Base Virtual Function Driver for Intel(R) 1G Ethernet
-============================================================
+===========================================================
+Linux Base Virtual Function Driver for Intel(R) 1G Ethernet
+===========================================================
 
 Intel Gigabit Virtual Function Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
index c7d2548..f1d5233 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-=============================================================================
-Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
-=============================================================================
+===========================================================================
+Linux Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
+===========================================================================
 
 Intel 10 Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -519,8 +519,8 @@ The offload is also supported for ixgbe's VFs, but the VF must be set as
 Known Issues/Troubleshooting
 ============================
 
-Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS
------------------------------------------------------------------------
+Enabling SR-IOV in a 64-bit Microsoft Windows Server 2012/R2 guest OS
+---------------------------------------------------------------------
 Linux KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM.
 This includes traditional PCIe devices, as well as SR-IOV-capable devices based
 on the Intel Ethernet Controller XL710.
index 5d49773..76bbde7 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-=============================================================
-Linux* Base Virtual Function Driver for Intel(R) 10G Ethernet
-=============================================================
+============================================================
+Linux Base Virtual Function Driver for Intel(R) 10G Ethernet
+============================================================
 
 Intel 10 Gigabit Virtual Function Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
index 1393589..c17d680 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==========================================================
-Linux* Driver for the Pensando(R) Ethernet adapter family
-==========================================================
+========================================================
+Linux Driver for the Pensando(R) Ethernet adapter family
+========================================================
 
 Pensando Linux Ethernet driver.
 Copyright(c) 2019 Pensando Systems, Inc
index 49e95f4..8d4ad1d 100644 (file)
@@ -207,8 +207,8 @@ TCP variables:
 
 somaxconn - INTEGER
        Limit of socket listen() backlog, known in userspace as SOMAXCONN.
-       Defaults to 128.  See also tcp_max_syn_backlog for additional tuning
-       for TCP sockets.
+       Defaults to 4096. (Was 128 before linux-5.4)
+       See also tcp_max_syn_backlog for additional tuning for TCP sockets.
 
 tcp_abort_on_overflow - BOOLEAN
        If listening service is too slow to accept new connections,
@@ -408,11 +408,14 @@ tcp_max_orphans - INTEGER
        up to ~64K of unswappable memory.
 
 tcp_max_syn_backlog - INTEGER
-       Maximal number of remembered connection requests, which have not
-       received an acknowledgment from connecting client.
+       Maximal number of remembered connection requests (SYN_RECV),
+       which have not received an acknowledgment from connecting client.
+       This is a per-listener limit.
        The minimal value is 128 for low memory machines, and it will
        increase in proportion to the memory of machine.
        If server suffers from overload, try increasing this number.
+       Remember to also check /proc/sys/net/core/somaxconn
+       A SYN_RECV request socket consumes about 304 bytes of memory.
 
 tcp_max_tw_buckets - INTEGER
        Maximal number of timewait sockets held by system simultaneously.
index 0dd3f74..f914e81 100644 (file)
@@ -436,6 +436,10 @@ by the driver:
    encryption.
  * ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream
    but did not arrive in the expected order.
+ * ``tx_tls_skip_no_sync_data`` - number of TX packets which were part of
+   a TLS stream and arrived out-of-order, but skipped the HW offload routine
+   and went to the regular transmit flow as they were retransmissions of the
+   connection handshake.
  * ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of
    a TLS stream dropped, because they arrived out of order and associated
    record could not be found.
index af64c4b..a8de2fb 100644 (file)
@@ -27,6 +27,7 @@ x86-specific Documentation
    mds
    microcode
    resctrl_ui
+   tsx_async_abort
    usb-legacy-support
    i386/index
    x86_64/index
diff --git a/Documentation/x86/tsx_async_abort.rst b/Documentation/x86/tsx_async_abort.rst
new file mode 100644 (file)
index 0000000..583ddc1
--- /dev/null
@@ -0,0 +1,117 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+TSX Async Abort (TAA) mitigation
+================================
+
+.. _tsx_async_abort:
+
+Overview
+--------
+
+TSX Async Abort (TAA) is a side channel attack on internal buffers in some
+Intel processors similar to Microarchitectural Data Sampling (MDS).  In this
+case certain loads may speculatively pass invalid data to dependent operations
+when an asynchronous abort condition is pending in a Transactional
+Synchronization Extensions (TSX) transaction.  This includes loads with no
+fault or assist condition. Such loads may speculatively expose stale data from
+the same uarch data structures as in MDS, with same scope of exposure i.e.
+same-thread and cross-thread. This issue affects all current processors that
+support TSX.
+
+Mitigation strategy
+-------------------
+
+a) TSX disable - one of the mitigations is to disable TSX. A new MSR
+IA32_TSX_CTRL will be available in future and current processors after
+microcode update which can be used to disable TSX. In addition, it
+controls the enumeration of the TSX feature bits (RTM and HLE) in CPUID.
+
+b) Clear CPU buffers - similar to MDS, clearing the CPU buffers mitigates this
+vulnerability. More details on this approach can be found in
+:ref:`Documentation/admin-guide/hw-vuln/mds.rst <mds>`.
+
+Kernel internal mitigation modes
+--------------------------------
+
+ =============    ============================================================
+ off              Mitigation is disabled. Either the CPU is not affected or
+                  tsx_async_abort=off is supplied on the kernel command line.
+
+ tsx disabled     Mitigation is enabled. TSX feature is disabled by default at
+                  bootup on processors that support TSX control.
+
+ verw             Mitigation is enabled. CPU is affected and MD_CLEAR is
+                  advertised in CPUID.
+
+ ucode needed     Mitigation is enabled. CPU is affected and MD_CLEAR is not
+                  advertised in CPUID. That is mainly for virtualization
+                  scenarios where the host has the updated microcode but the
+                  hypervisor does not expose MD_CLEAR in CPUID. It's a best
+                  effort approach without guarantee.
+ =============    ============================================================
+
+If the CPU is affected and the "tsx_async_abort" kernel command line parameter is
+not provided then the kernel selects an appropriate mitigation depending on the
+status of RTM and MD_CLEAR CPUID bits.
+
+Below tables indicate the impact of tsx=on|off|auto cmdline options on state of
+TAA mitigation, VERW behavior and TSX feature for various combinations of
+MSR_IA32_ARCH_CAPABILITIES bits.
+
+1. "tsx=off"
+
+=========  =========  ============  ============  ==============  ===================  ======================
+MSR_IA32_ARCH_CAPABILITIES bits     Result with cmdline tsx=off
+----------------------------------  -------------------------------------------------------------------------
+TAA_NO     MDS_NO     TSX_CTRL_MSR  TSX state     VERW can clear  TAA mitigation       TAA mitigation
+                                    after bootup  CPU buffers     tsx_async_abort=off  tsx_async_abort=full
+=========  =========  ============  ============  ==============  ===================  ======================
+    0          0           0         HW default         Yes           Same as MDS           Same as MDS
+    0          0           1        Invalid case   Invalid case       Invalid case          Invalid case
+    0          1           0         HW default         No         Need ucode update     Need ucode update
+    0          1           1          Disabled          Yes           TSX disabled          TSX disabled
+    1          X           1          Disabled           X             None needed           None needed
+=========  =========  ============  ============  ==============  ===================  ======================
+
+2. "tsx=on"
+
+=========  =========  ============  ============  ==============  ===================  ======================
+MSR_IA32_ARCH_CAPABILITIES bits     Result with cmdline tsx=on
+----------------------------------  -------------------------------------------------------------------------
+TAA_NO     MDS_NO     TSX_CTRL_MSR  TSX state     VERW can clear  TAA mitigation       TAA mitigation
+                                    after bootup  CPU buffers     tsx_async_abort=off  tsx_async_abort=full
+=========  =========  ============  ============  ==============  ===================  ======================
+    0          0           0         HW default        Yes            Same as MDS          Same as MDS
+    0          0           1        Invalid case   Invalid case       Invalid case         Invalid case
+    0          1           0         HW default        No          Need ucode update     Need ucode update
+    0          1           1          Enabled          Yes               None              Same as MDS
+    1          X           1          Enabled          X              None needed          None needed
+=========  =========  ============  ============  ==============  ===================  ======================
+
+3. "tsx=auto"
+
+=========  =========  ============  ============  ==============  ===================  ======================
+MSR_IA32_ARCH_CAPABILITIES bits     Result with cmdline tsx=auto
+----------------------------------  -------------------------------------------------------------------------
+TAA_NO     MDS_NO     TSX_CTRL_MSR  TSX state     VERW can clear  TAA mitigation       TAA mitigation
+                                    after bootup  CPU buffers     tsx_async_abort=off  tsx_async_abort=full
+=========  =========  ============  ============  ==============  ===================  ======================
+    0          0           0         HW default    Yes                Same as MDS           Same as MDS
+    0          0           1        Invalid case  Invalid case        Invalid case          Invalid case
+    0          1           0         HW default    No              Need ucode update     Need ucode update
+    0          1           1          Disabled      Yes               TSX disabled          TSX disabled
+    1          X           1          Enabled       X                 None needed           None needed
+=========  =========  ============  ============  ==============  ===================  ======================
+
+In the tables, TSX_CTRL_MSR is a new bit in MSR_IA32_ARCH_CAPABILITIES that
+indicates whether MSR_IA32_TSX_CTRL is supported.
+
+There are two control bits in IA32_TSX_CTRL MSR:
+
+      Bit 0: When set it disables the Restricted Transactional Memory (RTM)
+             sub-feature of TSX (will force all transactions to abort on the
+             XBEGIN instruction).
+
+      Bit 1: When set it disables the enumeration of the RTM and HLE features
+             (i.e. it will make CPUID(EAX=7).EBX{bit4} and
+             CPUID(EAX=7).EBX{bit11} read as 0).
index e51a68b..40f2eb2 100644 (file)
@@ -2323,11 +2323,13 @@ F:      drivers/edac/altera_edac.
 
 ARM/SPREADTRUM SoC SUPPORT
 M:     Orson Zhai <orsonzhai@gmail.com>
-M:     Baolin Wang <baolin.wang@linaro.org>
+M:     Baolin Wang <baolin.wang7@gmail.com>
 M:     Chunyan Zhang <zhang.lyra@gmail.com>
 S:     Maintained
 F:     arch/arm64/boot/dts/sprd
 N:     sprd
+N:     sc27xx
+N:     sc2731
 
 ARM/STI ARCHITECTURE
 M:     Patrice Chotard <patrice.chotard@st.com>
@@ -3051,6 +3053,7 @@ M:        Daniel Borkmann <daniel@iogearbox.net>
 R:     Martin KaFai Lau <kafai@fb.com>
 R:     Song Liu <songliubraving@fb.com>
 R:     Yonghong Song <yhs@fb.com>
+R:     Andrii Nakryiko <andriin@fb.com>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@@ -3096,7 +3099,7 @@ S:        Supported
 F:     arch/arm64/net/
 
 BPF JIT for MIPS (32-BIT AND 64-BIT)
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained
@@ -3183,7 +3186,7 @@ N:        bcm216*
 N:     kona
 F:     arch/arm/mach-bcm/
 
-BROADCOM BCM2835 ARM ARCHITECTURE
+BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
 M:     Eric Anholt <eric@anholt.net>
 M:     Stefan Wahren <wahrenst@gmx.net>
 L:     bcm-kernel-feedback-list@broadcom.com
@@ -3191,6 +3194,7 @@ L:        linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://github.com/anholt/linux
 S:     Maintained
+N:     bcm2711
 N:     bcm2835
 F:     drivers/staging/vc04_services
 
@@ -3237,8 +3241,6 @@ S:        Maintained
 F:     drivers/usb/gadget/udc/bcm63xx_udc.*
 
 BROADCOM BCM7XXX ARM ARCHITECTURE
-M:     Brian Norris <computersforpeace@gmail.com>
-M:     Gregory Fong <gregory.0xf0@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     bcm-kernel-feedback-list@broadcom.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -3259,7 +3261,6 @@ S:        Maintained
 F:     drivers/cpufreq/bmips-cpufreq.c
 
 BROADCOM BMIPS MIPS ARCHITECTURE
-M:     Kevin Cernekee <cernekee@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     bcm-kernel-feedback-list@broadcom.com
 L:     linux-mips@vger.kernel.org
@@ -3531,7 +3532,7 @@ BUS FREQUENCY DRIVER FOR SAMSUNG EXYNOS
 M:     Chanwoo Choi <cw00.choi@samsung.com>
 L:     linux-pm@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
 S:     Maintained
 F:     drivers/devfreq/exynos-bus.c
 F:     Documentation/devicetree/bindings/devfreq/exynos-bus.txt
@@ -3736,7 +3737,6 @@ F:        drivers/crypto/cavium/cpt/
 
 CAVIUM THUNDERX2 ARM64 SOC
 M:     Robert Richter <rrichter@cavium.com>
-M:     Jayachandran C <jnair@caviumnetworks.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm64/boot/dts/cavium/thunder2-99xx*
@@ -4269,14 +4269,13 @@ F:      include/linux/cpufreq.h
 F:     include/linux/sched/cpufreq.h
 F:     tools/testing/selftests/cpufreq/
 
-CPU FREQUENCY DRIVERS - ARM BIG LITTLE
+CPU FREQUENCY DRIVERS - VEXPRESS SPC ARM BIG LITTLE
 M:     Viresh Kumar <viresh.kumar@linaro.org>
 M:     Sudeep Holla <sudeep.holla@arm.com>
 L:     linux-pm@vger.kernel.org
 W:     http://www.arm.com/products/processors/technologies/biglittleprocessing.php
 S:     Maintained
-F:     drivers/cpufreq/arm_big_little.h
-F:     drivers/cpufreq/arm_big_little.c
+F:     drivers/cpufreq/vexpress-spc-cpufreq.c
 
 CPU POWER MONITORING SUBSYSTEM
 M:     Thomas Renninger <trenn@suse.com>
@@ -4761,9 +4760,9 @@ F:        include/linux/devcoredump.h
 DEVICE FREQUENCY (DEVFREQ)
 M:     MyungJoo Ham <myungjoo.ham@samsung.com>
 M:     Kyungmin Park <kyungmin.park@samsung.com>
-R:     Chanwoo Choi <cw00.choi@samsung.com>
+M:     Chanwoo Choi <cw00.choi@samsung.com>
 L:     linux-pm@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
 S:     Maintained
 F:     drivers/devfreq/
 F:     include/linux/devfreq.h
@@ -4773,10 +4772,11 @@ F:      include/trace/events/devfreq.h
 DEVICE FREQUENCY EVENT (DEVFREQ-EVENT)
 M:     Chanwoo Choi <cw00.choi@samsung.com>
 L:     linux-pm@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
 S:     Supported
 F:     drivers/devfreq/event/
 F:     drivers/devfreq/devfreq-event.c
+F:     include/dt-bindings/pmu/exynos_ppmu.h
 F:     include/linux/devfreq-event.h
 F:     Documentation/devicetree/bindings/devfreq/event/
 
@@ -8001,7 +8001,7 @@ S:        Maintained
 F:     drivers/usb/atm/ueagle-atm.c
 
 IMGTEC ASCII LCD DRIVER
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 S:     Maintained
 F:     Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
 F:     drivers/auxdisplay/img-ascii-lcd.c
@@ -8298,11 +8298,14 @@ F:      drivers/hid/intel-ish-hid/
 
 INTEL IOMMU (VT-d)
 M:     David Woodhouse <dwmw2@infradead.org>
+M:     Lu Baolu <baolu.lu@linux.intel.com>
 L:     iommu@lists.linux-foundation.org
-T:     git git://git.infradead.org/iommu-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
 S:     Supported
-F:     drivers/iommu/intel-iommu.c
+F:     drivers/iommu/dmar.c
+F:     drivers/iommu/intel*.[ch]
 F:     include/linux/intel-iommu.h
+F:     include/linux/intel-svm.h
 
 INTEL IOP-ADMA DMA DRIVER
 R:     Dan Williams <dan.j.williams@intel.com>
@@ -10518,8 +10521,12 @@ F:     mm/memblock.c
 F:     Documentation/core-api/boot-time-mm.rst
 
 MEMORY MANAGEMENT
+M:     Andrew Morton <akpm@linux-foundation.org>
 L:     linux-mm@kvack.org
 W:     http://www.linux-mm.org
+T:     quilt https://ozlabs.org/~akpm/mmotm/
+T:     quilt https://ozlabs.org/~akpm/mmots/
+T:     git git://github.com/hnaz/linux-mm.git
 S:     Maintained
 F:     include/linux/mm.h
 F:     include/linux/gfp.h
@@ -10828,7 +10835,7 @@ F:      drivers/usb/image/microtek.*
 
 MIPS
 M:     Ralf Baechle <ralf@linux-mips.org>
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 M:     James Hogan <jhogan@kernel.org>
 L:     linux-mips@vger.kernel.org
 W:     http://www.linux-mips.org/
@@ -10842,7 +10849,7 @@ F:      arch/mips/
 F:     drivers/platform/mips/
 
 MIPS BOSTON DEVELOPMENT BOARD
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/clock/img,boston-clock.txt
@@ -10852,7 +10859,7 @@ F:      drivers/clk/imgtec/clk-boston.c
 F:     include/dt-bindings/clock/boston-clock.h
 
 MIPS GENERIC PLATFORM
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 L:     linux-mips@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/power/mti,mips-cpc.txt
@@ -11407,7 +11414,6 @@ F:      include/trace/events/tcp.h
 NETWORKING [TLS]
 M:     Boris Pismenny <borisp@mellanox.com>
 M:     Aviad Yehezkel <aviadye@mellanox.com>
-M:     Dave Watson <davejwatson@fb.com>
 M:     John Fastabend <john.fastabend@gmail.com>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     Jakub Kicinski <jakub.kicinski@netronome.com>
@@ -13905,7 +13911,7 @@ F:      drivers/mtd/nand/raw/r852.h
 
 RISC-V ARCHITECTURE
 M:     Paul Walmsley <paul.walmsley@sifive.com>
-M:     Palmer Dabbelt <palmer@sifive.com>
+M:     Palmer Dabbelt <palmer@dabbelt.com>
 M:     Albert Ou <aou@eecs.berkeley.edu>
 L:     linux-riscv@lists.infradead.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
@@ -14782,7 +14788,7 @@ F:      drivers/media/usb/siano/
 F:     drivers/media/mmc/siano/
 
 SIFIVE DRIVERS
-M:     Palmer Dabbelt <palmer@sifive.com>
+M:     Palmer Dabbelt <palmer@dabbelt.com>
 M:     Paul Walmsley <paul.walmsley@sifive.com>
 L:     linux-riscv@lists.infradead.org
 T:     git git://github.com/sifive/riscv-linux.git
@@ -14792,7 +14798,7 @@ N:      sifive
 
 SIFIVE FU540 SYSTEM-ON-CHIP
 M:     Paul Walmsley <paul.walmsley@sifive.com>
-M:     Palmer Dabbelt <palmer@sifive.com>
+M:     Palmer Dabbelt <palmer@dabbelt.com>
 L:     linux-riscv@lists.infradead.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pjw/sifive.git
 S:     Supported
@@ -18034,6 +18040,7 @@ F:      Documentation/vm/zsmalloc.rst
 ZSWAP COMPRESSED SWAP CACHING
 M:     Seth Jennings <sjenning@redhat.com>
 M:     Dan Streetman <ddstreet@ieee.org>
+M:     Vitaly Wool <vitaly.wool@konsulko.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zswap.c
index 5475cdb..9cd2891 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
-NAME = Nesting Opossum
+EXTRAVERSION = -rc8
+NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -917,6 +917,9 @@ ifeq ($(CONFIG_RELR),y)
 LDFLAGS_vmlinux        += --pack-dyn-relocs=relr
 endif
 
+# make the checker run with the right architecture
+CHECKFLAGS += --arch=$(ARCH)
+
 # insure the checker run with the right endianness
 CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)
 
index bfc7f5f..9acbeba 100644 (file)
                clock-frequency = <33333333>;
        };
 
+       reg_5v0: regulator-5v0 {
+               compatible = "regulator-fixed";
+
+               regulator-name = "5v0-supply";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
        cpu_intc: cpu-interrupt-controller {
                compatible = "snps,archs-intc";
                interrupt-controller;
                        clocks = <&input_clk>;
                        cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
                                   <&creg_gpio 1 GPIO_ACTIVE_LOW>;
+
+                       spi-flash@0 {
+                               compatible = "sst26wf016b", "jedec,spi-nor";
+                               reg = <0>;
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               spi-max-frequency = <4000000>;
+                       };
+
+                       adc@1 {
+                               compatible = "ti,adc108s102";
+                               reg = <1>;
+                               vref-supply = <&reg_5v0>;
+                               spi-max-frequency = <1000000>;
+                       };
                };
 
                creg_gpio: gpio@14b0 {
index 9b9a744..0974226 100644 (file)
@@ -32,6 +32,8 @@ CONFIG_INET=y
 CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_SPI_NOR=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
@@ -55,6 +57,8 @@ CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_SNPS_CREG=y
 # CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_DRM=y
 # CONFIG_DRM_FBDEV_EMULATION is not set
 CONFIG_DRM_UDL=y
@@ -72,6 +76,8 @@ CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
 CONFIG_DMADEVICES=y
 CONFIG_DW_AXI_DMAC=y
+CONFIG_IIO=y
+CONFIG_TI_ADC108S102=y
 CONFIG_EXT3_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
index 861a8ae..661fd84 100644 (file)
@@ -614,8 +614,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
        /* loop thru all available h/w condition indexes */
        for (i = 0; i < cc_bcr.c; i++) {
                write_aux_reg(ARC_REG_CC_INDEX, i);
-               cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
-               cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
+               cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
+               cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
 
                arc_pmu_map_hw_event(i, cc_name.str);
                arc_pmu_add_raw_event_attr(i, cc_name.str);
index bf30020..76f819f 100644 (file)
                can = &hecc;
        };
 
+       cpus {
+               cpu: cpu@0 {
+                       /* Based on OMAP3630 variants OPP50 and OPP100 */
+                       operating-points-v2 = <&cpu0_opp_table>;
+
+                       clock-latency = <300000>; /* From legacy driver */
+               };
+       };
+
+       cpu0_opp_table: opp-table {
+               compatible = "operating-points-v2-ti-cpu";
+               syscon = <&scm_conf>;
+               /*
+                * AM3517 TRM only lists 600MHz @ 1.2V, but omap36xx
+                * appear to operate at 300MHz as well. Since AM3517 only
+                * lists one operating voltage, it will remain fixed at 1.2V
+                */
+               opp50-300000000 {
+                       opp-hz = /bits/ 64 <300000000>;
+                       opp-microvolt = <1200000>;
+                       opp-supported-hw = <0xffffffff 0xffffffff>;
+                       opp-suspend;
+               };
+
+               opp100-600000000 {
+                       opp-hz = /bits/ 64 <600000000>;
+                       opp-microvolt = <1200000>;
+                       opp-supported-hw = <0xffffffff 0xffffffff>;
+               };
+       };
+
        ocp@68000000 {
                am35x_otg_hs: am35x_otg_hs@5c040000 {
                        compatible = "ti,omap3-musb";
index e507e4a..e7d7124 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        model = "TeeJet Mt.Ventoux";
-       compatible = "teejet,mt_ventoux", "ti,omap3";
+       compatible = "teejet,mt_ventoux", "ti,am3517", "ti,omap3";
 
        memory@80000000 {
                device_type = "memory";
index 883fb85..1b4b2b0 100644 (file)
                reg = <0x70>;
                #address-cells = <1>;
                #size-cells = <0>;
+               i2c-mux-idle-disconnect;
 
                i2c@0 {
                        /* FMC A */
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <0>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@1 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <1>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@2 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <2>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@3 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <3>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@4 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <4>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@5 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <5>;
-                       i2c-mux-idle-disconnect;
 
                        ina230@40 { compatible = "ti,ina230"; reg = <0x40>; shunt-resistor = <5000>; };
                        ina230@41 { compatible = "ti,ina230"; reg = <0x41>; shunt-resistor = <5000>; };
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <6>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@7 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <7>;
-                       i2c-mux-idle-disconnect;
 
                        u41: pca9575@20 {
                                compatible = "nxp,pca9575";
index 09a088f..b75af21 100644 (file)
        #address-cells = <1>;
        #size-cells = <0>;
        pinctrl-0 = <&emmc_gpio34 &gpclk2_gpio43>;
+       bus-width = <4>;
        mmc-pwrseq = <&wifi_pwrseq>;
        non-removable;
        status = "okay";
index 7c3cb7e..925cb37 100644 (file)
@@ -9,6 +9,14 @@
                reg = <0 0x40000000>;
        };
 
+       leds {
+               /*
+                * Since there is no upstream GPIO driver yet,
+                * remove the incomplete node.
+                */
+               /delete-node/ act;
+       };
+
        reg_3v3: fixed-regulator {
                compatible = "regulator-fixed";
                regulator-name = "3V3";
index 2a6ce87..9e027b9 100644 (file)
        pinctrl-0 = <&pinctrl_pwm3>;
 };
 
+&snvs_pwrkey {
+       status = "okay";
+};
+
 &ssi2 {
        status = "okay";
 };
index 7ceae35..547fb14 100644 (file)
        vin-supply = <&sw1c_reg>;
 };
 
+&snvs_poweroff {
+       status = "okay";
+};
+
 &iomuxc {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_hog>;
index f3404dd..cf62846 100644 (file)
                        accelerometer@1c {
                                compatible = "fsl,mma8451";
                                reg = <0x1c>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_mma8451_int>;
                                interrupt-parent = <&gpio6>;
                                interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
                        };
                        >;
                };
 
+               pinctrl_mma8451_int: mma8451intgrp {
+                       fsl,pins = <
+                               MX6QDL_PAD_EIM_BCLK__GPIO6_IO31         0xb0b1
+                       >;
+               };
+
                pinctrl_pwm3: pwm1grp {
                        fsl,pins = <
                                MX6QDL_PAD_SD4_DAT1__PWM3_OUT           0x1b0b1
index 710f850..e2e604d 100644 (file)
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x302d0000 0x10000>;
                                interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
                                         <&clks IMX7D_GPT1_ROOT_CLK>;
                                clock-names = "ipg", "per";
                        };
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x302e0000 0x10000>;
                                interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
                                         <&clks IMX7D_GPT2_ROOT_CLK>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x302f0000 0x10000>;
                                interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
                                         <&clks IMX7D_GPT3_ROOT_CLK>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x30300000 0x10000>;
                                interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
                                         <&clks IMX7D_GPT4_ROOT_CLK>;
                                clock-names = "ipg", "per";
                                status = "disabled";
index f7a841a..2a0a98f 100644 (file)
@@ -9,5 +9,5 @@
 
 / {
        model = "LogicPD Zoom OMAP35xx SOM-LV Development Kit";
-       compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3";
+       compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3430", "ti,omap3";
 };
index 7675bc3..57bae2a 100644 (file)
@@ -9,5 +9,5 @@
 
 / {
        model = "LogicPD Zoom OMAP35xx Torpedo Development Kit";
-       compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3";
+       compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3430", "ti,omap3";
 };
index 3fdd0a7..506b118 100644 (file)
 &twl_gpio {
        ti,use-leds;
 };
+
+&twl_keypad {
+       status = "disabled";
+};
index 1aa99fc..125ed93 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        model = "TI OMAP3 BeagleBoard xM";
-       compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
+       compatible = "ti,omap3-beagle-xm", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        cpus {
                cpu@0 {
index e3df3c1..4ed3f93 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        model = "TI OMAP3 BeagleBoard";
-       compatible = "ti,omap3-beagle", "ti,omap3";
+       compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3";
 
        cpus {
                cpu@0 {
index 76e52c7..32dbaea 100644 (file)
@@ -9,7 +9,7 @@
 
 / {
        model = "CompuLab CM-T3530";
-       compatible = "compulab,omap3-cm-t3530", "ti,omap34xx", "ti,omap3";
+       compatible = "compulab,omap3-cm-t3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
 
        /* Regulator to trigger the reset signal of the Wifi module */
        mmc2_sdio_reset: regulator-mmc2-sdio-reset {
index 6e944df..683819b 100644 (file)
@@ -9,7 +9,7 @@
 
 / {
        model = "CompuLab CM-T3730";
-       compatible = "compulab,omap3-cm-t3730", "ti,omap36xx", "ti,omap3";
+       compatible = "compulab,omap3-cm-t3730", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        wl12xx_vmmc2: wl12xx_vmmc2 {
                compatible = "regulator-fixed";
index a80fc60..afed850 100644 (file)
@@ -11,7 +11,7 @@
 #include "omap3-devkit8000-lcd-common.dtsi"
 / {
        model = "TimLL OMAP3 Devkit8000 with 4.3'' LCD panel";
-       compatible = "timll,omap3-devkit8000", "ti,omap3";
+       compatible = "timll,omap3-devkit8000", "ti,omap3430", "ti,omap3";
 
        lcd0: display {
                panel-timing {
index 0753776..07c51a1 100644 (file)
@@ -11,7 +11,7 @@
 #include "omap3-devkit8000-lcd-common.dtsi"
 / {
        model = "TimLL OMAP3 Devkit8000 with 7.0'' LCD panel";
-       compatible = "timll,omap3-devkit8000", "ti,omap3";
+       compatible = "timll,omap3-devkit8000", "ti,omap3430", "ti,omap3";
 
        lcd0: display {
                panel-timing {
index faafc48..162d072 100644 (file)
@@ -7,7 +7,7 @@
 #include "omap3-devkit8000-common.dtsi"
 / {
        model = "TimLL OMAP3 Devkit8000";
-       compatible = "timll,omap3-devkit8000", "ti,omap3";
+       compatible = "timll,omap3-devkit8000", "ti,omap3430", "ti,omap3";
 
        aliases {
                display1 = &dvi0;
index b6ef1a7..409a758 100644 (file)
@@ -11,7 +11,7 @@
 
 / {
        model = "OMAP3 GTA04";
-       compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3";
+       compatible = "ti,omap3-gta04", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        cpus {
                cpu@0 {
index badb9b3..c9ecbc4 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        model = "TI OMAP3 HEAD acoustics LCD-baseboard with TAO3530 SOM";
-       compatible = "headacoustics,omap3-ha-lcd", "technexion,omap3-tao3530", "ti,omap34xx", "ti,omap3";
+       compatible = "headacoustics,omap3-ha-lcd", "technexion,omap3-tao3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
 };
 
 &omap3_pmx_core {
index a536525..35c4e15 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        model = "TI OMAP3 HEAD acoustics baseboard with TAO3530 SOM";
-       compatible = "headacoustics,omap3-ha", "technexion,omap3-tao3530", "ti,omap34xx", "ti,omap3";
+       compatible = "headacoustics,omap3-ha", "technexion,omap3-tao3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
 };
 
 &omap3_pmx_core {
index 03dcd05..d134ce1 100644 (file)
@@ -10,7 +10,7 @@
 
 / {
        model = "IGEPv2 Rev. F (TI OMAP AM/DM37x)";
-       compatible = "isee,omap3-igep0020-rev-f", "ti,omap36xx", "ti,omap3";
+       compatible = "isee,omap3-igep0020-rev-f", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        /* Regulator to trigger the WL_EN signal of the Wifi module */
        lbep5clwmc_wlen: regulator-lbep5clwmc-wlen {
index 6d0519e..e341535 100644 (file)
@@ -10,7 +10,7 @@
 
 / {
        model = "IGEPv2 Rev. C (TI OMAP AM/DM37x)";
-       compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3";
+       compatible = "isee,omap3-igep0020", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        vmmcsdio_fixed: fixedregulator-mmcsdio {
                compatible = "regulator-fixed";
index 060acd1..9ca1d0f 100644 (file)
@@ -10,7 +10,7 @@
 
 / {
        model = "IGEP COM MODULE Rev. G (TI OMAP AM/DM37x)";
-       compatible = "isee,omap3-igep0030-rev-g", "ti,omap36xx", "ti,omap3";
+       compatible = "isee,omap3-igep0030-rev-g", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        /* Regulator to trigger the WL_EN signal of the Wifi module */
        lbep5clwmc_wlen: regulator-lbep5clwmc-wlen {
index 25170bd..32f3103 100644 (file)
@@ -10,7 +10,7 @@
 
 / {
        model = "IGEP COM MODULE Rev. E (TI OMAP AM/DM37x)";
-       compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3";
+       compatible = "isee,omap3-igep0030", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        vmmcsdio_fixed: fixedregulator-mmcsdio {
                compatible = "regulator-fixed";
index 9a5fde2..ec9ba04 100644 (file)
@@ -10,7 +10,7 @@
 
 / {
        model = "TI OMAP3430 LDP (Zoom1 Labrador)";
-       compatible = "ti,omap3-ldp", "ti,omap3";
+       compatible = "ti,omap3-ldp", "ti,omap3430", "ti,omap3";
 
        memory@80000000 {
                device_type = "memory";
index c22833d..73d4778 100644 (file)
@@ -7,7 +7,7 @@
 
 / {
        model = "INCOstartec LILLY-A83X module (DM3730)";
-       compatible = "incostartec,omap3-lilly-a83x", "ti,omap36xx", "ti,omap3";
+       compatible = "incostartec,omap3-lilly-a83x", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        chosen {
                        bootargs = "console=ttyO0,115200n8 vt.global_cursor_default=0 consoleblank=0";
index fec3354..ecb4ef7 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        model = "INCOstartec LILLY-DBB056 (DM3730)";
-       compatible = "incostartec,omap3-lilly-dbb056", "incostartec,omap3-lilly-a83x", "ti,omap36xx", "ti,omap3";
+       compatible = "incostartec,omap3-lilly-dbb056", "incostartec,omap3-lilly-a83x", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
 
 &twl {
index 74c0ff2..2495a69 100644 (file)
@@ -12,7 +12,7 @@
 
 / {
        model = "Nokia N9";
-       compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3";
+       compatible = "nokia,omap3-n9", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
 
 &i2c2 {
index 6681d45..a075b63 100644 (file)
        cpus {
                cpu@0 {
                        cpu0-supply = <&vcc>;
-                       operating-points = <
-                               /* kHz    uV */
-                               300000  1012500
-                               600000  1200000
-                               800000  1325000
-                               1000000 1375000
-                       >;
                };
        };
 
index 9886bf8..31d47a1 100644 (file)
@@ -12,7 +12,7 @@
 
 / {
        model = "Nokia N950";
-       compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
+       compatible = "nokia,omap3-n950", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        keys {
                compatible = "gpio-keys";
index 1833857..7f04dfa 100644 (file)
@@ -14,5 +14,5 @@
 
 / {
        model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Alto35";
-       compatible = "gumstix,omap3-overo-alto35", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+       compatible = "gumstix,omap3-overo-alto35", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
index f204c8a..bc5a04e 100644 (file)
@@ -14,7 +14,7 @@
 
 / {
        model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Chestnut43";
-       compatible = "gumstix,omap3-overo-chestnut43", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+       compatible = "gumstix,omap3-overo-chestnut43", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
 
 &omap3_pmx_core2 {
index c633f7c..065c31c 100644 (file)
@@ -14,7 +14,7 @@
 
 / {
        model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Gallop43";
-       compatible = "gumstix,omap3-overo-gallop43", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+       compatible = "gumstix,omap3-overo-gallop43", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
 
 &omap3_pmx_core2 {
index fb88ebc..e38c1c5 100644 (file)
@@ -14,7 +14,7 @@
 
 / {
        model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Palo35";
-       compatible = "gumstix,omap3-overo-palo35", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+       compatible = "gumstix,omap3-overo-palo35", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
 
 &omap3_pmx_core2 {
index 76cca00..e6dc231 100644 (file)
@@ -14,7 +14,7 @@
 
 / {
        model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Palo43";
-       compatible = "gumstix,omap3-overo-palo43", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+       compatible = "gumstix,omap3-overo-palo43", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
 
 &omap3_pmx_core2 {
index cc081a9..587c08c 100644 (file)
@@ -14,7 +14,7 @@
 
 / {
        model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Summit";
-       compatible = "gumstix,omap3-overo-summit", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+       compatible = "gumstix,omap3-overo-summit", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
 
 &omap3_pmx_core2 {
index 1de41c0..f57de60 100644 (file)
@@ -14,6 +14,6 @@
 
 / {
        model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Tobi";
-       compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+       compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
 
index 9ed1311..281af6c 100644 (file)
@@ -14,5 +14,5 @@
 
 / {
        model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on TobiDuo";
-       compatible = "gumstix,omap3-overo-tobiduo", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+       compatible = "gumstix,omap3-overo-tobiduo", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
index 81b957f..ea50995 100644 (file)
@@ -16,7 +16,7 @@
 / {
        model = "Pandora Handheld Console 1GHz";
 
-       compatible = "openpandora,omap3-pandora-1ghz", "ti,omap36xx", "ti,omap3";
+       compatible = "openpandora,omap3-pandora-1ghz", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 };
 
 &omap3_pmx_core2 {
index ae96002..24bf3fd 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        model = "CompuLab SBC-T3530 with CM-T3530";
-       compatible = "compulab,omap3-sbc-t3530", "compulab,omap3-cm-t3530", "ti,omap34xx", "ti,omap3";
+       compatible = "compulab,omap3-sbc-t3530", "compulab,omap3-cm-t3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
 
        aliases {
                display0 = &dvi0;
index 7de6df1..eb3893b 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        model = "CompuLab SBC-T3730 with CM-T3730";
-       compatible = "compulab,omap3-sbc-t3730", "compulab,omap3-cm-t3730", "ti,omap36xx", "ti,omap3";
+       compatible = "compulab,omap3-sbc-t3730", "compulab,omap3-cm-t3730", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        aliases {
                display0 = &dvi0;
index 40a8733..b6879cd 100644 (file)
@@ -9,7 +9,7 @@
 
 / {
        model = "LG Optimus Black";
-       compatible = "lg,omap3-sniper", "ti,omap36xx", "ti,omap3";
+       compatible = "lg,omap3-sniper", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        cpus {
                cpu@0 {
index 6276e70..64221e3 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        model = "TI OMAP3 Thunder baseboard with TAO3530 SOM";
-       compatible = "technexion,omap3-thunder", "technexion,omap3-tao3530", "ti,omap34xx", "ti,omap3";
+       compatible = "technexion,omap3-thunder", "technexion,omap3-tao3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
 };
 
 &omap3_pmx_core {
index db3a2fe..d240e39 100644 (file)
@@ -9,7 +9,7 @@
 
 / {
        model = "TI Zoom3";
-       compatible = "ti,omap3-zoom3", "ti,omap36xx", "ti,omap3";
+       compatible = "ti,omap3-zoom3", "ti,omap3630", "ti,omap36xx", "ti,omap3";
 
        cpus {
                cpu@0 {
index 0abd611..7bfde8a 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        model = "TI OMAP3430 SDP";
-       compatible = "ti,omap3430-sdp", "ti,omap3";
+       compatible = "ti,omap3430-sdp", "ti,omap3430", "ti,omap3";
 
        memory@80000000 {
                device_type = "memory";
index 7b09cbe..c4dd980 100644 (file)
 / {
        cpus {
                cpu: cpu@0 {
-                       /* OMAP343x/OMAP35xx variants OPP1-5 */
-                       operating-points = <
-                               /* kHz    uV */
-                               125000   975000
-                               250000  1075000
-                               500000  1200000
-                               550000  1270000
-                               600000  1350000
-                       >;
+                       /* OMAP343x/OMAP35xx variants OPP1-6 */
+                       operating-points-v2 = <&cpu0_opp_table>;
+
                        clock-latency = <300000>; /* From legacy driver */
                };
        };
 
+       /* see Documentation/devicetree/bindings/opp/opp.txt */
+       cpu0_opp_table: opp-table {
+               compatible = "operating-points-v2-ti-cpu";
+               syscon = <&scm_conf>;
+
+               opp1-125000000 {
+                       opp-hz = /bits/ 64 <125000000>;
+                       /*
+                        * we currently only select the max voltage from table
+                        * Table 3-3 of the omap3530 Data sheet (SPRS507F).
+                        * Format is: <target min max>
+                        */
+                       opp-microvolt = <975000 975000 975000>;
+                       /*
+                        * first value is silicon revision bit mask
+                        * second one 720MHz Device Identification bit mask
+                        */
+                       opp-supported-hw = <0xffffffff 3>;
+               };
+
+               opp2-250000000 {
+                       opp-hz = /bits/ 64 <250000000>;
+                       opp-microvolt = <1075000 1075000 1075000>;
+                       opp-supported-hw = <0xffffffff 3>;
+                       opp-suspend;
+               };
+
+               opp3-500000000 {
+                       opp-hz = /bits/ 64 <500000000>;
+                       opp-microvolt = <1200000 1200000 1200000>;
+                       opp-supported-hw = <0xffffffff 3>;
+               };
+
+               opp4-550000000 {
+                       opp-hz = /bits/ 64 <550000000>;
+                       opp-microvolt = <1275000 1275000 1275000>;
+                       opp-supported-hw = <0xffffffff 3>;
+               };
+
+               opp5-600000000 {
+                       opp-hz = /bits/ 64 <600000000>;
+                       opp-microvolt = <1350000 1350000 1350000>;
+                       opp-supported-hw = <0xffffffff 3>;
+               };
+
+               opp6-720000000 {
+                       opp-hz = /bits/ 64 <720000000>;
+                       opp-microvolt = <1350000 1350000 1350000>;
+                       /* only high-speed grade omap3530 devices */
+                       opp-supported-hw = <0xffffffff 2>;
+                       turbo-mode;
+               };
+       };
+
        ocp@68000000 {
                omap3_pmx_core2: pinmux@480025d8 {
                        compatible = "ti,omap3-padconf", "pinctrl-single";
index 1e552f0..c618cb2 100644 (file)
        };
 
        cpus {
-               /* OMAP3630/OMAP37xx 'standard device' variants OPP50 to OPP130 */
+               /* OMAP3630/OMAP37xx variants OPP50 to OPP130 and OPP1G */
                cpu: cpu@0 {
-                       operating-points = <
-                               /* kHz    uV */
-                               300000  1012500
-                               600000  1200000
-                               800000  1325000
-                       >;
-                       clock-latency = <300000>; /* From legacy driver */
+                       operating-points-v2 = <&cpu0_opp_table>;
+
+                       vbb-supply = <&abb_mpu_iva>;
+                       clock-latency = <300000>; /* From omap-cpufreq driver */
+               };
+       };
+
+       /* see Documentation/devicetree/bindings/opp/opp.txt */
+       cpu0_opp_table: opp-table {
+               compatible = "operating-points-v2-ti-cpu";
+               syscon = <&scm_conf>;
+
+               opp50-300000000 {
+                       opp-hz = /bits/ 64 <300000000>;
+                       /*
+                        * we currently only select the max voltage from table
+                        * Table 4-19 of the DM3730 Data sheet (SPRS685B)
+                        * Format is:   cpu0-supply:    <target min max>
+                        *              vbb-supply:     <target min max>
+                        */
+                       opp-microvolt = <1012500 1012500 1012500>,
+                                        <1012500 1012500 1012500>;
+                       /*
+                        * first value is silicon revision bit mask
+                        * second one is "speed binned" bit mask
+                        */
+                       opp-supported-hw = <0xffffffff 3>;
+                       opp-suspend;
+               };
+
+               opp100-600000000 {
+                       opp-hz = /bits/ 64 <600000000>;
+                       opp-microvolt = <1200000 1200000 1200000>,
+                                        <1200000 1200000 1200000>;
+                       opp-supported-hw = <0xffffffff 3>;
+               };
+
+               opp130-800000000 {
+                       opp-hz = /bits/ 64 <800000000>;
+                       opp-microvolt = <1325000 1325000 1325000>,
+                                        <1325000 1325000 1325000>;
+                       opp-supported-hw = <0xffffffff 3>;
                };
+
+               opp1g-1000000000 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <1375000 1375000 1375000>,
+                                        <1375000 1375000 1375000>;
+                       /* only on am/dm37x with speed-binned bit set */
+                       opp-supported-hw = <0xffffffff 2>;
+                       turbo-mode;
+               };
+       };
+
+       opp_supply_mpu_iva: opp_supply {
+               compatible = "ti,omap-opp-supply";
+               ti,absolute-max-voltage-uv = <1375000>;
        };
 
        ocp@68000000 {
index 4454449..a40fe8d 100644 (file)
                compatible = "ti,wl1285", "ti,wl1283";
                reg = <2>;
                /* gpio_100 with gpmc_wait2 pad as wakeirq */
-               interrupts-extended = <&gpio4 4 IRQ_TYPE_EDGE_RISING>,
+               interrupts-extended = <&gpio4 4 IRQ_TYPE_LEVEL_HIGH>,
                                      <&omap4_pmx_core 0x4e>;
                interrupt-names = "irq", "wakeup";
                ref-clock-frequency = <26000000>;
index 14be2ec..55ea8b6 100644 (file)
                compatible = "ti,wl1271";
                reg = <2>;
                /* gpio_53 with gpmc_ncs3 pad as wakeup */
-               interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_RISING>,
+               interrupts-extended = <&gpio2 21 IRQ_TYPE_LEVEL_HIGH>,
                                      <&omap4_pmx_core 0x3a>;
                interrupt-names = "irq", "wakeup";
                ref-clock-frequency = <38400000>;
index 3c27496..91480ac 100644 (file)
                compatible = "ti,wl1281";
                reg = <2>;
                interrupt-parent = <&gpio1>;
-               interrupts = <21 IRQ_TYPE_EDGE_RISING>; /* gpio 53 */
+               interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* gpio 53 */
                ref-clock-frequency = <26000000>;
                tcxo-clock-frequency = <26000000>;
        };
index 6dbbc9b..d003221 100644 (file)
@@ -69,7 +69,7 @@
                compatible = "ti,wl1271";
                reg = <2>;
                interrupt-parent = <&gpio2>;
-               interrupts = <9 IRQ_TYPE_EDGE_RISING>; /* gpio 41 */
+               interrupts = <9 IRQ_TYPE_LEVEL_HIGH>; /* gpio 41 */
                ref-clock-frequency = <38400000>;
        };
 };
index 7fff555..68ac046 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&wlcore_irq_pin>;
                interrupt-parent = <&gpio1>;
-               interrupts = <14 IRQ_TYPE_EDGE_RISING>; /* gpio 14 */
+               interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;  /* gpio 14 */
                ref-clock-frequency = <26000000>;
        };
 };
index fac2e57..4791834 100644 (file)
                };
        };
 
-       gpu_cm: clock-controller@1500 {
+       gpu_cm: gpu_cm@1500 {
                compatible = "ti,omap4-cm";
                reg = <0x1500 0x100>;
                #address-cells = <1>;
index e4a0d51..0a3a7d6 100644 (file)
                                                 <STM32_PINMUX('F', 6, AF9)>; /* QSPI_BK1_IO3 */
                                        bias-disable;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                                pins2 {
                                        pinmux = <STM32_PINMUX('B', 6, AF10)>; /* QSPI_BK1_NCS */
                                        bias-pull-up;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                        };
 
                                                 <STM32_PINMUX('G', 7, AF11)>; /* QSPI_BK2_IO3 */
                                        bias-disable;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                                pins2 {
                                        pinmux = <STM32_PINMUX('C', 0, AF10)>; /* QSPI_BK2_NCS */
                                        bias-pull-up;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                        };
 
index 89d29b5..91fc0a3 100644 (file)
 
        ov5640: camera@3c {
                compatible = "ovti,ov5640";
-               pinctrl-names = "default";
-               pinctrl-0 = <&ov5640_pins>;
                reg = <0x3c>;
                clocks = <&clk_ext_camera>;
                clock-names = "xclk";
                DOVDD-supply = <&v2v8>;
-               powerdown-gpios = <&stmfx_pinctrl 18 GPIO_ACTIVE_HIGH>;
-               reset-gpios = <&stmfx_pinctrl 19 GPIO_ACTIVE_LOW>;
+               powerdown-gpios = <&stmfx_pinctrl 18 (GPIO_ACTIVE_HIGH | GPIO_PUSH_PULL)>;
+               reset-gpios = <&stmfx_pinctrl 19 (GPIO_ACTIVE_LOW | GPIO_PUSH_PULL)>;
                rotation = <180>;
                status = "okay";
 
 
                        joystick_pins: joystick {
                                pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4";
-                               drive-push-pull;
                                bias-pull-down;
                        };
-
-                       ov5640_pins: camera {
-                               pins = "agpio2", "agpio3"; /* stmfx pins 18 & 19 */
-                               drive-push-pull;
-                               output-low;
-                       };
                };
        };
 };
index 9b11654..f98e037 100644 (file)
                        interrupt-names = "int0", "int1";
                        clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
                        clock-names = "hclk", "cclk";
-                       bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
+                       bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
                        status = "disabled";
                };
 
                        interrupt-names = "int0", "int1";
                        clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
                        clock-names = "hclk", "cclk";
-                       bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
+                       bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
                        status = "disabled";
                };
 
index 874231b..8aebefd 100644 (file)
                        compatible = "allwinner,sun7i-a20-csi0";
                        reg = <0x01c09000 0x1000>;
                        interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI0>,
-                                <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
-                       clock-names = "bus", "mod", "isp", "ram";
+                       clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
+                       clock-names = "bus", "isp", "ram";
                        resets = <&ccu RST_CSI0>;
                        status = "disabled";
                };
index 568b90e..3bec3e0 100644 (file)
        vqmmc-supply = <&reg_dldo1>;
        non-removable;
        wakeup-source;
+       keep-power-in-suspend;
        status = "okay";
 
        brcmf: wifi@1 {
index dc8a5f3..c8ebb23 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
                reg = <0x70>;
+               i2c-mux-idle-disconnect;
 
                sff0_i2c: i2c@1 {
                        #address-cells = <1>;
                reg = <0x71>;
                #address-cells = <1>;
                #size-cells = <0>;
+               i2c-mux-idle-disconnect;
 
                sff5_i2c: i2c@1 {
                        #address-cells = <1>;
index 01e3c0f..231f897 100644 (file)
@@ -167,6 +167,7 @@ CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_DA8XX=y
 CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_GPIO=m
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
index 9bfffbe..0f7381e 100644 (file)
@@ -276,6 +276,7 @@ CONFIG_VIDEO_OV5640=m
 CONFIG_VIDEO_OV5645=m
 CONFIG_IMX_IPUV3_CORE=y
 CONFIG_DRM=y
+CONFIG_DRM_MSM=y
 CONFIG_DRM_PANEL_LVDS=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
index d3f5097..40d7f1a 100644 (file)
@@ -356,15 +356,15 @@ CONFIG_DRM_OMAP_CONNECTOR_HDMI=m
 CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV=m
 CONFIG_DRM_OMAP_PANEL_DPI=m
 CONFIG_DRM_OMAP_PANEL_DSI_CM=m
-CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM=m
-CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02=m
-CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01=m
-CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1=m
-CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m
-CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m
 CONFIG_DRM_TILCDC=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_TI_TFP410=m
+CONFIG_DRM_PANEL_LG_LB035Q02=m
+CONFIG_DRM_PANEL_NEC_NL8048HL11=m
+CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m
+CONFIG_DRM_PANEL_SONY_ACX565AKM=m
+CONFIG_DRM_PANEL_TPO_TD028TTEC1=m
+CONFIG_DRM_PANEL_TPO_TD043MTEA1=m
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
index 567dbed..f1d0a78 100644 (file)
@@ -82,7 +82,7 @@
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_CP15_MMU
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
 {
        unsigned int domain;
 
@@ -94,7 +94,7 @@ static inline unsigned int get_domain(void)
        return domain;
 }
 
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
 {
        asm volatile(
        "mcr    p15, 0, %0, c3, c0      @ set domain"
@@ -102,12 +102,12 @@ static inline void set_domain(unsigned val)
        isb();
 }
 #else
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
 {
        return 0;
 }
 
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
 {
 }
 #endif
index 303248e..98c6b91 100644 (file)
@@ -22,7 +22,7 @@
  * perform such accesses (eg, via list poison values) which could then
  * be exploited for priviledge escalation.
  */
-static inline unsigned int uaccess_save_and_enable(void)
+static __always_inline unsigned int uaccess_save_and_enable(void)
 {
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
        unsigned int old_domain = get_domain();
@@ -37,7 +37,7 @@ static inline unsigned int uaccess_save_and_enable(void)
 #endif
 }
 
-static inline void uaccess_restore(unsigned int flags)
+static __always_inline void uaccess_restore(unsigned int flags)
 {
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
        /* Restore the user access mask */
index a7810be..4a39828 100644 (file)
@@ -68,7 +68,7 @@ ENDPROC(__vet_atags)
  * The following fragment of code is executed with the MMU on in MMU mode,
  * and uses absolute addresses; this is not position independent.
  *
- *  r0  = cp#15 control register
+ *  r0  = cp#15 control register (exc_ret for M-class)
  *  r1  = machine ID
  *  r2  = atags/dtb pointer
  *  r9  = processor ID
@@ -137,7 +137,8 @@ __mmap_switched_data:
 #ifdef CONFIG_CPU_CP15
        .long   cr_alignment                    @ r3
 #else
-       .long   0                               @ r3
+M_CLASS(.long  exc_ret)                        @ r3
+AR_CLASS(.long 0)                              @ r3
 #endif
        .size   __mmap_switched_data, . - __mmap_switched_data
 
index afa350f..0fc814b 100644 (file)
@@ -201,6 +201,8 @@ M_CLASS(streq       r3, [r12, #PMSAv8_MAIR1])
        bic     r0, r0, #V7M_SCB_CCR_IC
 #endif
        str     r0, [r12, V7M_SCB_CCR]
+       /* Pass exc_ret to __mmap_switched */
+       mov     r0, r10
 #endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
        ret     lr
 ENDPROC(__after_proc_init)
index 8062412..9fc5c73 100644 (file)
@@ -462,8 +462,8 @@ static s8 dm365_queue_priority_mapping[][2] = {
 };
 
 static const struct dma_slave_map dm365_edma_map[] = {
-       { "davinci-mcbsp.0", "tx", EDMA_FILTER_PARAM(0, 2) },
-       { "davinci-mcbsp.0", "rx", EDMA_FILTER_PARAM(0, 3) },
+       { "davinci-mcbsp", "tx", EDMA_FILTER_PARAM(0, 2) },
+       { "davinci-mcbsp", "rx", EDMA_FILTER_PARAM(0, 3) },
        { "davinci_voicecodec", "tx", EDMA_FILTER_PARAM(0, 2) },
        { "davinci_voicecodec", "rx", EDMA_FILTER_PARAM(0, 3) },
        { "spi_davinci.2", "tx", EDMA_FILTER_PARAM(0, 10) },
index 39a7d93..24dd5bb 100644 (file)
@@ -62,13 +62,13 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
  */
 void imx6q_cpuidle_fec_irqs_used(void)
 {
-       imx6q_cpuidle_driver.states[1].disabled = true;
+       cpuidle_driver_state_disabled(&imx6q_cpuidle_driver, 1, true);
 }
 EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_used);
 
 void imx6q_cpuidle_fec_irqs_unused(void)
 {
-       imx6q_cpuidle_driver.states[1].disabled = false;
+       cpuidle_driver_state_disabled(&imx6q_cpuidle_driver, 1, false);
 }
 EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_unused);
 
index d942a33..2efd18e 100644 (file)
@@ -89,6 +89,13 @@ static struct iommu_platform_data omap3_iommu_pdata = {
        .reset_name = "mmu",
        .assert_reset = omap_device_assert_hardreset,
        .deassert_reset = omap_device_deassert_hardreset,
+       .device_enable = omap_device_enable,
+       .device_idle = omap_device_idle,
+};
+
+static struct iommu_platform_data omap3_iommu_isp_pdata = {
+       .device_enable = omap_device_enable,
+       .device_idle = omap_device_idle,
 };
 
 static int omap3_sbc_t3730_twl_callback(struct device *dev,
@@ -424,6 +431,8 @@ static struct iommu_platform_data omap4_iommu_pdata = {
        .reset_name = "mmu_cache",
        .assert_reset = omap_device_assert_hardreset,
        .deassert_reset = omap_device_deassert_hardreset,
+       .device_enable = omap_device_enable,
+       .device_idle = omap_device_idle,
 };
 #endif
 
@@ -617,6 +626,8 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
 #ifdef CONFIG_ARCH_OMAP3
        OF_DEV_AUXDATA("ti,omap2-iommu", 0x5d000000, "5d000000.mmu",
                       &omap3_iommu_pdata),
+       OF_DEV_AUXDATA("ti,omap2-iommu", 0x480bd400, "480bd400.mmu",
+                      &omap3_iommu_isp_pdata),
        OF_DEV_AUXDATA("ti,omap3-smartreflex-core", 0x480cb000,
                       "480cb000.smartreflex", &omap_sr_pdata[OMAP_SR_CORE]),
        OF_DEV_AUXDATA("ti,omap3-smartreflex-mpu-iva", 0x480c9000,
index 239084c..26cbce1 100644 (file)
@@ -481,14 +481,18 @@ static void sunxi_mc_smp_cpu_die(unsigned int l_cpu)
 static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
 {
        u32 reg;
+       int gating_bit = cpu;
 
        pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
        if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
                return -EINVAL;
 
+       if (is_a83t && cpu == 0)
+               gating_bit = 4;
+
        /* gate processor power */
        reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
-       reg |= PRCM_PWROFF_GATING_REG_CORE(cpu);
+       reg |= PRCM_PWROFF_GATING_REG_CORE(gating_bit);
        writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
        udelay(20);
 
index 2447427..69f3fa2 100644 (file)
@@ -203,7 +203,7 @@ void tegra20_cpuidle_pcie_irqs_in_use(void)
 {
        pr_info_once(
                "Disabling cpuidle LP2 state, since PCIe IRQs are in use\n");
-       tegra_idle_driver.states[1].disabled = true;
+       cpuidle_driver_state_disabled(&tegra_idle_driver, 1, true);
 }
 
 int __init tegra20_cpuidle_init(void)
index 04b3643..788c5cf 100644 (file)
@@ -324,7 +324,7 @@ union offset_union {
        __put32_unaligned_check("strbt", val, addr)
 
 static void
-do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs *regs, union offset_union offset)
+do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, union offset_union offset)
 {
        if (!LDST_U_BIT(instr))
                offset.un = -offset.un;
@@ -337,7 +337,7 @@ do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs
 }
 
 static int
-do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrhstrh(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd = RD_BITS(instr);
 
@@ -386,8 +386,7 @@ do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *r
 }
 
 static int
-do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
-                     struct pt_regs *regs)
+do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd = RD_BITS(instr);
        unsigned int rd2;
@@ -449,7 +448,7 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
 }
 
 static int
-do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrstr(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd = RD_BITS(instr);
 
@@ -498,7 +497,7 @@ do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *reg
  * PU = 10             A                    B
  */
 static int
-do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd, rn, correction, nr_regs, regbits;
        unsigned long eaddr, newaddr;
@@ -539,7 +538,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
         * processor for us.
         */
        if (addr != eaddr) {
-               pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
+               pr_err("LDMSTM: PC = %08lx, instr = %08x, "
                        "addr = %08lx, eaddr = %08lx\n",
                         instruction_pointer(regs), instr, addr, eaddr);
                show_regs(regs);
@@ -716,10 +715,10 @@ thumb2arm(u16 tinstr)
  * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
  */
 static void *
-do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
                            union offset_union *poffset)
 {
-       unsigned long instr = *pinstr;
+       u32 instr = *pinstr;
        u16 tinst1 = (instr >> 16) & 0xffff;
        u16 tinst2 = instr & 0xffff;
 
@@ -767,17 +766,48 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
        return NULL;
 }
 
+static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
+{
+       u32 instr = 0;
+       int fault;
+
+       if (user_mode(regs))
+               fault = get_user(instr, ip);
+       else
+               fault = probe_kernel_address(ip, instr);
+
+       *inst = __mem_to_opcode_arm(instr);
+
+       return fault;
+}
+
+static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
+{
+       u16 instr = 0;
+       int fault;
+
+       if (user_mode(regs))
+               fault = get_user(instr, ip);
+       else
+               fault = probe_kernel_address(ip, instr);
+
+       *inst = __mem_to_opcode_thumb16(instr);
+
+       return fault;
+}
+
 static int
 do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
        union offset_union uninitialized_var(offset);
-       unsigned long instr = 0, instrptr;
-       int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
+       unsigned long instrptr;
+       int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
        unsigned int type;
-       unsigned int fault;
+       u32 instr = 0;
        u16 tinstr = 0;
        int isize = 4;
        int thumb2_32b = 0;
+       int fault;
 
        if (interrupts_enabled(regs))
                local_irq_enable();
@@ -786,15 +816,14 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        if (thumb_mode(regs)) {
                u16 *ptr = (u16 *)(instrptr & ~1);
-               fault = probe_kernel_address(ptr, tinstr);
-               tinstr = __mem_to_opcode_thumb16(tinstr);
+
+               fault = alignment_get_thumb(regs, ptr, &tinstr);
                if (!fault) {
                        if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
                            IS_T32(tinstr)) {
                                /* Thumb-2 32-bit */
-                               u16 tinst2 = 0;
-                               fault = probe_kernel_address(ptr + 1, tinst2);
-                               tinst2 = __mem_to_opcode_thumb16(tinst2);
+                               u16 tinst2;
+                               fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
                                instr = __opcode_thumb32_compose(tinstr, tinst2);
                                thumb2_32b = 1;
                        } else {
@@ -803,8 +832,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                        }
                }
        } else {
-               fault = probe_kernel_address((void *)instrptr, instr);
-               instr = __mem_to_opcode_arm(instr);
+               fault = alignment_get_arm(regs, (void *)instrptr, &instr);
        }
 
        if (fault) {
@@ -926,7 +954,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         * Oops, we didn't handle the instruction.
         */
        pr_err("Alignment trap: not handling instruction "
-               "%0*lx at [<%08lx>]\n",
+               "%0*x at [<%08lx>]\n",
                isize << 1,
                isize == 2 ? tinstr : instr, instrptr);
        ai_skipped += 1;
@@ -936,7 +964,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        ai_user += 1;
 
        if (ai_usermode & UM_WARN)
-               printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
+               printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*x "
                       "Address=0x%08lx FSR 0x%03x\n", current->comm,
                        task_pid_nr(current), instrptr,
                        isize << 1,
index 1448f14..1a49d50 100644 (file)
@@ -132,13 +132,11 @@ __v7m_setup_cont:
        dsb
        mov     r6, lr                  @ save LR
        ldr     sp, =init_thread_union + THREAD_START_SP
-       stmia   sp, {r0-r3, r12}
        cpsie   i
        svc     #0
 1:     cpsid   i
-       ldr     r0, =exc_ret
-       orr     lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
-       str     lr, [r0]
+       /* Calculate exc_ret */
+       orr     r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK
        ldmia   sp, {r0-r3, r12}
        str     r5, [r12, #11 * 4]      @ restore the original SVC vector entry
        mov     lr, r6                  @ restore LR
index 24f1aac..d5b6e81 100644 (file)
                reg = <1>;
        };
 };
+
+&reg_dc1sw {
+       /*
+        * Ethernet PHY needs 30ms to properly power up and some more
+        * to initialize. 100ms should be plenty of time to finish
+        * whole process.
+        */
+       regulator-enable-ramp-delay = <100000>;
+};
index e6fb968..2509920 100644 (file)
 };
 
 &reg_dc1sw {
+       /*
+        * Ethernet PHY needs 30ms to properly power up and some more
+        * to initialize. 100ms should be plenty of time to finish
+        * whole process.
+        */
+       regulator-enable-ramp-delay = <100000>;
        regulator-name = "vcc-phy";
 };
 
index 3eccbdb..70f4cce 100644 (file)
                clock-output-names = "ext-osc32k";
        };
 
-       pmu {
-               compatible = "arm,cortex-a53-pmu";
-               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
-       };
-
        psci {
                compatible = "arm,psci-0.2";
                method = "smc";
index 8a3a770..56789cc 100644 (file)
 
                pinmux: pinmux@14029c {
                        compatible = "pinctrl-single";
-                       reg = <0x0014029c 0x250>;
+                       reg = <0x0014029c 0x26c>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        pinctrl-single,register-width = <32>;
                        pinctrl-single,function-mask = <0xf>;
                        pinctrl-single,gpio-range = <
-                               &range 0 154 MODE_GPIO
+                               &range 0  91 MODE_GPIO
+                               &range 95 60 MODE_GPIO
                                >;
                        range: gpio-range {
                                #pinctrl-single,gpio-range-cells = <3>;
index 71e2e34..0098dfd 100644 (file)
                                        <&pinmux 108 16 27>,
                                        <&pinmux 135 77 6>,
                                        <&pinmux 141 67 4>,
-                                       <&pinmux 145 149 6>,
-                                       <&pinmux 151 91 4>;
+                                       <&pinmux 145 149 6>;
                };
 
                i2c1: i2c@e0000 {
index d98346d..078a501 100644 (file)
        status = "okay";
 
        i2c-mux@77 {
-               compatible = "nxp,pca9847";
+               compatible = "nxp,pca9547";
                reg = <0x77>;
                #address-cells = <1>;
                #size-cells = <0>;
index 408e0ec..b032f38 100644 (file)
@@ -33,7 +33,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster0_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@1 {
@@ -49,7 +49,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster0_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@100 {
@@ -65,7 +65,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster1_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@101 {
@@ -81,7 +81,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster1_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@200 {
@@ -97,7 +97,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster2_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@201 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster2_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@300 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster3_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@301 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster3_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@400 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster4_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@401 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster4_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@500 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster5_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@501 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster5_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@600 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster6_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@601 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster6_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@700 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster7_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@701 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster7_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cluster0_l2: l2-cache0 {
                        cache-level = <2>;
                };
 
-               cpu_pw20: cpu-pw20 {
+               cpu_pw15: cpu-pw15 {
                        compatible = "arm,idle-state";
-                       idle-state-name = "PW20";
+                       idle-state-name = "PW15";
                        arm,psci-suspend-param = <0x0>;
                        entry-latency-us = <2000>;
                        exit-latency-us = <2000>;
index 5f9d0da..23c8fad 100644 (file)
                        };
 
                        sdma2: dma-controller@302c0000 {
-                               compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+                               compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
                                reg = <0x302c0000 0x10000>;
                                interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MM_CLK_SDMA2_ROOT>,
                        };
 
                        sdma3: dma-controller@302b0000 {
-                               compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+                               compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
                                reg = <0x302b0000 0x10000>;
                                interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MM_CLK_SDMA3_ROOT>,
                                compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b40000 0x10000>;
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MM_CLK_DUMMY>,
+                               clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
                                         <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MM_CLK_USDHC1_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b50000 0x10000>;
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MM_CLK_DUMMY>,
+                               clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
                                         <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MM_CLK_USDHC2_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b60000 0x10000>;
                                interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MM_CLK_DUMMY>,
+                               clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
                                         <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MM_CLK_USDHC3_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                        };
 
                        sdma1: dma-controller@30bd0000 {
-                               compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+                               compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
                                reg = <0x30bd0000 0x10000>;
                                interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>,
index 785f4c4..43c4db3 100644 (file)
                        };
 
                        sdma3: dma-controller@302b0000 {
-                               compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma";
+                               compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
                                reg = <0x302b0000 0x10000>;
                                interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MN_CLK_SDMA3_ROOT>,
                        };
 
                        sdma2: dma-controller@302c0000 {
-                               compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma";
+                               compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
                                reg = <0x302c0000 0x10000>;
                                interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MN_CLK_SDMA2_ROOT>,
                                compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b40000 0x10000>;
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MN_CLK_DUMMY>,
+                               clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
                                         <&clk IMX8MN_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MN_CLK_USDHC1_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b50000 0x10000>;
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MN_CLK_DUMMY>,
+                               clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
                                         <&clk IMX8MN_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MN_CLK_USDHC2_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b60000 0x10000>;
                                interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MN_CLK_DUMMY>,
+                               clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
                                         <&clk IMX8MN_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MN_CLK_USDHC3_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                        };
 
                        sdma1: dma-controller@30bd0000 {
-                               compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma";
+                               compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
                                reg = <0x30bd0000 0x10000>;
                                interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>,
index af99473..32ce149 100644 (file)
@@ -88,9 +88,9 @@
                regulator-name = "0V9_ARM";
                regulator-min-microvolt = <900000>;
                regulator-max-microvolt = <1000000>;
-               gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
-               states = <1000000 0x0
-                          900000 0x1>;
+               gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+               states = <1000000 0x1
+                          900000 0x0>;
                regulator-always-on;
        };
 };
index 04115ca..55a3d1c 100644 (file)
                                             "fsl,imx7d-usdhc";
                                reg = <0x30b40000 0x10000>;
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MQ_CLK_DUMMY>,
+                               clocks = <&clk IMX8MQ_CLK_IPG_ROOT>,
                                         <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MQ_CLK_USDHC1_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                             "fsl,imx7d-usdhc";
                                reg = <0x30b50000 0x10000>;
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MQ_CLK_DUMMY>,
+                               clocks = <&clk IMX8MQ_CLK_IPG_ROOT>,
                                         <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MQ_CLK_USDHC2_ROOT>;
                                clock-names = "ipg", "ahb", "per";
index d105986..5f350cc 100644 (file)
                gpio = <&gpiosb 0 GPIO_ACTIVE_HIGH>;
        };
 
-       usb3_phy: usb3-phy {
-               compatible = "usb-nop-xceiv";
-               vcc-supply = <&exp_usb3_vbus>;
-       };
-
        vsdc_reg: vsdc-reg {
                compatible = "regulator-gpio";
                regulator-name = "vsdc";
        status = "okay";
 };
 
+&comphy2 {
+       connector {
+               compatible = "usb-a-connector";
+               phy-supply = <&exp_usb3_vbus>;
+       };
+};
+
 &usb3 {
        status = "okay";
        phys = <&comphy2 0>;
-       usb-phy = <&usb3_phy>;
 };
 
 &mdio {
index e152b0c..b806686 100644 (file)
@@ -44,7 +44,7 @@
                power-supply = <&pp3300_disp>;
 
                panel-timing {
-                       clock-frequency = <266604720>;
+                       clock-frequency = <266666667>;
                        hactive = <2400>;
                        hfront-porch = <48>;
                        hback-porch = <84>;
index 0d1f5f9..c133e8d 100644 (file)
        status = "okay";
 
        u2phy0_host: host-port {
-               phy-supply = <&vcc5v0_host>;
+               phy-supply = <&vcc5v0_typec>;
                status = "okay";
        };
 
 
 &usbdrd_dwc3_0 {
        status = "okay";
-       dr_mode = "otg";
+       dr_mode = "host";
 };
 
 &usbdrd3_1 {
index 0401d4e..e544deb 100644 (file)
                regulator-always-on;
                regulator-boot-on;
                regulator-min-microvolt = <800000>;
-               regulator-max-microvolt = <1400000>;
+               regulator-max-microvolt = <1700000>;
                vin-supply = <&vcc5v0_sys>;
        };
 };
        rk808: pmic@1b {
                compatible = "rockchip,rk808";
                reg = <0x1b>;
-               interrupt-parent = <&gpio1>;
-               interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
                #clock-cells = <1>;
                clock-output-names = "xin32k", "rk808-clkout2";
                pinctrl-names = "default";
 
        pmic {
                pmic_int_l: pmic-int-l {
-                       rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+                       rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
                };
 
                vsel1_gpio: vsel1-gpio {
 
 &sdmmc {
        bus-width = <4>;
-       cap-mmc-highspeed;
        cap-sd-highspeed;
        cd-gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
        disable-wp;
 
 &sdhci {
        bus-width = <8>;
-       mmc-hs400-1_8v;
-       mmc-hs400-enhanced-strobe;
+       mmc-hs200-1_8v;
        non-removable;
        status = "okay";
 };
index b1454d1..aca07c2 100644 (file)
@@ -79,6 +79,7 @@
 #define CAVIUM_CPU_PART_THUNDERX_83XX  0x0A3
 #define CAVIUM_CPU_PART_THUNDERX2      0x0AF
 
+#define BRCM_CPU_PART_BRAHMA_B53       0x100
 #define BRCM_CPU_PART_VULCAN           0x516
 
 #define QCOM_CPU_PART_FALKOR_V1                0x800
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
 #define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_BRAHMA_B53 MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_BRAHMA_B53)
 #define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
index 9a21b84..8dc6c5c 100644 (file)
 #define PROT_DEFAULT           (_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT      (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
 
-#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL       (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -80,8 +80,9 @@
 #define PAGE_S2_DEVICE         __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN)
 
 #define PAGE_NONE              __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
+#define PAGE_SHARED            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY          __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC     __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
 #define PAGE_EXECONLY          __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
index 8330810..565aa45 100644 (file)
@@ -283,23 +283,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
        set_pte(ptep, pte);
 }
 
-#define __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t pte_a, pte_t pte_b)
-{
-       pteval_t lhs, rhs;
-
-       lhs = pte_val(pte_a);
-       rhs = pte_val(pte_b);
-
-       if (pte_present(pte_a))
-               lhs &= ~PTE_RDONLY;
-
-       if (pte_present(pte_b))
-               rhs &= ~PTE_RDONLY;
-
-       return (lhs == rhs);
-}
-
 /*
  * Huge pte definitions.
  */
index 0c731bf..0c20a7c 100644 (file)
@@ -31,13 +31,6 @@ int __arm64_get_clock_mode(struct timekeeper *tk)
 #define __arch_get_clock_mode __arm64_get_clock_mode
 
 static __always_inline
-int __arm64_use_vsyscall(struct vdso_data *vdata)
-{
-       return !vdata[CS_HRES_COARSE].clock_mode;
-}
-#define __arch_use_vsyscall __arm64_use_vsyscall
-
-static __always_inline
 void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk)
 {
        vdata[CS_HRES_COARSE].mask      = VDSO_PRECISION_MASK;
index 6c3b10a..93f34b4 100644 (file)
@@ -489,6 +489,7 @@ static const struct midr_range arm64_ssb_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        {},
 };
 
@@ -573,6 +574,7 @@ static const struct midr_range spectre_v2_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        { /* sentinel */ }
 };
 
@@ -659,17 +661,23 @@ static const struct midr_range arm64_harden_el2_vectors[] = {
 #endif
 
 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
-
-static const struct midr_range arm64_repeat_tlbi_cpus[] = {
+static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
-       MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
+       {
+               ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
+       },
+       {
+               .midr_range.model = MIDR_QCOM_KRYO,
+               .matches = is_kryo_midr,
+       },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1286807
-       MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+       {
+               ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+       },
 #endif
        {},
 };
-
 #endif
 
 #ifdef CONFIG_CAVIUM_ERRATUM_27456
@@ -737,6 +745,33 @@ static const struct midr_range erratum_1418040_list[] = {
 };
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_845719
+static const struct midr_range erratum_845719_list[] = {
+       /* Cortex-A53 r0p[01234] */
+       MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+       /* Brahma-B53 r0p[0] */
+       MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+       {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_843419
+static const struct arm64_cpu_capabilities erratum_843419_list[] = {
+       {
+               /* Cortex-A53 r0p[01234] */
+               .matches = is_affected_midr_range,
+               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+               MIDR_FIXED(0x4, BIT(8)),
+       },
+       {
+               /* Brahma-B53 r0p[0] */
+               .matches = is_affected_midr_range,
+               ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+       },
+       {},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -768,19 +803,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_843419
        {
-       /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 843419",
                .capability = ARM64_WORKAROUND_843419,
-               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
-               MIDR_FIXED(0x4, BIT(8)),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = cpucap_multi_entry_cap_matches,
+               .match_list = erratum_843419_list,
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
        {
-       /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
-               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+               ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
        },
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_23154
@@ -816,6 +850,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        {
                .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = qcom_erratum_1003_list,
        },
@@ -824,7 +859,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        {
                .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
-               ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = cpucap_multi_entry_cap_matches,
+               .match_list = arm64_repeat_tlbi_list,
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_858921
index 2071260..46822af 100644 (file)
@@ -632,6 +632,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
         */
        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+       if (!system_supports_32bit_el0())
+               val |= ARMV8_PMU_PMCR_LC;
        __vcpu_sys_reg(vcpu, r->reg) = val;
 }
 
@@ -682,6 +684,8 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                val = __vcpu_sys_reg(vcpu, PMCR_EL0);
                val &= ~ARMV8_PMU_PMCR_MASK;
                val |= p->regval & ARMV8_PMU_PMCR_MASK;
+               if (!system_supports_32bit_el0())
+                       val |= ARMV8_PMU_PMCR_LC;
                __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
                kvm_pmu_handle_pmcr(vcpu, val);
                kvm_vcpu_pmu_restore_guest(vcpu);
index 77a836e..df69eaa 100644 (file)
@@ -84,7 +84,7 @@ void __init prom_init(void)
                 * Here we will start up CPU1 in the background and ask it to
                 * reconfigure itself then go back to sleep.
                 */
-               memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
+               memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20);
                __sync();
                set_c0_cause(C_SW0);
                cpumask_set_cpu(1, &bmips_booted_mask);
index bf6a8af..581a6a3 100644 (file)
@@ -75,11 +75,11 @@ static inline int register_bmips_smp_ops(void)
 #endif
 }
 
-extern char bmips_reset_nmi_vec;
-extern char bmips_reset_nmi_vec_end;
-extern char bmips_smp_movevec;
-extern char bmips_smp_int_vec;
-extern char bmips_smp_int_vec_end;
+extern char bmips_reset_nmi_vec[];
+extern char bmips_reset_nmi_vec_end[];
+extern char bmips_smp_movevec[];
+extern char bmips_smp_int_vec[];
+extern char bmips_smp_int_vec_end[];
 
 extern int bmips_smp_enabled;
 extern int bmips_cpu_offset;
index e78462e..b088255 100644 (file)
@@ -24,6 +24,8 @@
 
 #define VDSO_HAS_CLOCK_GETRES          1
 
+#define __VDSO_USE_SYSCALL             ULLONG_MAX
+
 #ifdef CONFIG_MIPS_CLOCK_VSYSCALL
 
 static __always_inline long gettimeofday_fallback(
@@ -205,7 +207,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
                break;
 #endif
        default:
-               cycle_now = 0;
+               cycle_now = __VDSO_USE_SYSCALL;
                break;
        }
 
index 1953147..00d41b9 100644 (file)
@@ -28,13 +28,6 @@ int __mips_get_clock_mode(struct timekeeper *tk)
 }
 #define __arch_get_clock_mode __mips_get_clock_mode
 
-static __always_inline
-int __mips_use_vsyscall(struct vdso_data *vdata)
-{
-       return (vdata[CS_HRES_COARSE].clock_mode != VDSO_CLOCK_NONE);
-}
-#define __arch_use_vsyscall __mips_use_vsyscall
-
 /* The asm-generic header needs to be included after the definitions above */
 #include <asm-generic/vdso/vsyscall.h>
 
index 76fae9b..712c15d 100644 (file)
@@ -464,10 +464,10 @@ static void bmips_wr_vec(unsigned long dst, char *start, char *end)
 
 static inline void bmips_nmi_handler_setup(void)
 {
-       bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
-               &bmips_reset_nmi_vec_end);
-       bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
-               &bmips_smp_int_vec_end);
+       bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec,
+               bmips_reset_nmi_vec_end);
+       bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec,
+               bmips_smp_int_vec_end);
 }
 
 struct reset_vec_info {
index e01cb33..41bb91f 100644 (file)
@@ -653,6 +653,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
                                   int restore_scratch)
 {
        if (restore_scratch) {
+               /*
+                * Ensure the MFC0 below observes the value written to the
+                * KScratch register by the prior MTC0.
+                */
+               if (scratch_reg >= 0)
+                       uasm_i_ehb(p);
+
                /* Reset default page size */
                if (PM_DEFAULT_MASK >> 16) {
                        uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -667,12 +674,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
                        uasm_i_mtc0(p, 0, C0_PAGEMASK);
                        uasm_il_b(p, r, lid);
                }
-               if (scratch_reg >= 0) {
-                       uasm_i_ehb(p);
+               if (scratch_reg >= 0)
                        UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-               } else {
+               else
                        UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-               }
        } else {
                /* Reset default page size */
                if (PM_DEFAULT_MASK >> 16) {
@@ -921,6 +926,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
        }
        if (mode != not_refill && check_for_high_segbits) {
                uasm_l_large_segbits_fault(l, *p);
+
+               if (mode == refill_scratch && scratch_reg >= 0)
+                       uasm_i_ehb(p);
+
                /*
                 * We get here if we are an xsseg address, or if we are
                 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
@@ -939,12 +948,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                uasm_i_jr(p, ptr);
 
                if (mode == refill_scratch) {
-                       if (scratch_reg >= 0) {
-                               uasm_i_ehb(p);
+                       if (scratch_reg >= 0)
                                UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-                       } else {
+                       else
                                UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-                       }
                } else {
                        uasm_i_nop(p);
                }
index ef3847e..e5b6cad 100644 (file)
@@ -38,10 +38,3 @@ config REPLICATE_KTEXT
          Say Y here to enable replicating the kernel text across multiple
          nodes in a NUMA cluster.  This trades memory for speed.
 
-config REPLICATE_EXHANDLERS
-       bool "Exception handler replication support"
-       depends on SGI_IP27
-       help
-         Say Y here to enable replicating the kernel exception handlers
-         across multiple nodes in a NUMA cluster. This trades memory for
-         speed.
index 59d5375..79a52c4 100644 (file)
@@ -69,23 +69,14 @@ static void per_hub_init(cnodeid_t cnode)
 
        hub_rtc_init(cnode);
 
-#ifdef CONFIG_REPLICATE_EXHANDLERS
-       /*
-        * If this is not a headless node initialization,
-        * copy over the caliased exception handlers.
-        */
-       if (get_compact_nodeid() == cnode) {
-               extern char except_vec2_generic, except_vec3_generic;
-               extern void build_tlb_refill_handler(void);
-
-               memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
-               memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
-               build_tlb_refill_handler();
-               memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
-               memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
+       if (nasid) {
+               /* copy exception handlers from first node to current node */
+               memcpy((void *)NODE_OFFSET_TO_K0(nasid, 0),
+                      (void *)CKSEG0, 0x200);
                __flush_cache_all();
+               /* switch to node local exception handlers */
+               REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
        }
-#endif
 }
 
 void per_cpu_init(void)
index fb077a9..8624a88 100644 (file)
@@ -332,11 +332,7 @@ static void __init mlreset(void)
                 * thinks it is a node 0 address.
                 */
                REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
-#ifdef CONFIG_REPLICATE_EXHANDLERS
-               REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
-#else
                REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
-#endif
 
 #ifdef LATER
                /*
index 1d1d748..b96d744 100644 (file)
@@ -2125,7 +2125,7 @@ ftrace_regs_caller:
        copy    %rp, %r26
        LDREG   -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
        ldo     -8(%r25), %r25
-       copy    %r3, %arg2
+       ldo     -FTRACE_FRAME_SIZE(%r1), %arg2
        b,l     ftrace_function_trampoline, %rp
        copy    %r1, %arg3 /* struct pt_regs */
 
index 677e9ba..f9dc597 100644 (file)
@@ -91,6 +91,7 @@
 
 static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
 {
+       addr &= 0xf0000000;     /* align addr to start of segment */
        barrier();      /* make sure thread.kuap is updated before playing with SRs */
        while (addr < end) {
                mtsrin(sr, addr);
index 409c9bf..57c229a 100644 (file)
@@ -175,4 +175,7 @@ do {                                                                        \
        ARCH_DLINFO_CACHE_GEOMETRY;                                     \
 } while (0)
 
+/* Relocate the kernel image to @final_address */
+void relocate(unsigned long final_address);
+
 #endif /* _ASM_POWERPC_ELF_H */
index a4e7762..100f1b5 100644 (file)
@@ -3249,7 +3249,20 @@ static void setup_secure_guest(unsigned long kbase, unsigned long fdt)
        /* Switch to secure mode. */
        prom_printf("Switching to secure mode.\n");
 
+       /*
+        * The ultravisor will do an integrity check of the kernel image but we
+        * relocated it so the check will fail. Restore the original image by
+        * relocating it back to the kernel virtual base address.
+        */
+       if (IS_ENABLED(CONFIG_RELOCATABLE))
+               relocate(KERNELBASE);
+
        ret = enter_secure_mode(kbase, fdt);
+
+       /* Relocate the kernel again. */
+       if (IS_ENABLED(CONFIG_RELOCATABLE))
+               relocate(kbase);
+
        if (ret != U_SUCCESS) {
                prom_printf("Returned %d from switching to secure mode.\n", ret);
                prom_rtas_os_term("Switch to secure mode failed.\n");
index 78bab17..b183ab9 100644 (file)
@@ -26,7 +26,8 @@ _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
 __secondary_hold_acknowledge __secondary_hold_spinloop __start
 logo_linux_clut224 btext_prepare_BAT
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
-__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
+__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC.
+relocate"
 
 NM="$1"
 OBJ="$2"
index 591bfb4..a3f9c66 100644 (file)
@@ -1217,6 +1217,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc;
        int i, r = -EBUSY;
+       u32 vp_id;
 
        pr_devel("connect_vcpu(cpu=%d)\n", cpu);
 
@@ -1228,25 +1229,32 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                return -EPERM;
        if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
                return -EBUSY;
-       if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
-               pr_devel("Duplicate !\n");
-               return -EEXIST;
-       }
        if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
                pr_devel("Out of bounds !\n");
                return -EINVAL;
        }
-       xc = kzalloc(sizeof(*xc), GFP_KERNEL);
-       if (!xc)
-               return -ENOMEM;
 
        /* We need to synchronize with queue provisioning */
        mutex_lock(&xive->lock);
+
+       vp_id = kvmppc_xive_vp(xive, cpu);
+       if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
+               pr_devel("Duplicate !\n");
+               r = -EEXIST;
+               goto bail;
+       }
+
+       xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+       if (!xc) {
+               r = -ENOMEM;
+               goto bail;
+       }
+
        vcpu->arch.xive_vcpu = xc;
        xc->xive = xive;
        xc->vcpu = vcpu;
        xc->server_num = cpu;
-       xc->vp_id = kvmppc_xive_vp(xive, cpu);
+       xc->vp_id = vp_id;
        xc->mfrr = 0xff;
        xc->valid = true;
 
index 955b820..fe3ed50 100644 (file)
@@ -220,6 +220,18 @@ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
        return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
 }
 
+static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
+{
+       struct kvm_vcpu *vcpu = NULL;
+       int i;
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
+                       return true;
+       }
+       return false;
+}
+
 /*
  * Mapping between guest priorities and host priorities
  * is as follow.
index 248c1ea..78b906f 100644 (file)
@@ -106,6 +106,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc = NULL;
        int rc;
+       u32 vp_id;
 
        pr_devel("native_connect_vcpu(server=%d)\n", server_num);
 
@@ -124,7 +125,8 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
 
        mutex_lock(&xive->lock);
 
-       if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
+       vp_id = kvmppc_xive_vp(xive, server_num);
+       if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
                pr_devel("Duplicate !\n");
                rc = -EEXIST;
                goto bail;
@@ -141,7 +143,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
        xc->vcpu = vcpu;
        xc->server_num = server_num;
 
-       xc->vp_id = kvmppc_xive_vp(xive, server_num);
+       xc->vp_id = vp_id;
        xc->valid = true;
        vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
 
index 02a5994..be3517e 100644 (file)
@@ -1142,6 +1142,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
        }
 
        /*
+        * If we have seen a tail call, we need a second pass.
+        * This is because bpf_jit_emit_common_epilogue() is called
+        * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
+        */
+       if (cgctx.seen & SEEN_TAILCALL) {
+               cgctx.idx = 0;
+               if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
+                       fp = org_fp;
+                       goto out_addrs;
+               }
+       }
+
+       /*
         * Pretend to build prologue, given the features we've seen.  This will
         * update ctgtx.idx as it pretends to output instructions, then we can
         * calculate total size from idx.
index 6bc24a4..6f300ab 100644 (file)
@@ -42,7 +42,7 @@ void pnv_pcibios_bus_add_device(struct pci_dev *pdev)
 {
        struct pci_dn *pdn = pci_get_pdn(pdev);
 
-       if (eeh_has_flag(EEH_FORCE_DISABLED))
+       if (!pdn || eeh_has_flag(EEH_FORCE_DISABLED))
                return;
 
        dev_dbg(&pdev->dev, "EEH: Setting up device\n");
index fbd6e6b..13e2516 100644 (file)
@@ -146,20 +146,25 @@ static int pnv_smp_cpu_disable(void)
        return 0;
 }
 
+static void pnv_flush_interrupts(void)
+{
+       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+               if (xive_enabled())
+                       xive_flush_interrupt();
+               else
+                       icp_opal_flush_interrupt();
+       } else {
+               icp_native_flush_interrupt();
+       }
+}
+
 static void pnv_smp_cpu_kill_self(void)
 {
+       unsigned long srr1, unexpected_mask, wmask;
        unsigned int cpu;
-       unsigned long srr1, wmask;
        u64 lpcr_val;
 
        /* Standard hot unplug procedure */
-       /*
-        * This hard disables local interurpts, ensuring we have no lazy
-        * irqs pending.
-        */
-       WARN_ON(irqs_disabled());
-       hard_irq_disable();
-       WARN_ON(lazy_irq_pending());
 
        idle_task_exit();
        current->active_mm = NULL; /* for sanity */
@@ -173,6 +178,27 @@ static void pnv_smp_cpu_kill_self(void)
                wmask = SRR1_WAKEMASK_P8;
 
        /*
+        * This turns the irq soft-disabled state we're called with, into a
+        * hard-disabled state with pending irq_happened interrupts cleared.
+        *
+        * PACA_IRQ_DEC   - Decrementer should be ignored.
+        * PACA_IRQ_HMI   - Can be ignored, processing is done in real mode.
+        * PACA_IRQ_DBELL, EE, PMI - Unexpected.
+        */
+       hard_irq_disable();
+       if (generic_check_cpu_restart(cpu))
+               goto out;
+
+       unexpected_mask = ~(PACA_IRQ_DEC | PACA_IRQ_HMI | PACA_IRQ_HARD_DIS);
+       if (local_paca->irq_happened & unexpected_mask) {
+               if (local_paca->irq_happened & PACA_IRQ_EE)
+                       pnv_flush_interrupts();
+               DBG("CPU%d Unexpected exit while offline irq_happened=%lx!\n",
+                               cpu, local_paca->irq_happened);
+       }
+       local_paca->irq_happened = PACA_IRQ_HARD_DIS;
+
+       /*
         * We don't want to take decrementer interrupts while we are
         * offline, so clear LPCR:PECE1. We keep PECE2 (and
         * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
@@ -197,6 +223,7 @@ static void pnv_smp_cpu_kill_self(void)
 
                srr1 = pnv_cpu_offline(cpu);
 
+               WARN_ON_ONCE(!irqs_disabled());
                WARN_ON(lazy_irq_pending());
 
                /*
@@ -212,13 +239,7 @@ static void pnv_smp_cpu_kill_self(void)
                 */
                if (((srr1 & wmask) == SRR1_WAKEEE) ||
                    ((srr1 & wmask) == SRR1_WAKEHVI)) {
-                       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-                               if (xive_enabled())
-                                       xive_flush_interrupt();
-                               else
-                                       icp_opal_flush_interrupt();
-                       } else
-                               icp_native_flush_interrupt();
+                       pnv_flush_interrupts();
                } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
                        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                        asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -266,7 +287,7 @@ static void pnv_smp_cpu_kill_self(void)
         */
        lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
        pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
-
+out:
        DBG("CPU%d coming online...\n", cpu);
 }
 
index 07ceee8..75604fe 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <asm/asm.h>
 
-#ifdef CONFIG_GENERIC_BUG
 #define __INSN_LENGTH_MASK  _UL(0x3)
 #define __INSN_LENGTH_32    _UL(0x3)
 #define __COMPRESSED_INSN_MASK _UL(0xffff)
@@ -20,7 +19,6 @@
 #define __BUG_INSN_32  _UL(0x00100073) /* ebreak */
 #define __BUG_INSN_16  _UL(0x9002) /* c.ebreak */
 
-#ifndef __ASSEMBLY__
 typedef u32 bug_insn_t;
 
 #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
@@ -43,6 +41,7 @@ typedef u32 bug_insn_t;
        RISCV_SHORT " %2"
 #endif
 
+#ifdef CONFIG_GENERIC_BUG
 #define __BUG_FLAGS(flags)                                     \
 do {                                                           \
        __asm__ __volatile__ (                                  \
@@ -58,14 +57,10 @@ do {                                                                \
                  "i" (flags),                                  \
                  "i" (sizeof(struct bug_entry)));              \
 } while (0)
-
-#endif /* !__ASSEMBLY__ */
 #else /* CONFIG_GENERIC_BUG */
-#ifndef __ASSEMBLY__
 #define __BUG_FLAGS(flags) do {                                        \
        __asm__ __volatile__ ("ebreak\n");                      \
 } while (0)
-#endif /* !__ASSEMBLY__ */
 #endif /* CONFIG_GENERIC_BUG */
 
 #define BUG() do {                                             \
@@ -79,15 +74,10 @@ do {                                                                \
 
 #include <asm-generic/bug.h>
 
-#ifndef __ASSEMBLY__
-
 struct pt_regs;
 struct task_struct;
 
-extern void die(struct pt_regs *regs, const char *str);
-extern void do_trap(struct pt_regs *regs, int signo, int code,
-       unsigned long addr);
-
-#endif /* !__ASSEMBLY__ */
+void die(struct pt_regs *regs, const char *str);
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
 
 #endif /* _ASM_RISCV_BUG_H */
index fc1189a..3ba4d93 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <asm/mmiowb.h>
+#include <asm/pgtable.h>
 
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
@@ -162,6 +163,12 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #endif
 
 /*
+ *  I/O port access constants.
+ */
+#define IO_SPACE_LIMIT         (PCI_IO_SIZE - 1)
+#define PCI_IOBASE             ((void __iomem *)PCI_IO_START)
+
+/*
  * Emulation routines for the port-mapped IO space used by some PCI drivers.
  * These are defined as being "fully synchronous", but also "not guaranteed to
  * be fully ordered with respect to other memory and I/O operations".  We're
index 7557642..6e1b0e0 100644 (file)
@@ -7,6 +7,9 @@
 #ifndef _ASM_RISCV_IRQ_H
 #define _ASM_RISCV_IRQ_H
 
+#include <linux/interrupt.h>
+#include <linux/linkage.h>
+
 #define NR_IRQS         0
 
 void riscv_timer_interrupt(void);
index 42292d9..d322101 100644 (file)
@@ -7,6 +7,7 @@
 #define _ASM_RISCV_PGTABLE_H
 
 #include <linux/mmzone.h>
+#include <linux/sizes.h>
 
 #include <asm/pgtable-bits.h>
 
@@ -86,6 +87,7 @@ extern pgd_t swapper_pg_dir[];
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
+#define PCI_IO_SIZE      SZ_16M
 
 /*
  * Roughly size the vmemmap space to be large enough to fit enough
@@ -100,7 +102,10 @@ extern pgd_t swapper_pg_dir[];
 
 #define vmemmap                ((struct page *)VMEMMAP_START)
 
-#define FIXADDR_TOP      (VMEMMAP_START)
+#define PCI_IO_END       VMEMMAP_START
+#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
+#define FIXADDR_TOP      PCI_IO_START
+
 #ifdef CONFIG_64BIT
 #define FIXADDR_SIZE     PMD_SIZE
 #else
@@ -184,10 +189,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
        return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
 }
 
-static inline pte_t mk_pte(struct page *page, pgprot_t prot)
-{
-       return pfn_pte(page_to_pfn(page), prot);
-}
+#define mk_pte(page, prot)       pfn_pte(page_to_pfn(page), prot)
 
 #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
@@ -428,9 +430,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
-#ifdef CONFIG_FLATMEM
 #define kern_addr_valid(addr)   (1) /* FIXME */
-#endif
 
 extern void *dtb_early_va;
 extern void setup_bootmem(void);
index f0227bd..ee4f0ac 100644 (file)
@@ -6,6 +6,7 @@
 #ifndef _ASM_RISCV_SWITCH_TO_H
 #define _ASM_RISCV_SWITCH_TO_H
 
+#include <linux/sched/task_stack.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/csr.h>
index b1ade9a..a5ad000 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/processor.h>
 #include <asm/hwcap.h>
 #include <asm/smp.h>
+#include <asm/switch_to.h>
 
 unsigned long elf_hwcap __read_mostly;
 #ifdef CONFIG_FPU
diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h
new file mode 100644 (file)
index 0000000..105fb04
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 SiFive, Inc.
+ */
+#ifndef __ASM_HEAD_H
+#define __ASM_HEAD_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+extern atomic_t hart_lottery;
+
+asmlinkage void do_page_fault(struct pt_regs *regs);
+asmlinkage void __init setup_vm(uintptr_t dtb_pa);
+
+extern void *__cpu_up_stack_pointer[];
+extern void *__cpu_up_task_pointer[];
+
+void __init parse_dtb(void);
+
+#endif /* __ASM_HEAD_H */
index 6d86593..fffac6d 100644 (file)
@@ -24,7 +24,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        return 0;
 }
 
-asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
+asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
 
index c9ae483..e264e59 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/elf.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 
 unsigned long module_emit_got_entry(struct module *mod, unsigned long val)
 {
index fb3a082..85e3c39 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
@@ -19,6 +20,7 @@
 #include <asm/csr.h>
 #include <asm/string.h>
 #include <asm/switch_to.h>
+#include <asm/thread_info.h>
 
 extern asmlinkage void ret_from_fork(void);
 extern asmlinkage void ret_from_kernel_thread(void);
index 3687514..1252113 100644 (file)
@@ -148,7 +148,7 @@ long arch_ptrace(struct task_struct *child, long request,
  * Allows PTRACE_SYSCALL to work.  These are called from entry.S in
  * {handle,ret_from}_syscall.
  */
-void do_syscall_trace_enter(struct pt_regs *regs)
+__visible void do_syscall_trace_enter(struct pt_regs *regs)
 {
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                if (tracehook_report_syscall_entry(regs))
@@ -162,7 +162,7 @@ void do_syscall_trace_enter(struct pt_regs *regs)
        audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
 }
 
-void do_syscall_trace_exit(struct pt_regs *regs)
+__visible void do_syscall_trace_exit(struct pt_regs *regs)
 {
        audit_syscall_exit(regs);
 
index d0fe623..aa56bb1 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/reboot.h>
+#include <linux/pm.h>
 #include <asm/sbi.h>
 
 static void default_power_off(void)
index a990a6c..845ae0e 100644 (file)
@@ -24,6 +24,8 @@
 #include <asm/tlbflush.h>
 #include <asm/thread_info.h>
 
+#include "head.h"
+
 #ifdef CONFIG_DUMMY_CONSOLE
 struct screen_info screen_info = {
        .orig_video_lines       = 30,
index b14d764..d0f6f21 100644 (file)
@@ -26,7 +26,7 @@ struct rt_sigframe {
 
 #ifdef CONFIG_FPU
 static long restore_fp_state(struct pt_regs *regs,
-                            union __riscv_fp_state *sc_fpregs)
+                            union __riscv_fp_state __user *sc_fpregs)
 {
        long err;
        struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
@@ -53,7 +53,7 @@ static long restore_fp_state(struct pt_regs *regs,
 }
 
 static long save_fp_state(struct pt_regs *regs,
-                         union __riscv_fp_state *sc_fpregs)
+                         union __riscv_fp_state __user *sc_fpregs)
 {
        long err;
        struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
@@ -292,8 +292,8 @@ static void do_signal(struct pt_regs *regs)
  * notification of userspace execution resumption
  * - triggered by the _TIF_WORK_MASK flags
  */
-asmlinkage void do_notify_resume(struct pt_regs *regs,
-       unsigned long thread_info_flags)
+asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
+                                          unsigned long thread_info_flags)
 {
        /* Handle pending signal delivery */
        if (thread_info_flags & _TIF_SIGPENDING)
index b18cd6c..5c9ec78 100644 (file)
@@ -8,7 +8,9 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/profile.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
index 18ae6da..261f408 100644 (file)
@@ -29,6 +29,9 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/sbi.h>
+#include <asm/smp.h>
+
+#include "head.h"
 
 void *__cpu_up_stack_pointer[NR_CPUS];
 void *__cpu_up_task_pointer[NR_CPUS];
@@ -130,7 +133,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 /*
  * C entry point for a secondary processor.
  */
-asmlinkage void __init smp_callin(void)
+asmlinkage __visible void __init smp_callin(void)
 {
        struct mm_struct *mm = &init_mm;
 
index e5dd52d..f1ead9d 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
 #include <asm/vdso.h>
+#include <asm/syscall.h>
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call)    [nr] = (call),
index 9dd1f2e..6a53c02 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/clocksource.h>
 #include <linux/delay.h>
 #include <asm/sbi.h>
+#include <asm/processor.h>
 
 unsigned long riscv_timebase;
 EXPORT_SYMBOL_GPL(riscv_timebase);
index 1ac75f7..473de3a 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright (C) 2012 Regents of the University of California
  */
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -83,7 +84,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
 }
 
 #define DO_ERROR_INFO(name, signo, code, str)                          \
-asmlinkage void name(struct pt_regs *regs)                             \
+asmlinkage __visible void name(struct pt_regs *regs)                   \
 {                                                                      \
        do_trap_error(regs, signo, code, regs->sepc, "Oops - " str);    \
 }
@@ -111,7 +112,6 @@ DO_ERROR_INFO(do_trap_ecall_s,
 DO_ERROR_INFO(do_trap_ecall_m,
        SIGILL, ILL_ILLTRP, "environment call from M-mode");
 
-#ifdef CONFIG_GENERIC_BUG
 static inline unsigned long get_break_insn_length(unsigned long pc)
 {
        bug_insn_t insn;
@@ -120,28 +120,15 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
                return 0;
        return (((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL);
 }
-#endif /* CONFIG_GENERIC_BUG */
 
-asmlinkage void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible void do_trap_break(struct pt_regs *regs)
 {
-       if (user_mode(regs)) {
-               force_sig_fault(SIGTRAP, TRAP_BRKPT,
-                               (void __user *)(regs->sepc));
-               return;
-       }
-#ifdef CONFIG_GENERIC_BUG
-       {
-               enum bug_trap_type type;
-
-               type = report_bug(regs->sepc, regs);
-               if (type == BUG_TRAP_TYPE_WARN) {
-                       regs->sepc += get_break_insn_length(regs->sepc);
-                       return;
-               }
-       }
-#endif /* CONFIG_GENERIC_BUG */
-
-       die(regs, "Kernel BUG");
+       if (user_mode(regs))
+               force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->sepc);
+       else if (report_bug(regs->sepc, regs) == BUG_TRAP_TYPE_WARN)
+               regs->sepc += get_break_insn_length(regs->sepc);
+       else
+               die(regs, "Kernel BUG");
 }
 
 #ifdef CONFIG_GENERIC_BUG
index c9c21e0..484d95a 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 2015 Regents of the University of California
  */
 
+#include <linux/elf.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/binfmts.h>
@@ -25,7 +26,7 @@ static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
 } vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = &vdso_data_store.data;
+static struct vdso_data *vdso_data = &vdso_data_store.data;
 
 static int __init vdso_init(void)
 {
index beeb5d7..ca66d44 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
 
 /*
  * When necessary, performs a deferred icache flush for the given MM context,
index 96add14..247b8c8 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/ptrace.h>
 #include <asm/tlbflush.h>
 
+#include "../kernel/head.h"
+
 /*
  * This routine handles page faults.  It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
index 83f7d12..573463d 100644 (file)
@@ -19,6 +19,8 @@
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
+#include "../kernel/head.h"
+
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
                                                        __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
@@ -337,8 +339,7 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
  */
 
 #ifndef __riscv_cmodel_medany
-#error "setup_vm() is called from head.S before relocate so it should "
-       "not use absolute addressing."
+#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
@@ -458,7 +459,7 @@ void __init paging_init(void)
        zone_sizes_init();
 }
 
-#ifdef CONFIG_SPARSEMEM
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
 {
index 2e637ad..a9ffff3 100644 (file)
@@ -142,7 +142,7 @@ static irqreturn_t l2_int_handler(int irq, void *device)
        return IRQ_HANDLED;
 }
 
-int __init sifive_l2_init(void)
+static int __init sifive_l2_init(void)
 {
        struct device_node *np;
        struct resource res;
index 596ca7c..5367950 100644 (file)
@@ -101,10 +101,18 @@ static void handle_relocs(unsigned long offset)
        dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
        for (rela = rela_start; rela < rela_end; rela++) {
                loc = rela->r_offset + offset;
-               val = rela->r_addend + offset;
+               val = rela->r_addend;
                r_sym = ELF64_R_SYM(rela->r_info);
-               if (r_sym)
-                       val += dynsym[r_sym].st_value;
+               if (r_sym) {
+                       if (dynsym[r_sym].st_shndx != SHN_UNDEF)
+                               val += dynsym[r_sym].st_value + offset;
+               } else {
+                       /*
+                        * 0 == undefined symbol table index (STN_UNDEF),
+                        * used for R_390_RELATIVE, only add KASLR offset
+                        */
+                       val += offset;
+               }
                r_type = ELF64_R_TYPE(rela->r_info);
                rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
                if (rc)
index d827b5b..eaaefec 100644 (file)
@@ -35,6 +35,7 @@ struct unwind_state {
        struct task_struct *task;
        struct pt_regs *regs;
        unsigned long sp, ip;
+       bool reuse_sp;
        int graph_idx;
        bool reliable;
        bool error;
index b9d8fe4..8f84568 100644 (file)
@@ -69,18 +69,26 @@ DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
 static ssize_t show_idle_time(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
+       unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
        struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
-       unsigned long long now, idle_time, idle_enter, idle_exit;
        unsigned int seq;
 
        do {
-               now = get_tod_clock();
                seq = read_seqcount_begin(&idle->seqcount);
                idle_time = READ_ONCE(idle->idle_time);
                idle_enter = READ_ONCE(idle->clock_idle_enter);
                idle_exit = READ_ONCE(idle->clock_idle_exit);
        } while (read_seqcount_retry(&idle->seqcount, seq));
-       idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+       in_idle = 0;
+       now = get_tod_clock();
+       if (idle_enter) {
+               if (idle_exit) {
+                       in_idle = idle_exit - idle_enter;
+               } else if (now > idle_enter) {
+                       in_idle = now - idle_enter;
+               }
+       }
+       idle_time += in_idle;
        return sprintf(buf, "%llu\n", idle_time >> 12);
 }
 DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
@@ -88,17 +96,24 @@ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 u64 arch_cpu_idle_time(int cpu)
 {
        struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
-       unsigned long long now, idle_enter, idle_exit;
+       unsigned long long now, idle_enter, idle_exit, in_idle;
        unsigned int seq;
 
        do {
-               now = get_tod_clock();
                seq = read_seqcount_begin(&idle->seqcount);
                idle_enter = READ_ONCE(idle->clock_idle_enter);
                idle_exit = READ_ONCE(idle->clock_idle_exit);
        } while (read_seqcount_retry(&idle->seqcount, seq));
-
-       return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);
+       in_idle = 0;
+       now = get_tod_clock();
+       if (idle_enter) {
+               if (idle_exit) {
+                       in_idle = idle_exit - idle_enter;
+               } else if (now > idle_enter) {
+                       in_idle = now - idle_enter;
+               }
+       }
+       return cputime_to_nsecs(in_idle);
 }
 
 void arch_cpu_idle_enter(void)
index 3b664cb..d5035de 100644 (file)
@@ -27,6 +27,7 @@ int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
                *(u32 *)loc = val;
                break;
        case R_390_64:          /* Direct 64 bit.  */
+       case R_390_GLOB_DAT:
                *(u64 *)loc = val;
                break;
        case R_390_PC16:        /* PC relative 16 bit.  */
index 8fc9daa..a8204f9 100644 (file)
@@ -46,10 +46,15 @@ bool unwind_next_frame(struct unwind_state *state)
 
        regs = state->regs;
        if (unlikely(regs)) {
-               sp = READ_ONCE_NOCHECK(regs->gprs[15]);
-               if (unlikely(outside_of_stack(state, sp))) {
-                       if (!update_stack_info(state, sp))
-                               goto out_err;
+               if (state->reuse_sp) {
+                       sp = state->sp;
+                       state->reuse_sp = false;
+               } else {
+                       sp = READ_ONCE_NOCHECK(regs->gprs[15]);
+                       if (unlikely(outside_of_stack(state, sp))) {
+                               if (!update_stack_info(state, sp))
+                                       goto out_err;
+                       }
                }
                sf = (struct stack_frame *) sp;
                ip = READ_ONCE_NOCHECK(sf->gprs[8]);
@@ -107,9 +112,9 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 {
        struct stack_info *info = &state->stack_info;
        unsigned long *mask = &state->stack_mask;
+       bool reliable, reuse_sp;
        struct stack_frame *sf;
        unsigned long ip;
-       bool reliable;
 
        memset(state, 0, sizeof(*state));
        state->task = task;
@@ -134,10 +139,12 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        if (regs) {
                ip = READ_ONCE_NOCHECK(regs->psw.addr);
                reliable = true;
+               reuse_sp = true;
        } else {
                sf = (struct stack_frame *) sp;
                ip = READ_ONCE_NOCHECK(sf->gprs[8]);
                reliable = false;
+               reuse_sp = false;
        }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -151,5 +158,6 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        state->sp = sp;
        state->ip = ip;
        state->reliable = reliable;
+       state->reuse_sp = reuse_sp;
 }
 EXPORT_SYMBOL_GPL(__unwind_start);
index 510a182..a51c892 100644 (file)
@@ -298,16 +298,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
        }
 
        if (write) {
-               len = *lenp;
-               if (copy_from_user(buf, buffer,
-                                  len > sizeof(buf) ? sizeof(buf) : len))
+               len = min(*lenp, sizeof(buf));
+               if (copy_from_user(buf, buffer, len))
                        return -EFAULT;
-               buf[sizeof(buf) - 1] = '\0';
+               buf[len - 1] = '\0';
                cmm_skip_blanks(buf, &p);
                nr = simple_strtoul(p, &p, 0);
                cmm_skip_blanks(p, &p);
                seconds = simple_strtoul(p, &p, 0);
                cmm_set_timeout(nr, seconds);
+               *ppos += *lenp;
        } else {
                len = sprintf(buf, "%ld %ld\n",
                              cmm_timeout_pages, cmm_timeout_seconds);
@@ -315,9 +315,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
                        len = *lenp;
                if (copy_to_user(buffer, buf, len))
                        return -EFAULT;
+               *lenp = len;
+               *ppos += len;
        }
-       *lenp = len;
-       *ppos += len;
        return 0;
 }
 
index 324a239..997ffe4 100644 (file)
@@ -65,14 +65,14 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
 #
-CFLAGS_REMOVE_vdso-note.o = -pg
 CFLAGS_REMOVE_vclock_gettime.o = -pg
+CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg
 
 $(obj)/%.so: OBJCOPYFLAGS := -S
 $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
-CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+CPPFLAGS_vdso32/vdso32.lds = $(CPPFLAGS_vdso.lds)
 VDSO_LDFLAGS_vdso32.lds = -m elf32_sparc -soname linux-gate.so.1
 
 #This makes sure the $(obj) subdirectory exists even though vdso32/
index 612535c..6627d7c 100644 (file)
@@ -1403,8 +1403,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        spin_unlock_irq(&ubd_dev->lock);
 
-       if (ret < 0)
-               blk_mq_requeue_request(req, true);
+       if (ret < 0) {
+               if (ret == -ENOMEM)
+                       res = BLK_STS_RESOURCE;
+               else
+                       res = BLK_STS_DEV_RESOURCE;
+       }
 
        return res;
 }
index d6e1faa..8ef8513 100644 (file)
@@ -1940,6 +1940,51 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS
 
          If unsure, say y.
 
+choice
+       prompt "TSX enable mode"
+       depends on CPU_SUP_INTEL
+       default X86_INTEL_TSX_MODE_OFF
+       help
+         Intel's TSX (Transactional Synchronization Extensions) feature
+         allows to optimize locking protocols through lock elision which
+         can lead to a noticeable performance boost.
+
+         On the other hand it has been shown that TSX can be exploited
+         to form side channel attacks (e.g. TAA) and chances are there
+         will be more of those attacks discovered in the future.
+
+         Therefore TSX is not enabled by default (aka tsx=off). An admin
+         might override this decision by tsx=on the command line parameter.
+         Even with TSX enabled, the kernel will attempt to enable the best
+         possible TAA mitigation setting depending on the microcode available
+         for the particular machine.
+
+         This option allows to set the default tsx mode between tsx=on, =off
+         and =auto. See Documentation/admin-guide/kernel-parameters.txt for more
+         details.
+
+         Say off if not sure, auto if TSX is in use but it should be used on safe
+         platforms or on if TSX is in use and the security aspect of tsx is not
+         relevant.
+
+config X86_INTEL_TSX_MODE_OFF
+       bool "off"
+       help
+         TSX is disabled if possible - equals to tsx=off command line parameter.
+
+config X86_INTEL_TSX_MODE_ON
+       bool "on"
+       help
+         TSX is always enabled on TSX capable HW - equals the tsx=on command
+         line parameter.
+
+config X86_INTEL_TSX_MODE_AUTO
+       bool "auto"
+       help
+         TSX is enabled on TSX capable HW that is believed to be safe against
+         side channel attacks- equals the tsx=auto command line parameter.
+endchoice
+
 config EFI
        bool "EFI runtime service support"
        depends on ACPI
index d6662fd..82bc60c 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/e820/types.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
+#include <asm/boot.h>
 
 #include "../string.h"
 #include "eboot.h"
@@ -813,7 +814,8 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
                status = efi_relocate_kernel(sys_table, &bzimage_addr,
                                             hdr->init_size, hdr->init_size,
                                             hdr->pref_address,
-                                            hdr->kernel_alignment);
+                                            hdr->kernel_alignment,
+                                            LOAD_PHYSICAL_ADDR);
                if (status != EFI_SUCCESS) {
                        efi_printk(sys_table, "efi_relocate_kernel() failed!\n");
                        goto fail;
index 5b35b7e..26c3635 100644 (file)
@@ -377,7 +377,8 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
                                          struct hw_perf_event *hwc, u64 config)
 {
        config &= ~perf_ibs->cnt_mask;
-       wrmsrl(hwc->config_base, config);
+       if (boot_cpu_data.x86 == 0x10)
+               wrmsrl(hwc->config_base, config);
        config &= ~perf_ibs->enable_mask;
        wrmsrl(hwc->config_base, config);
 }
@@ -553,7 +554,8 @@ static struct perf_ibs perf_ibs_op = {
        },
        .msr                    = MSR_AMD64_IBSOPCTL,
        .config_mask            = IBS_OP_CONFIG_MASK,
-       .cnt_mask               = IBS_OP_MAX_CNT,
+       .cnt_mask               = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
+                                 IBS_OP_CUR_CNT_RAND,
        .enable_mask            = IBS_OP_ENABLE,
        .valid_mask             = IBS_OP_VAL,
        .max_period             = IBS_OP_MAX_CNT << 4,
@@ -614,7 +616,7 @@ fail:
        if (event->attr.sample_type & PERF_SAMPLE_RAW)
                offset_max = perf_ibs->offset_max;
        else if (check_rip)
-               offset_max = 2;
+               offset_max = 3;
        else
                offset_max = 1;
        do {
index 74e80ed..05e43d0 100644 (file)
@@ -627,7 +627,7 @@ static struct topa *topa_alloc(int cpu, gfp_t gfp)
         * link as the 2nd entry in the table
         */
        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
-               TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p);
+               TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
                TOPA_ENTRY(&tp->topa, 1)->end = 1;
        }
 
index 6fc2e06..86467f8 100644 (file)
@@ -502,10 +502,8 @@ void uncore_pmu_event_start(struct perf_event *event, int flags)
        local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
        uncore_enable_event(box, event);
 
-       if (box->n_active == 1) {
-               uncore_enable_box(box);
+       if (box->n_active == 1)
                uncore_pmu_start_hrtimer(box);
-       }
 }
 
 void uncore_pmu_event_stop(struct perf_event *event, int flags)
@@ -529,10 +527,8 @@ void uncore_pmu_event_stop(struct perf_event *event, int flags)
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
 
-               if (box->n_active == 0) {
-                       uncore_disable_box(box);
+               if (box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
-               }
        }
 
        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
@@ -778,6 +774,40 @@ static int uncore_pmu_event_init(struct perf_event *event)
        return ret;
 }
 
+static void uncore_pmu_enable(struct pmu *pmu)
+{
+       struct intel_uncore_pmu *uncore_pmu;
+       struct intel_uncore_box *box;
+
+       uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+       if (!uncore_pmu)
+               return;
+
+       box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+       if (!box)
+               return;
+
+       if (uncore_pmu->type->ops->enable_box)
+               uncore_pmu->type->ops->enable_box(box);
+}
+
+static void uncore_pmu_disable(struct pmu *pmu)
+{
+       struct intel_uncore_pmu *uncore_pmu;
+       struct intel_uncore_box *box;
+
+       uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+       if (!uncore_pmu)
+               return;
+
+       box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+       if (!box)
+               return;
+
+       if (uncore_pmu->type->ops->disable_box)
+               uncore_pmu->type->ops->disable_box(box);
+}
+
 static ssize_t uncore_get_attr_cpumask(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
@@ -803,6 +833,8 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
                pmu->pmu = (struct pmu) {
                        .attr_groups    = pmu->type->attr_groups,
                        .task_ctx_nr    = perf_invalid_context,
+                       .pmu_enable     = uncore_pmu_enable,
+                       .pmu_disable    = uncore_pmu_disable,
                        .event_init     = uncore_pmu_event_init,
                        .add            = uncore_pmu_event_add,
                        .del            = uncore_pmu_event_del,
index f36f7be..bbfdaa7 100644 (file)
@@ -441,18 +441,6 @@ static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
        return -EINVAL;
 }
 
-static inline void uncore_disable_box(struct intel_uncore_box *box)
-{
-       if (box->pmu->type->ops->disable_box)
-               box->pmu->type->ops->disable_box(box);
-}
-
-static inline void uncore_enable_box(struct intel_uncore_box *box)
-{
-       if (box->pmu->type->ops->enable_box)
-               box->pmu->type->ops->enable_box(box);
-}
-
 static inline void uncore_disable_event(struct intel_uncore_box *box,
                                struct perf_event *event)
 {
index 0652d3e..c4fbe37 100644 (file)
 #define X86_BUG_MDS                    X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY             X86_BUG(20) /* CPU is only affected by the  MSDBS variant of BUG_MDS */
 #define X86_BUG_SWAPGS                 X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+#define X86_BUG_TAA                    X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
+#define X86_BUG_ITLB_MULTIHIT          X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index 50eb430..4fc6148 100644 (file)
@@ -312,9 +312,12 @@ struct kvm_rmap_head {
 struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;
+       struct list_head lpage_disallowed_link;
+
        bool unsync;
        u8 mmu_valid_gen;
        bool mmio_cached;
+       bool lpage_disallowed; /* Can't be replaced by an equiv large page */
 
        /*
         * The following two entries are used to key the shadow page in the
@@ -859,6 +862,7 @@ struct kvm_arch {
         */
        struct list_head active_mmu_pages;
        struct list_head zapped_obsolete_pages;
+       struct list_head lpage_disallowed_mmu_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;
 
@@ -933,6 +937,7 @@ struct kvm_arch {
        bool exception_payload_enabled;
 
        struct kvm_pmu_event_filter *pmu_event_filter;
+       struct task_struct *nx_lpage_recovery_thread;
 };
 
 struct kvm_vm_stat {
@@ -946,6 +951,7 @@ struct kvm_vm_stat {
        ulong mmu_unsync;
        ulong remote_tlb_flush;
        ulong lpages;
+       ulong nx_lpage_splits;
        ulong max_mmu_page_hash_collisions;
 };
 
@@ -1189,7 +1195,7 @@ struct kvm_x86_ops {
        int (*set_nested_state)(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                struct kvm_nested_state *kvm_state);
-       void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+       bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
 
        int (*smi_allowed)(struct kvm_vcpu *vcpu);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
index 20ce682..6a31246 100644 (file)
                                                  * Microarchitectural Data
                                                  * Sampling (MDS) vulnerabilities.
                                                  */
+#define ARCH_CAP_PSCHANGE_MC_NO                BIT(6)   /*
+                                                 * The processor is not susceptible to a
+                                                 * machine check error due to modifying the
+                                                 * code page size along with either the
+                                                 * physical address or cache type
+                                                 * without TLB invalidation.
+                                                 */
+#define ARCH_CAP_TSX_CTRL_MSR          BIT(7)  /* MSR for TSX control is available. */
+#define ARCH_CAP_TAA_NO                        BIT(8)  /*
+                                                * Not susceptible to
+                                                * TSX Async Abort (TAA) vulnerabilities.
+                                                */
 
 #define MSR_IA32_FLUSH_CMD             0x0000010b
 #define L1D_FLUSH                      BIT(0)  /*
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 #define MSR_IA32_BBL_CR_CTL3           0x0000011e
 
+#define MSR_IA32_TSX_CTRL              0x00000122
+#define TSX_CTRL_RTM_DISABLE           BIT(0)  /* Disable RTM feature */
+#define TSX_CTRL_CPUID_CLEAR           BIT(1)  /* Disable TSX enumeration */
+
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
 #define MSR_IA32_SYSENTER_EIP          0x00000176
index 80bc209..5c24a7b 100644 (file)
@@ -314,7 +314,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
 #include <asm/segment.h>
 
 /**
- * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
  *
  * This uses the otherwise unused and obsolete VERW instruction in
  * combination with microcode which triggers a CPU buffer flush when the
@@ -337,7 +337,7 @@ static inline void mds_clear_cpu_buffers(void)
 }
 
 /**
- * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */
index 6e0a3b4..54f5d54 100644 (file)
@@ -988,4 +988,11 @@ enum mds_mitigations {
        MDS_MITIGATION_VMWERV,
 };
 
+enum taa_mitigations {
+       TAA_MITIGATION_OFF,
+       TAA_MITIGATION_UCODE_NEEDED,
+       TAA_MITIGATION_VERW,
+       TAA_MITIGATION_TSX_DISABLED,
+};
+
 #endif /* _ASM_X86_PROCESSOR_H */
index e00c9e8..ac9fc51 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <asm/cpufeatures.h>
 #include <asm/alternative.h>
+#include <linux/stringify.h>
 
 /*
  * The hypercall definitions differ in the low word of the %edx argument
@@ -20,8 +21,8 @@
  */
 
 /* Old port-based version */
-#define VMWARE_HYPERVISOR_PORT    "0x5658"
-#define VMWARE_HYPERVISOR_PORT_HB "0x5659"
+#define VMWARE_HYPERVISOR_PORT    0x5658
+#define VMWARE_HYPERVISOR_PORT_HB 0x5659
 
 /* Current vmcall / vmmcall version */
 #define VMWARE_HYPERVISOR_HB   BIT(0)
@@ -29,7 +30,8 @@
 
 /* The low bandwidth call. The low word of edx is presumed clear. */
 #define VMWARE_HYPERCALL                                               \
-       ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT ", %%dx; inl (%%dx)", \
+       ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT) ", %%dx; " \
+                     "inl (%%dx), %%eax",                              \
                      "vmcall", X86_FEATURE_VMCALL,                     \
                      "vmmcall", X86_FEATURE_VMW_VMMCALL)
 
@@ -38,7 +40,8 @@
  * HB and OUT bits set.
  */
 #define VMWARE_HYPERCALL_HB_OUT                                                \
-       ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT_HB ", %%dx; rep outsb", \
+       ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT_HB) ", %%dx; " \
+                     "rep outsb",                                      \
                      "vmcall", X86_FEATURE_VMCALL,                     \
                      "vmmcall", X86_FEATURE_VMW_VMMCALL)
 
@@ -47,7 +50,8 @@
  * HB bit set.
  */
 #define VMWARE_HYPERCALL_HB_IN                                         \
-       ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT_HB ", %%dx; rep insb", \
+       ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT_HB) ", %%dx; " \
+                     "rep insb",                                       \
                      "vmcall", X86_FEATURE_VMCALL,                     \
                      "vmmcall", X86_FEATURE_VMW_VMMCALL)
 #endif
index 9e2dd2b..2b0faf8 100644 (file)
@@ -1586,9 +1586,6 @@ static void setup_local_APIC(void)
 {
        int cpu = smp_processor_id();
        unsigned int value;
-#ifdef CONFIG_X86_32
-       int logical_apicid, ldr_apicid;
-#endif
 
        if (disable_apic) {
                disable_ioapic_support();
@@ -1626,16 +1623,21 @@ static void setup_local_APIC(void)
        apic->init_apic_ldr();
 
 #ifdef CONFIG_X86_32
-       /*
-        * APIC LDR is initialized.  If logical_apicid mapping was
-        * initialized during get_smp_config(), make sure it matches the
-        * actual value.
-        */
-       logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
-       ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
-       WARN_ON(logical_apicid != BAD_APICID && logical_apicid != ldr_apicid);
-       /* always use the value from LDR */
-       early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
+       if (apic->dest_logical) {
+               int logical_apicid, ldr_apicid;
+
+               /*
+                * APIC LDR is initialized.  If logical_apicid mapping was
+                * initialized during get_smp_config(), make sure it matches
+                * the actual value.
+                */
+               logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+               ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
+               if (logical_apicid != BAD_APICID)
+                       WARN_ON(logical_apicid != ldr_apicid);
+               /* Always use the value from LDR. */
+               early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
+       }
 #endif
 
        /*
index d7a1e5a..890f600 100644 (file)
@@ -30,7 +30,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
 
 ifdef CONFIG_CPU_SUP_INTEL
-obj-y                  += intel.o intel_pconfig.o
+obj-y                  += intel.o intel_pconfig.o tsx.o
 obj-$(CONFIG_PM)       += intel_epb.o
 endif
 obj-$(CONFIG_CPU_SUP_AMD)              += amd.o
index 91c2561..4c7b0fa 100644 (file)
@@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
+static void __init taa_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
 u64 x86_spec_ctrl_base;
@@ -105,6 +106,7 @@ void __init check_bugs(void)
        ssb_select_mitigation();
        l1tf_select_mitigation();
        mds_select_mitigation();
+       taa_select_mitigation();
 
        arch_smt_update();
 
@@ -269,6 +271,100 @@ static int __init mds_cmdline(char *str)
 early_param("mds", mds_cmdline);
 
 #undef pr_fmt
+#define pr_fmt(fmt)    "TAA: " fmt
+
+/* Default mitigation for TAA-affected CPUs */
+static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
+static bool taa_nosmt __ro_after_init;
+
+static const char * const taa_strings[] = {
+       [TAA_MITIGATION_OFF]            = "Vulnerable",
+       [TAA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
+       [TAA_MITIGATION_VERW]           = "Mitigation: Clear CPU buffers",
+       [TAA_MITIGATION_TSX_DISABLED]   = "Mitigation: TSX disabled",
+};
+
+static void __init taa_select_mitigation(void)
+{
+       u64 ia32_cap;
+
+       if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+               taa_mitigation = TAA_MITIGATION_OFF;
+               return;
+       }
+
+       /* TSX previously disabled by tsx=off */
+       if (!boot_cpu_has(X86_FEATURE_RTM)) {
+               taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
+               goto out;
+       }
+
+       if (cpu_mitigations_off()) {
+               taa_mitigation = TAA_MITIGATION_OFF;
+               return;
+       }
+
+       /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
+       if (taa_mitigation == TAA_MITIGATION_OFF)
+               goto out;
+
+       if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+               taa_mitigation = TAA_MITIGATION_VERW;
+       else
+               taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+       /*
+        * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
+        * A microcode update fixes this behavior to clear CPU buffers. It also
+        * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
+        * ARCH_CAP_TSX_CTRL_MSR bit.
+        *
+        * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+        * update is required.
+        */
+       ia32_cap = x86_read_arch_cap_msr();
+       if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+           !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+               taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+       /*
+        * TSX is enabled, select alternate mitigation for TAA which is
+        * the same as MDS. Enable MDS static branch to clear CPU buffers.
+        *
+        * For guests that can't determine whether the correct microcode is
+        * present on host, enable the mitigation for UCODE_NEEDED as well.
+        */
+       static_branch_enable(&mds_user_clear);
+
+       if (taa_nosmt || cpu_mitigations_auto_nosmt())
+               cpu_smt_disable(false);
+
+out:
+       pr_info("%s\n", taa_strings[taa_mitigation]);
+}
+
+static int __init tsx_async_abort_parse_cmdline(char *str)
+{
+       if (!boot_cpu_has_bug(X86_BUG_TAA))
+               return 0;
+
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "off")) {
+               taa_mitigation = TAA_MITIGATION_OFF;
+       } else if (!strcmp(str, "full")) {
+               taa_mitigation = TAA_MITIGATION_VERW;
+       } else if (!strcmp(str, "full,nosmt")) {
+               taa_mitigation = TAA_MITIGATION_VERW;
+               taa_nosmt = true;
+       }
+
+       return 0;
+}
+early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
+
+#undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V1 : " fmt
 
 enum spectre_v1_mitigation {
@@ -786,13 +882,10 @@ static void update_mds_branch_idle(void)
 }
 
 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
 
 void cpu_bugs_smt_update(void)
 {
-       /* Enhanced IBRS implies STIBP. No update required. */
-       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
-               return;
-
        mutex_lock(&spec_ctrl_mutex);
 
        switch (spectre_v2_user) {
@@ -819,6 +912,17 @@ void cpu_bugs_smt_update(void)
                break;
        }
 
+       switch (taa_mitigation) {
+       case TAA_MITIGATION_VERW:
+       case TAA_MITIGATION_UCODE_NEEDED:
+               if (sched_smt_active())
+                       pr_warn_once(TAA_MSG_SMT);
+               break;
+       case TAA_MITIGATION_TSX_DISABLED:
+       case TAA_MITIGATION_OFF:
+               break;
+       }
+
        mutex_unlock(&spec_ctrl_mutex);
 }
 
@@ -1149,6 +1253,9 @@ void x86_spec_ctrl_setup_ap(void)
                x86_amd_ssb_disable();
 }
 
+bool itlb_multihit_kvm_mitigation;
+EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
+
 #undef pr_fmt
 #define pr_fmt(fmt)    "L1TF: " fmt
 
@@ -1304,11 +1411,24 @@ static ssize_t l1tf_show_state(char *buf)
                       l1tf_vmx_states[l1tf_vmx_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
 }
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+       if (itlb_multihit_kvm_mitigation)
+               return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
+       else
+               return sprintf(buf, "KVM: Vulnerable\n");
+}
 #else
 static ssize_t l1tf_show_state(char *buf)
 {
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
 }
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+       return sprintf(buf, "Processor vulnerable\n");
+}
 #endif
 
 static ssize_t mds_show_state(char *buf)
@@ -1328,6 +1448,21 @@ static ssize_t mds_show_state(char *buf)
                       sched_smt_active() ? "vulnerable" : "disabled");
 }
 
+static ssize_t tsx_async_abort_show_state(char *buf)
+{
+       if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
+           (taa_mitigation == TAA_MITIGATION_OFF))
+               return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
+
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+               return sprintf(buf, "%s; SMT Host state unknown\n",
+                              taa_strings[taa_mitigation]);
+       }
+
+       return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+                      sched_smt_active() ? "vulnerable" : "disabled");
+}
+
 static char *stibp_state(void)
 {
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
@@ -1398,6 +1533,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
        case X86_BUG_MDS:
                return mds_show_state(buf);
 
+       case X86_BUG_TAA:
+               return tsx_async_abort_show_state(buf);
+
+       case X86_BUG_ITLB_MULTIHIT:
+               return itlb_multihit_show_state(buf);
+
        default:
                break;
        }
@@ -1434,4 +1575,14 @@ ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *bu
 {
        return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
 }
+
+ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
+}
+
+ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
+}
 #endif
index 9ae7d1b..fffe219 100644 (file)
@@ -1016,13 +1016,14 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
-#define NO_SPECULATION BIT(0)
-#define NO_MELTDOWN    BIT(1)
-#define NO_SSB         BIT(2)
-#define NO_L1TF                BIT(3)
-#define NO_MDS         BIT(4)
-#define MSBDS_ONLY     BIT(5)
-#define NO_SWAPGS      BIT(6)
+#define NO_SPECULATION         BIT(0)
+#define NO_MELTDOWN            BIT(1)
+#define NO_SSB                 BIT(2)
+#define NO_L1TF                        BIT(3)
+#define NO_MDS                 BIT(4)
+#define MSBDS_ONLY             BIT(5)
+#define NO_SWAPGS              BIT(6)
+#define NO_ITLB_MULTIHIT       BIT(7)
 
 #define VULNWL(_vendor, _family, _model, _whitelist)   \
        { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -1043,27 +1044,27 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        VULNWL(NSC,     5, X86_MODEL_ANY,       NO_SPECULATION),
 
        /* Intel Family 6 */
-       VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION),
-       VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION),
-       VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION),
-       VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION),
-       VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION),
-
-       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_SILVERMONT_D,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION | NO_ITLB_MULTIHIT),
+
+       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SILVERMONT_D,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
        VULNWL_INTEL(CORE_YONAH,                NO_SSB),
 
-       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_AIRMONT_NP,           NO_L1TF | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_AIRMONT_NP,           NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
-       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_GOLDMONT_D,           NO_MDS | NO_L1TF | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_GOLDMONT_D,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
        /*
         * Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1073,15 +1074,17 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
         * good enough for our purposes.
         */
 
+       VULNWL_INTEL(ATOM_TREMONT_D,            NO_ITLB_MULTIHIT),
+
        /* AMD Family 0xf - 0x12 */
-       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
        /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
-       VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
        {}
 };
 
@@ -1092,19 +1095,30 @@ static bool __init cpu_matches(unsigned long which)
        return m && !!(m->driver_data & which);
 }
 
-static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+u64 x86_read_arch_cap_msr(void)
 {
        u64 ia32_cap = 0;
 
+       if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+       return ia32_cap;
+}
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+       u64 ia32_cap = x86_read_arch_cap_msr();
+
+       /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+       if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+               setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+
        if (cpu_matches(NO_SPECULATION))
                return;
 
        setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
        setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
-       if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
-               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
        if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
@@ -1121,6 +1135,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        if (!cpu_matches(NO_SWAPGS))
                setup_force_cpu_bug(X86_BUG_SWAPGS);
 
+       /*
+        * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
+        *      - TSX is supported or
+        *      - TSX_CTRL is present
+        *
+        * TSX_CTRL check is needed for cases when TSX could be disabled before
+        * the kernel boot e.g. kexec.
+        * TSX_CTRL check alone is not sufficient for cases when the microcode
+        * update is not present or running as guest that don't get TSX_CTRL.
+        */
+       if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+           (cpu_has(c, X86_FEATURE_RTM) ||
+            (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+               setup_force_cpu_bug(X86_BUG_TAA);
+
        if (cpu_matches(NO_MELTDOWN))
                return;
 
@@ -1554,6 +1583,8 @@ void __init identify_boot_cpu(void)
 #endif
        cpu_detect_tlb(&boot_cpu_data);
        setup_cr_pinning();
+
+       tsx_init();
 }
 
 void identify_secondary_cpu(struct cpuinfo_x86 *c)
index c0e2407..38ab6e1 100644 (file)
@@ -44,6 +44,22 @@ struct _tlb_table {
 extern const struct cpu_dev *const __x86_cpu_dev_start[],
                            *const __x86_cpu_dev_end[];
 
+#ifdef CONFIG_CPU_SUP_INTEL
+enum tsx_ctrl_states {
+       TSX_CTRL_ENABLE,
+       TSX_CTRL_DISABLE,
+       TSX_CTRL_NOT_SUPPORTED,
+};
+
+extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
+
+extern void __init tsx_init(void);
+extern void tsx_enable(void);
+extern void tsx_disable(void);
+#else
+static inline void tsx_init(void) { }
+#endif /* CONFIG_CPU_SUP_INTEL */
+
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
@@ -62,4 +78,6 @@ unsigned int aperfmperf_get_khz(int cpu);
 
 extern void x86_spec_ctrl_setup_ap(void);
 
+extern u64 x86_read_arch_cap_msr(void);
+
 #endif /* ARCH_X86_CPU_H */
index c2fdc00..11d5c59 100644 (file)
@@ -762,6 +762,11 @@ static void init_intel(struct cpuinfo_x86 *c)
                detect_tme(c);
 
        init_intel_misc_features(c);
+
+       if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+               tsx_enable();
+       if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+               tsx_disable();
 }
 
 #ifdef CONFIG_X86_32
index efbd54c..055c861 100644 (file)
@@ -522,6 +522,10 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
        int ret = 0;
 
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (!rdtgrp) {
+               ret = -ENOENT;
+               goto out;
+       }
 
        md.priv = of->kn->priv;
        resid = md.u.rid;
index a46dee8..2e3b06d 100644 (file)
@@ -461,10 +461,8 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
        }
 
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
-       rdt_last_cmd_clear();
        if (!rdtgrp) {
                ret = -ENOENT;
-               rdt_last_cmd_puts("Directory was removed\n");
                goto unlock;
        }
 
@@ -2648,10 +2646,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
        int ret;
 
        prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
-       rdt_last_cmd_clear();
        if (!prdtgrp) {
                ret = -ENODEV;
-               rdt_last_cmd_puts("Directory was removed\n");
                goto out_unlock;
        }
 
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
new file mode 100644 (file)
index 0000000..3e20d32
--- /dev/null
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Transactional Synchronization Extensions (TSX) control.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author:
+ *     Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+
+#include <asm/cmdline.h>
+
+#include "cpu.h"
+
+enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
+
+void tsx_disable(void)
+{
+       u64 tsx;
+
+       rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+       /* Force all transactions to immediately abort */
+       tsx |= TSX_CTRL_RTM_DISABLE;
+
+       /*
+        * Ensure TSX support is not enumerated in CPUID.
+        * This is visible to userspace and will ensure they
+        * do not waste resources trying TSX transactions that
+        * will always abort.
+        */
+       tsx |= TSX_CTRL_CPUID_CLEAR;
+
+       wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+void tsx_enable(void)
+{
+       u64 tsx;
+
+       rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+       /* Enable the RTM feature in the cpu */
+       tsx &= ~TSX_CTRL_RTM_DISABLE;
+
+       /*
+        * Ensure TSX support is enumerated in CPUID.
+        * This is visible to userspace and will ensure they
+        * can enumerate and use the TSX feature.
+        */
+       tsx &= ~TSX_CTRL_CPUID_CLEAR;
+
+       wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+static bool __init tsx_ctrl_is_supported(void)
+{
+       u64 ia32_cap = x86_read_arch_cap_msr();
+
+       /*
+        * TSX is controlled via MSR_IA32_TSX_CTRL.  However, support for this
+        * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+        *
+        * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+        * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+        * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+        * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+        * tsx= cmdline requests will do nothing on CPUs without
+        * MSR_IA32_TSX_CTRL support.
+        */
+       return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
+}
+
+static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
+{
+       if (boot_cpu_has_bug(X86_BUG_TAA))
+               return TSX_CTRL_DISABLE;
+
+       return TSX_CTRL_ENABLE;
+}
+
+void __init tsx_init(void)
+{
+       char arg[5] = {};
+       int ret;
+
+       if (!tsx_ctrl_is_supported())
+               return;
+
+       ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
+       if (ret >= 0) {
+               if (!strcmp(arg, "on")) {
+                       tsx_ctrl_state = TSX_CTRL_ENABLE;
+               } else if (!strcmp(arg, "off")) {
+                       tsx_ctrl_state = TSX_CTRL_DISABLE;
+               } else if (!strcmp(arg, "auto")) {
+                       tsx_ctrl_state = x86_get_tsx_auto_mode();
+               } else {
+                       tsx_ctrl_state = TSX_CTRL_DISABLE;
+                       pr_err("tsx: invalid option, defaulting to off\n");
+               }
+       } else {
+               /* tsx= not provided */
+               if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO))
+                       tsx_ctrl_state = x86_get_tsx_auto_mode();
+               else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF))
+                       tsx_ctrl_state = TSX_CTRL_DISABLE;
+               else
+                       tsx_ctrl_state = TSX_CTRL_ENABLE;
+       }
+
+       if (tsx_ctrl_state == TSX_CTRL_DISABLE) {
+               tsx_disable();
+
+               /*
+                * tsx_disable() will change the state of the
+                * RTM CPUID bit.  Clear it here since it is now
+                * expected to be not set.
+                */
+               setup_clear_cpu_cap(X86_FEATURE_RTM);
+       } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
+
+               /*
+                * HW defaults TSX to be enabled at bootup.
+                * We may still need the TSX enable support
+                * during init for special cases like
+                * kexec after TSX is disabled.
+                */
+               tsx_enable();
+
+               /*
+                * tsx_enable() will change the state of the
+                * RTM CPUID bit.  Force it here since it is now
+                * expected to be set.
+                */
+               setup_force_cpu_cap(X86_FEATURE_RTM);
+       }
+}
index 753b8cf..87b9789 100644 (file)
@@ -94,6 +94,13 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
        BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
 
        begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
+       /*
+        * Handle the case where stack trace is collected _before_
+        * cea_exception_stacks had been initialized.
+        */
+       if (!begin)
+               return false;
+
        end = begin + sizeof(struct cea_exception_stacks);
        /* Bail if @stack is outside the exception stack area. */
        if (stk < begin || stk >= end)
index 6f6b1d0..4cba91e 100644 (file)
@@ -710,6 +710,8 @@ static struct chipset early_qrk[] __initdata = {
         */
        { PCI_VENDOR_ID_INTEL, 0x0f00,
                PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+       { PCI_VENDOR_ID_INTEL, 0x3ec4,
+               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
        { PCI_VENDOR_ID_BROADCOM, 0x4331,
          PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
        {}
index c59454c..7e322e2 100644 (file)
@@ -1505,6 +1505,9 @@ void __init tsc_init(void)
                return;
        }
 
+       if (tsc_clocksource_reliable || no_tsc_watchdog)
+               clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+
        clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
        detect_art();
 }
index 9c5029c..f68c0c7 100644 (file)
@@ -363,7 +363,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
 
        /* cpuid 7.0.ecx*/
        const u32 kvm_cpuid_7_0_ecx_x86_features =
-               F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
+               F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
                F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;
index 87b0fcc..b29d00b 100644 (file)
@@ -111,11 +111,6 @@ static inline int apic_enabled(struct kvm_lapic *apic)
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 
-static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
-{
-       return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
-}
-
 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
 {
        return apic->vcpu->vcpu_id;
index 2aad7e2..1f50148 100644 (file)
@@ -242,4 +242,9 @@ static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
        return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
 }
 
+static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
+{
+       return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
+}
+
 #endif
index 24c23c6..2ce9da5 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/uaccess.h>
 #include <linux/hash.h>
 #include <linux/kern_levels.h>
+#include <linux/kthread.h>
 
 #include <asm/page.h>
 #include <asm/pat.h>
 #include <asm/kvm_page_track.h>
 #include "trace.h"
 
+extern bool itlb_multihit_kvm_mitigation;
+
+static int __read_mostly nx_huge_pages = -1;
+#ifdef CONFIG_PREEMPT_RT
+/* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
+static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
+#else
+static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
+#endif
+
+static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
+static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
+
+static struct kernel_param_ops nx_huge_pages_ops = {
+       .set = set_nx_huge_pages,
+       .get = param_get_bool,
+};
+
+static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
+       .set = set_nx_huge_pages_recovery_ratio,
+       .get = param_get_uint,
+};
+
+module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
+__MODULE_PARM_TYPE(nx_huge_pages, "bool");
+module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
+               &nx_huge_pages_recovery_ratio, 0644);
+__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
+
 /*
  * When setting this variable to true it enables Two-Dimensional-Paging
  * where the hardware walks 2 page tables:
@@ -352,6 +382,11 @@ static inline bool spte_ad_need_write_protect(u64 spte)
        return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
 }
 
+static bool is_nx_huge_page_enabled(void)
+{
+       return READ_ONCE(nx_huge_pages);
+}
+
 static inline u64 spte_shadow_accessed_mask(u64 spte)
 {
        MMU_WARN_ON(is_mmio_spte(spte));
@@ -1190,6 +1225,17 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
        kvm_mmu_gfn_disallow_lpage(slot, gfn);
 }
 
+static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       if (sp->lpage_disallowed)
+               return;
+
+       ++kvm->stat.nx_lpage_splits;
+       list_add_tail(&sp->lpage_disallowed_link,
+                     &kvm->arch.lpage_disallowed_mmu_pages);
+       sp->lpage_disallowed = true;
+}
+
 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        struct kvm_memslots *slots;
@@ -1207,6 +1253,13 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
        kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
 
+static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       --kvm->stat.nx_lpage_splits;
+       sp->lpage_disallowed = false;
+       list_del(&sp->lpage_disallowed_link);
+}
+
 static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
                                          struct kvm_memory_slot *slot)
 {
@@ -2792,6 +2845,9 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
                        kvm_reload_remote_mmus(kvm);
        }
 
+       if (sp->lpage_disallowed)
+               unaccount_huge_nx_page(kvm, sp);
+
        sp->role.invalid = 1;
        return list_unstable;
 }
@@ -3013,6 +3069,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        if (!speculative)
                spte |= spte_shadow_accessed_mask(spte);
 
+       if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) &&
+           is_nx_huge_page_enabled()) {
+               pte_access &= ~ACC_EXEC_MASK;
+       }
+
        if (pte_access & ACC_EXEC_MASK)
                spte |= shadow_x_mask;
        else
@@ -3233,9 +3294,32 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
        __direct_pte_prefetch(vcpu, sp, sptep);
 }
 
+static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
+                                      gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
+{
+       int level = *levelp;
+       u64 spte = *it.sptep;
+
+       if (it.level == level && level > PT_PAGE_TABLE_LEVEL &&
+           is_nx_huge_page_enabled() &&
+           is_shadow_present_pte(spte) &&
+           !is_large_pte(spte)) {
+               /*
+                * A small SPTE exists for this pfn, but FNAME(fetch)
+                * and __direct_map would like to create a large PTE
+                * instead: just force them to go down another level,
+                * patching back for them into pfn the next 9 bits of
+                * the address.
+                */
+               u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1);
+               *pfnp |= gfn & page_mask;
+               (*levelp)--;
+       }
+}
+
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
                        int map_writable, int level, kvm_pfn_t pfn,
-                       bool prefault)
+                       bool prefault, bool lpage_disallowed)
 {
        struct kvm_shadow_walk_iterator it;
        struct kvm_mmu_page *sp;
@@ -3248,6 +3332,12 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 
        trace_kvm_mmu_spte_requested(gpa, level, pfn);
        for_each_shadow_entry(vcpu, gpa, it) {
+               /*
+                * We cannot overwrite existing page tables with an NX
+                * large page, as the leaf could be executable.
+                */
+               disallowed_hugepage_adjust(it, gfn, &pfn, &level);
+
                base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
                if (it.level == level)
                        break;
@@ -3258,6 +3348,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
                                              it.level - 1, true, ACC_ALL);
 
                        link_shadow_page(vcpu, it.sptep, sp);
+                       if (lpage_disallowed)
+                               account_huge_nx_page(vcpu->kvm, sp);
                }
        }
 
@@ -3306,7 +3398,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
         * here.
         */
        if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
-           level == PT_PAGE_TABLE_LEVEL &&
+           !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
            PageTransCompoundMap(pfn_to_page(pfn)) &&
            !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
                unsigned long mask;
@@ -3550,11 +3642,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 {
        int r;
        int level;
-       bool force_pt_level = false;
+       bool force_pt_level;
        kvm_pfn_t pfn;
        unsigned long mmu_seq;
        bool map_writable, write = error_code & PFERR_WRITE_MASK;
+       bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+                               is_nx_huge_page_enabled();
 
+       force_pt_level = lpage_disallowed;
        level = mapping_level(vcpu, gfn, &force_pt_level);
        if (likely(!force_pt_level)) {
                /*
@@ -3588,7 +3683,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
                goto out_unlock;
        if (likely(!force_pt_level))
                transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
-       r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
+       r = __direct_map(vcpu, v, write, map_writable, level, pfn,
+                        prefault, false);
 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
@@ -4174,6 +4270,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
        unsigned long mmu_seq;
        int write = error_code & PFERR_WRITE_MASK;
        bool map_writable;
+       bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+                               is_nx_huge_page_enabled();
 
        MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
 
@@ -4184,8 +4282,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
        if (r)
                return r;
 
-       force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
-                                                          PT_DIRECTORY_LEVEL);
+       force_pt_level =
+               lpage_disallowed ||
+               !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL);
        level = mapping_level(vcpu, gfn, &force_pt_level);
        if (likely(!force_pt_level)) {
                if (level > PT_DIRECTORY_LEVEL &&
@@ -4214,7 +4313,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
                goto out_unlock;
        if (likely(!force_pt_level))
                transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
-       r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
+       r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
+                        prefault, lpage_disallowed);
 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
@@ -5914,9 +6014,9 @@ restart:
                 * the guest, and the guest page table is using 4K page size
                 * mapping if the indirect sp has level = 1.
                 */
-               if (sp->role.direct &&
-                       !kvm_is_reserved_pfn(pfn) &&
-                       PageTransCompoundMap(pfn_to_page(pfn))) {
+               if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
+                   !kvm_is_zone_device_pfn(pfn) &&
+                   PageTransCompoundMap(pfn_to_page(pfn))) {
                        pte_list_remove(rmap_head, sptep);
 
                        if (kvm_available_flush_tlb_with_range())
@@ -6155,10 +6255,59 @@ static void kvm_set_mmio_spte_mask(void)
        kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
 }
 
+static bool get_nx_auto_mode(void)
+{
+       /* Return true when CPU has the bug, and mitigations are ON */
+       return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
+}
+
+static void __set_nx_huge_pages(bool val)
+{
+       nx_huge_pages = itlb_multihit_kvm_mitigation = val;
+}
+
+static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
+{
+       bool old_val = nx_huge_pages;
+       bool new_val;
+
+       /* In "auto" mode deploy workaround only if CPU has the bug. */
+       if (sysfs_streq(val, "off"))
+               new_val = 0;
+       else if (sysfs_streq(val, "force"))
+               new_val = 1;
+       else if (sysfs_streq(val, "auto"))
+               new_val = get_nx_auto_mode();
+       else if (strtobool(val, &new_val) < 0)
+               return -EINVAL;
+
+       __set_nx_huge_pages(new_val);
+
+       if (new_val != old_val) {
+               struct kvm *kvm;
+
+               mutex_lock(&kvm_lock);
+
+               list_for_each_entry(kvm, &vm_list, vm_list) {
+                       mutex_lock(&kvm->slots_lock);
+                       kvm_mmu_zap_all_fast(kvm);
+                       mutex_unlock(&kvm->slots_lock);
+
+                       wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+               }
+               mutex_unlock(&kvm_lock);
+       }
+
+       return 0;
+}
+
 int kvm_mmu_module_init(void)
 {
        int ret = -ENOMEM;
 
+       if (nx_huge_pages == -1)
+               __set_nx_huge_pages(get_nx_auto_mode());
+
        /*
         * MMU roles use union aliasing which is, generally speaking, an
         * undefined behavior. However, we supposedly know how compilers behave
@@ -6238,3 +6387,116 @@ void kvm_mmu_module_exit(void)
        unregister_shrinker(&mmu_shrinker);
        mmu_audit_disable();
 }
+
+static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
+{
+       unsigned int old_val;
+       int err;
+
+       old_val = nx_huge_pages_recovery_ratio;
+       err = param_set_uint(val, kp);
+       if (err)
+               return err;
+
+       if (READ_ONCE(nx_huge_pages) &&
+           !old_val && nx_huge_pages_recovery_ratio) {
+               struct kvm *kvm;
+
+               mutex_lock(&kvm_lock);
+
+               list_for_each_entry(kvm, &vm_list, vm_list)
+                       wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+
+               mutex_unlock(&kvm_lock);
+       }
+
+       return err;
+}
+
+static void kvm_recover_nx_lpages(struct kvm *kvm)
+{
+       int rcu_idx;
+       struct kvm_mmu_page *sp;
+       unsigned int ratio;
+       LIST_HEAD(invalid_list);
+       ulong to_zap;
+
+       rcu_idx = srcu_read_lock(&kvm->srcu);
+       spin_lock(&kvm->mmu_lock);
+
+       ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+       to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
+       while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
+               /*
+                * We use a separate list instead of just using active_mmu_pages
+                * because the number of lpage_disallowed pages is expected to
+                * be relatively small compared to the total.
+                */
+               sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
+                                     struct kvm_mmu_page,
+                                     lpage_disallowed_link);
+               WARN_ON_ONCE(!sp->lpage_disallowed);
+               kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+               WARN_ON_ONCE(sp->lpage_disallowed);
+
+               if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+                       if (to_zap)
+                               cond_resched_lock(&kvm->mmu_lock);
+               }
+       }
+
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, rcu_idx);
+}
+
+static long get_nx_lpage_recovery_timeout(u64 start_time)
+{
+       return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
+               ? start_time + 60 * HZ - get_jiffies_64()
+               : MAX_SCHEDULE_TIMEOUT;
+}
+
+static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
+{
+       u64 start_time;
+       long remaining_time;
+
+       while (true) {
+               start_time = get_jiffies_64();
+               remaining_time = get_nx_lpage_recovery_timeout(start_time);
+
+               set_current_state(TASK_INTERRUPTIBLE);
+               while (!kthread_should_stop() && remaining_time > 0) {
+                       schedule_timeout(remaining_time);
+                       remaining_time = get_nx_lpage_recovery_timeout(start_time);
+                       set_current_state(TASK_INTERRUPTIBLE);
+               }
+
+               set_current_state(TASK_RUNNING);
+
+               if (kthread_should_stop())
+                       return 0;
+
+               kvm_recover_nx_lpages(kvm);
+       }
+}
+
+int kvm_mmu_post_init_vm(struct kvm *kvm)
+{
+       int err;
+
+       err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
+                                         "kvm-nx-lpage-recovery",
+                                         &kvm->arch.nx_lpage_recovery_thread);
+       if (!err)
+               kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
+
+       return err;
+}
+
+void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
+{
+       if (kvm->arch.nx_lpage_recovery_thread)
+               kthread_stop(kvm->arch.nx_lpage_recovery_thread);
+}
index 11f8ec8..d55674f 100644 (file)
@@ -210,4 +210,8 @@ void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);
 int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+
+int kvm_mmu_post_init_vm(struct kvm *kvm);
+void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
+
 #endif
index 7d5cdb3..97b21e7 100644 (file)
@@ -614,13 +614,14 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *gw,
                         int write_fault, int hlevel,
-                        kvm_pfn_t pfn, bool map_writable, bool prefault)
+                        kvm_pfn_t pfn, bool map_writable, bool prefault,
+                        bool lpage_disallowed)
 {
        struct kvm_mmu_page *sp = NULL;
        struct kvm_shadow_walk_iterator it;
        unsigned direct_access, access = gw->pt_access;
        int top_level, ret;
-       gfn_t base_gfn;
+       gfn_t gfn, base_gfn;
 
        direct_access = gw->pte_access;
 
@@ -665,13 +666,25 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                        link_shadow_page(vcpu, it.sptep, sp);
        }
 
-       base_gfn = gw->gfn;
+       /*
+        * FNAME(page_fault) might have clobbered the bottom bits of
+        * gw->gfn, restore them from the virtual address.
+        */
+       gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
+       base_gfn = gfn;
 
        trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
 
        for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
                clear_sp_write_flooding_count(it.sptep);
-               base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+
+               /*
+                * We cannot overwrite existing page tables with an NX
+                * large page, as the leaf could be executable.
+                */
+               disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel);
+
+               base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
                if (it.level == hlevel)
                        break;
 
@@ -683,6 +696,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                        sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
                                              it.level - 1, true, direct_access);
                        link_shadow_page(vcpu, it.sptep, sp);
+                       if (lpage_disallowed)
+                               account_huge_nx_page(vcpu->kvm, sp);
                }
        }
 
@@ -759,9 +774,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        int r;
        kvm_pfn_t pfn;
        int level = PT_PAGE_TABLE_LEVEL;
-       bool force_pt_level = false;
        unsigned long mmu_seq;
        bool map_writable, is_self_change_mapping;
+       bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+                               is_nx_huge_page_enabled();
+       bool force_pt_level = lpage_disallowed;
 
        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -851,7 +868,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        if (!force_pt_level)
                transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
        r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
-                        level, pfn, map_writable, prefault);
+                        level, pfn, map_writable, prefault, lpage_disallowed);
        kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 
 out_unlock:
index f8ecb6d..c5673bd 100644 (file)
@@ -734,8 +734,14 @@ static int get_npt_level(struct kvm_vcpu *vcpu)
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        vcpu->arch.efer = efer;
-       if (!npt_enabled && !(efer & EFER_LMA))
-               efer &= ~EFER_LME;
+
+       if (!npt_enabled) {
+               /* Shadow paging assumes NX to be available.  */
+               efer |= EFER_NX;
+
+               if (!(efer & EFER_LMA))
+                       efer &= ~EFER_LME;
+       }
 
        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
@@ -4591,6 +4597,7 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
        int ret = 0;
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
+       u32 id = kvm_xapic_id(vcpu->arch.apic);
 
        if (ldr == svm->ldr_reg)
                return 0;
@@ -4598,7 +4605,7 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
        avic_invalidate_logical_id_entry(vcpu);
 
        if (ldr)
-               ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr);
+               ret = avic_ldr_write(vcpu, id, ldr);
 
        if (!ret)
                svm->ldr_reg = ldr;
@@ -4610,8 +4617,7 @@ static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
 {
        u64 *old, *new;
        struct vcpu_svm *svm = to_svm(vcpu);
-       u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
-       u32 id = (apic_id_reg >> 24) & 0xff;
+       u32 id = kvm_xapic_id(vcpu->arch.apic);
 
        if (vcpu->vcpu_id == id)
                return 0;
index e76eb4f..0e7c930 100644 (file)
@@ -2917,7 +2917,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12);
 
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2937,19 +2937,18 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                        vmx->nested.apic_access_page = NULL;
                }
                page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
-               /*
-                * If translation failed, no matter: This feature asks
-                * to exit when accessing the given address, and if it
-                * can never be accessed, this feature won't do
-                * anything anyway.
-                */
                if (!is_error_page(page)) {
                        vmx->nested.apic_access_page = page;
                        hpa = page_to_phys(vmx->nested.apic_access_page);
                        vmcs_write64(APIC_ACCESS_ADDR, hpa);
                } else {
-                       secondary_exec_controls_clearbit(vmx,
-                               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+                       pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
+                                            __func__);
+                       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       vcpu->run->internal.suberror =
+                               KVM_INTERNAL_ERROR_EMULATION;
+                       vcpu->run->internal.ndata = 0;
+                       return false;
                }
        }
 
@@ -2994,6 +2993,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
        else
                exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+       return true;
 }
 
 /*
@@ -3032,13 +3032,15 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 /*
  * If from_vmentry is false, this is being called from state restore (either RSM
  * or KVM_SET_NESTED_STATE).  Otherwise it's called from vmlaunch/vmresume.
-+ *
-+ * Returns:
-+ *   0 - success, i.e. proceed with actual VMEnter
-+ *   1 - consistency check VMExit
-+ *  -1 - consistency check VMFail
+ *
+ * Returns:
+ *     NVMX_ENTRY_SUCCESS: Entered VMX non-root mode
+ *     NVMX_ENTRY_VMFAIL:  Consistency check VMFail
+ *     NVMX_ENTRY_VMEXIT:  Consistency check VMExit
+ *     NVMX_ENTRY_KVM_INTERNAL_ERROR: KVM internal error
  */
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+                                                       bool from_vmentry)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -3081,11 +3083,12 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
        prepare_vmcs02_early(vmx, vmcs12);
 
        if (from_vmentry) {
-               nested_get_vmcs12_pages(vcpu);
+               if (unlikely(!nested_get_vmcs12_pages(vcpu)))
+                       return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
 
                if (nested_vmx_check_vmentry_hw(vcpu)) {
                        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-                       return -1;
+                       return NVMX_VMENTRY_VMFAIL;
                }
 
                if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
@@ -3149,7 +3152,7 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
         * returned as far as L1 is concerned. It will only return (and set
         * the success flag) when L2 exits (see nested_vmx_vmexit()).
         */
-       return 0;
+       return NVMX_VMENTRY_SUCCESS;
 
        /*
         * A failed consistency check that leads to a VMExit during L1's
@@ -3165,14 +3168,14 @@ vmentry_fail_vmexit:
        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 
        if (!from_vmentry)
-               return 1;
+               return NVMX_VMENTRY_VMEXIT;
 
        load_vmcs12_host_state(vcpu, vmcs12);
        vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
        vmcs12->exit_qualification = exit_qual;
        if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
                vmx->nested.need_vmcs12_to_shadow_sync = true;
-       return 1;
+       return NVMX_VMENTRY_VMEXIT;
 }
 
 /*
@@ -3182,9 +3185,9 @@ vmentry_fail_vmexit:
 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 {
        struct vmcs12 *vmcs12;
+       enum nvmx_vmentry_status status;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
-       int ret;
 
        if (!nested_vmx_check_permission(vcpu))
                return 1;
@@ -3244,13 +3247,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
         * the nested entry.
         */
        vmx->nested.nested_run_pending = 1;
-       ret = nested_vmx_enter_non_root_mode(vcpu, true);
-       vmx->nested.nested_run_pending = !ret;
-       if (ret > 0)
-               return 1;
-       else if (ret)
-               return nested_vmx_failValid(vcpu,
-                       VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+       status = nested_vmx_enter_non_root_mode(vcpu, true);
+       if (unlikely(status != NVMX_VMENTRY_SUCCESS))
+               goto vmentry_failed;
 
        /* Hide L1D cache contents from the nested guest.  */
        vmx->vcpu.arch.l1tf_flush_l1d = true;
@@ -3281,6 +3280,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                return kvm_vcpu_halt(vcpu);
        }
        return 1;
+
+vmentry_failed:
+       vmx->nested.nested_run_pending = 0;
+       if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
+               return 0;
+       if (status == NVMX_VMENTRY_VMEXIT)
+               return 1;
+       WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
+       return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 }
 
 /*
index 187d39b..6280f33 100644 (file)
@@ -6,6 +6,16 @@
 #include "vmcs12.h"
 #include "vmx.h"
 
+/*
+ * Status returned by nested_vmx_enter_non_root_mode():
+ */
+enum nvmx_vmentry_status {
+       NVMX_VMENTRY_SUCCESS,           /* Entered VMX non-root mode */
+       NVMX_VMENTRY_VMFAIL,            /* Consistency check VMFail */
+       NVMX_VMENTRY_VMEXIT,            /* Consistency check VMExit */
+       NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
+};
+
 void vmx_leave_nested(struct kvm_vcpu *vcpu);
 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
                                bool apicv);
@@ -13,7 +23,8 @@ void nested_vmx_hardware_unsetup(void);
 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
 void nested_vmx_vcpu_setup(void);
 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry);
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+                                                    bool from_vmentry);
 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                       u32 exit_intr_info, unsigned long exit_qualification);
index e7970a2..04a8212 100644 (file)
@@ -969,17 +969,9 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
        u64 guest_efer = vmx->vcpu.arch.efer;
        u64 ignore_bits = 0;
 
-       if (!enable_ept) {
-               /*
-                * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
-                * host CPUID is more efficient than testing guest CPUID
-                * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
-                */
-               if (boot_cpu_has(X86_FEATURE_SMEP))
-                       guest_efer |= EFER_NX;
-               else if (!(guest_efer & EFER_NX))
-                       ignore_bits |= EFER_NX;
-       }
+       /* Shadow paging assumes NX to be available.  */
+       if (!enable_ept)
+               guest_efer |= EFER_NX;
 
        /*
         * LMA and LME handled by hardware; SCE meaningless outside long mode.
@@ -1276,6 +1268,18 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
        if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
                return;
 
+       /*
+        * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
+        * PI.NDST: pi_post_block is the one expected to change PID.NDST and the
+        * wakeup handler expects the vCPU to be on the blocked_vcpu_list that
+        * matches PI.NDST. Otherwise, a vcpu may not be able to be woken up
+        * correctly.
+        */
+       if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
+               pi_clear_sn(pi_desc);
+               goto after_clear_sn;
+       }
+
        /* The full case.  */
        do {
                old.control = new.control = pi_desc->control;
@@ -1291,6 +1295,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
        } while (cmpxchg64(&pi_desc->control, old.control,
                           new.control) != old.control);
 
+after_clear_sn:
+
        /*
         * Clear SN before reading the bitmap.  The VT-d firmware
         * writes the bitmap and reads SN atomically (5.2.3 in the
@@ -1299,7 +1305,7 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
         */
        smp_mb__after_atomic();
 
-       if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS))
+       if (!pi_is_pir_empty(pi_desc))
                pi_set_on(pi_desc);
 }
 
@@ -5543,14 +5549,6 @@ static int handle_encls(struct kvm_vcpu *vcpu)
        return 1;
 }
 
-static int handle_unexpected_vmexit(struct kvm_vcpu *vcpu)
-{
-       kvm_skip_emulated_instruction(vcpu);
-       WARN_ONCE(1, "Unexpected VM-Exit Reason = 0x%x",
-               vmcs_read32(VM_EXIT_REASON));
-       return 1;
-}
-
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -5602,15 +5600,11 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_INVVPID]                 = handle_vmx_instruction,
        [EXIT_REASON_RDRAND]                  = handle_invalid_op,
        [EXIT_REASON_RDSEED]                  = handle_invalid_op,
-       [EXIT_REASON_XSAVES]                  = handle_unexpected_vmexit,
-       [EXIT_REASON_XRSTORS]                 = handle_unexpected_vmexit,
        [EXIT_REASON_PML_FULL]                = handle_pml_full,
        [EXIT_REASON_INVPCID]                 = handle_invpcid,
        [EXIT_REASON_VMFUNC]                  = handle_vmx_instruction,
        [EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
        [EXIT_REASON_ENCLS]                   = handle_encls,
-       [EXIT_REASON_UMWAIT]                  = handle_unexpected_vmexit,
-       [EXIT_REASON_TPAUSE]                  = handle_unexpected_vmexit,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -6157,7 +6151,7 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
        if (pi_test_on(&vmx->pi_desc)) {
                pi_clear_on(&vmx->pi_desc);
                /*
-                * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+                * IOMMU can write to PID.ON, so the barrier matters even on UP.
                 * But on x86 this is just a compiler barrier anyway.
                 */
                smp_mb__after_atomic();
@@ -6187,7 +6181,10 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 
 static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
 {
-       return pi_test_on(vcpu_to_pi_desc(vcpu));
+       struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+       return pi_test_on(pi_desc) ||
+               (pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
 }
 
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
index bee1668..5a0f34b 100644 (file)
@@ -355,6 +355,11 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
        return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
 }
 
+static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
+{
+       return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
+}
+
 static inline void pi_set_sn(struct pi_desc *pi_desc)
 {
        set_bit(POSTED_INTR_SN,
@@ -373,6 +378,12 @@ static inline void pi_clear_on(struct pi_desc *pi_desc)
                (unsigned long *)&pi_desc->control);
 }
 
+static inline void pi_clear_sn(struct pi_desc *pi_desc)
+{
+       clear_bit(POSTED_INTR_SN,
+               (unsigned long *)&pi_desc->control);
+}
+
 static inline int pi_test_on(struct pi_desc *pi_desc)
 {
        return test_bit(POSTED_INTR_ON,
index 661e2bf..5d53052 100644 (file)
@@ -213,6 +213,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages, .mode = 0444) },
+       { "nx_largepages_splitted", VM_STAT(nx_lpage_splits, .mode = 0444) },
        { "max_mmu_page_hash_collisions",
                VM_STAT(max_mmu_page_hash_collisions) },
        { NULL }
@@ -360,8 +361,7 @@ EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 asmlinkage __visible void kvm_spurious_fault(void)
 {
        /* Fault while not rebooting.  We want the trace. */
-       if (!kvm_rebooting)
-               BUG();
+       BUG_ON(!kvm_rebooting);
 }
 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
 
@@ -1133,13 +1133,15 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
  *
- * This list is modified at module load time to reflect the
+ * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features)
+ * extract the supported MSRs from the related const lists.
+ * msrs_to_save is selected from the msrs_to_save_all to reflect the
  * capabilities of the host cpu. This capabilities test skips MSRs that are
- * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
+ * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
  * may depend on host virtualization features rather than host cpu features.
  */
 
-static u32 msrs_to_save[] = {
+static const u32 msrs_to_save_all[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -1180,9 +1182,10 @@ static u32 msrs_to_save[] = {
        MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
 };
 
+static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
 static unsigned num_msrs_to_save;
 
-static u32 emulated_msrs[] = {
+static const u32 emulated_msrs_all[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
@@ -1221,7 +1224,7 @@ static u32 emulated_msrs[] = {
         * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
         * We always support the "true" VMX control MSRs, even if the host
         * processor does not, so I am putting these registers here rather
-        * than in msrs_to_save.
+        * than in msrs_to_save_all.
         */
        MSR_IA32_VMX_BASIC,
        MSR_IA32_VMX_TRUE_PINBASED_CTLS,
@@ -1240,13 +1243,14 @@ static u32 emulated_msrs[] = {
        MSR_KVM_POLL_CONTROL,
 };
 
+static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
 static unsigned num_emulated_msrs;
 
 /*
  * List of msr numbers which are used to expose MSR-based features that
  * can be used by a hypervisor to validate requested CPU features.
  */
-static u32 msr_based_features[] = {
+static const u32 msr_based_features_all[] = {
        MSR_IA32_VMX_BASIC,
        MSR_IA32_VMX_TRUE_PINBASED_CTLS,
        MSR_IA32_VMX_PINBASED_CTLS,
@@ -1271,6 +1275,7 @@ static u32 msr_based_features[] = {
        MSR_IA32_ARCH_CAPABILITIES,
 };
 
+static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
 static unsigned int num_msr_based_features;
 
 static u64 kvm_get_arch_capabilities(void)
@@ -1281,6 +1286,14 @@ static u64 kvm_get_arch_capabilities(void)
                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
 
        /*
+        * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
+        * the nested hypervisor runs with NX huge pages.  If it is not,
+        * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
+        * L1 guests, so it need not worry about its own (L2) guests.
+        */
+       data |= ARCH_CAP_PSCHANGE_MC_NO;
+
+       /*
         * If we're doing cache flushes (either "always" or "cond")
         * we will do one whenever the guest does a vmlaunch/vmresume.
         * If an outer hypervisor is doing the cache flush for us
@@ -1299,6 +1312,25 @@ static u64 kvm_get_arch_capabilities(void)
        if (!boot_cpu_has_bug(X86_BUG_MDS))
                data |= ARCH_CAP_MDS_NO;
 
+       /*
+        * On TAA affected systems, export MDS_NO=0 when:
+        *      - TSX is enabled on the host, i.e. X86_FEATURE_RTM=1.
+        *      - Updated microcode is present. This is detected by
+        *        the presence of ARCH_CAP_TSX_CTRL_MSR and ensures
+        *        that VERW clears CPU buffers.
+        *
+        * When MDS_NO=0 is exported, guests deploy clear CPU buffer
+        * mitigation and don't complain:
+        *
+        *      "Vulnerable: Clear CPU buffers attempted, no microcode"
+        *
+        * If TSX is disabled on the system, guests are also mitigated against
+        * TAA and clear CPU buffer mitigation is not required for guests.
+        */
+       if (boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM) &&
+           (data & ARCH_CAP_TSX_CTRL_MSR))
+               data &= ~ARCH_CAP_MDS_NO;
+
        return data;
 }
 
@@ -2537,6 +2569,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.pv_time_enabled = false;
+       vcpu->arch.time = 0;
 }
 
 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
@@ -2702,8 +2735,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_KVM_SYSTEM_TIME: {
                struct kvm_arch *ka = &vcpu->kvm->arch;
 
-               kvmclock_reset(vcpu);
-
                if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
                        bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
 
@@ -2717,14 +2748,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
 
                /* we verify if the enable bit is set... */
+               vcpu->arch.pv_time_enabled = false;
                if (!(data & 1))
                        break;
 
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+               if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
                     &vcpu->arch.pv_time, data & ~1ULL,
                     sizeof(struct pvclock_vcpu_time_info)))
-                       vcpu->arch.pv_time_enabled = false;
-               else
                        vcpu->arch.pv_time_enabled = true;
 
                break;
@@ -5093,22 +5123,26 @@ static void kvm_init_msr_list(void)
 {
        struct x86_pmu_capability x86_pmu;
        u32 dummy[2];
-       unsigned i, j;
+       unsigned i;
 
        BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
-                        "Please update the fixed PMCs in msrs_to_save[]");
+                        "Please update the fixed PMCs in msrs_to_saved_all[]");
 
        perf_get_x86_pmu_capability(&x86_pmu);
 
-       for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
-               if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
+       num_msrs_to_save = 0;
+       num_emulated_msrs = 0;
+       num_msr_based_features = 0;
+
+       for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
+               if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
                        continue;
 
                /*
                 * Even MSRs that are valid in the host may not be exposed
                 * to the guests in some cases.
                 */
-               switch (msrs_to_save[i]) {
+               switch (msrs_to_save_all[i]) {
                case MSR_IA32_BNDCFGS:
                        if (!kvm_mpx_supported())
                                continue;
@@ -5136,17 +5170,17 @@ static void kvm_init_msr_list(void)
                        break;
                case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: {
                        if (!kvm_x86_ops->pt_supported() ||
-                               msrs_to_save[i] - MSR_IA32_RTIT_ADDR0_A >=
+                               msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
                                intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
                                continue;
                        break;
                case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
-                       if (msrs_to_save[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
+                       if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
                            min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
                                continue;
                        break;
                case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
-                       if (msrs_to_save[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
+                       if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
                            min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
                                continue;
                }
@@ -5154,34 +5188,25 @@ static void kvm_init_msr_list(void)
                        break;
                }
 
-               if (j < i)
-                       msrs_to_save[j] = msrs_to_save[i];
-               j++;
+               msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
        }
-       num_msrs_to_save = j;
 
-       for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-               if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+       for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
+               if (!kvm_x86_ops->has_emulated_msr(emulated_msrs_all[i]))
                        continue;
 
-               if (j < i)
-                       emulated_msrs[j] = emulated_msrs[i];
-               j++;
+               emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
        }
-       num_emulated_msrs = j;
 
-       for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
+       for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) {
                struct kvm_msr_entry msr;
 
-               msr.index = msr_based_features[i];
+               msr.index = msr_based_features_all[i];
                if (kvm_get_msr_feature(&msr))
                        continue;
 
-               if (j < i)
-                       msr_based_features[j] = msr_based_features[i];
-               j++;
+               msr_based_features[num_msr_based_features++] = msr_based_features_all[i];
        }
-       num_msr_based_features = j;
 }
 
 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
@@ -7941,8 +7966,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        bool req_immediate_exit = false;
 
        if (kvm_request_pending(vcpu)) {
-               if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
-                       kvm_x86_ops->get_vmcs12_pages(vcpu);
+               if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
+                       if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) {
+                               r = 0;
+                               goto out;
+                       }
+               }
                if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
                        kvm_mmu_unload(vcpu);
                if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
@@ -9427,6 +9456,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+       INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
        atomic_set(&kvm->arch.noncoherent_dma_count, 0);
 
@@ -9455,6 +9485,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        return kvm_x86_ops->vm_init(kvm);
 }
 
+int kvm_arch_post_init_vm(struct kvm *kvm)
+{
+       return kvm_mmu_post_init_vm(kvm);
+}
+
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
        vcpu_load(vcpu);
@@ -9556,6 +9591,11 @@ int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 }
 EXPORT_SYMBOL_GPL(x86_set_memory_region);
 
+void kvm_arch_pre_destroy_vm(struct kvm *kvm)
+{
+       kvm_mmu_pre_destroy_vm(kvm);
+}
+
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        if (current->mm == kvm->mm) {
index 58f79ab..5bfea37 100644 (file)
@@ -117,6 +117,14 @@ static void __init xen_banner(void)
        printk(KERN_INFO "Xen version: %d.%d%s%s\n",
               version >> 16, version & 0xffff, extra.extraversion,
               xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
+
+#ifdef CONFIG_X86_32
+       pr_warn("WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n"
+               "Support for running as 32-bit PV-guest under Xen will soon be removed\n"
+               "from the Linux kernel!\n"
+               "Please use either a 64-bit kernel or switch to HVM or PVH mode!\n"
+               "WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n");
+#endif
 }
 
 static void __init xen_pv_init_platform(void)
index 0319d63..0c62144 100644 (file)
@@ -2713,6 +2713,28 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
        }
 }
 
+
+static
+void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+       /*
+        * To prevent bfqq's service guarantees from being violated,
+        * bfqq may be left busy, i.e., queued for service, even if
+        * empty (see comments in __bfq_bfqq_expire() for
+        * details). But, if no process will send requests to bfqq any
+        * longer, then there is no point in keeping bfqq queued for
+        * service. In addition, keeping bfqq queued for service, but
+        * with no process ref any longer, may have caused bfqq to be
+        * freed when dequeued from service. But this is assumed to
+        * never happen.
+        */
+       if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
+           bfqq != bfqd->in_service_queue)
+               bfq_del_bfqq_busy(bfqd, bfqq, false);
+
+       bfq_put_queue(bfqq);
+}
+
 static void
 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
                struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
@@ -2783,8 +2805,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
         */
        new_bfqq->pid = -1;
        bfqq->bic = NULL;
-       /* release process reference to bfqq */
-       bfq_put_queue(bfqq);
+       bfq_release_process_ref(bfqd, bfqq);
 }
 
 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -4899,7 +4920,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 
        bfq_put_cooperator(bfqq);
 
-       bfq_put_queue(bfqq); /* release process reference */
+       bfq_release_process_ref(bfqd, bfqq);
 }
 
 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
@@ -5001,8 +5022,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
 
        bfqq = bic_to_bfqq(bic, false);
        if (bfqq) {
-               /* release process reference on this queue */
-               bfq_put_queue(bfqq);
+               bfq_release_process_ref(bfqd, bfqq);
                bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
                bic_set_bfqq(bic, bfqq, false);
        }
@@ -5963,7 +5983,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 
        bfq_put_cooperator(bfqq);
 
-       bfq_put_queue(bfqq);
+       bfq_release_process_ref(bfqq->bfqd, bfqq);
        return NULL;
 }
 
index 8f0ed62..b1170ec 100644 (file)
@@ -751,7 +751,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
        if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
                return false;
 
-       if (bio->bi_vcnt > 0) {
+       if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
                struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
                if (page_is_mergeable(bv, page, len, off, same_page)) {
index 5d21027..1eb8895 100644 (file)
@@ -934,9 +934,14 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                int i;
                bool has_stats = false;
 
+               spin_lock_irq(&blkg->q->queue_lock);
+
+               if (!blkg->online)
+                       goto skip;
+
                dname = blkg_dev_name(blkg);
                if (!dname)
-                       continue;
+                       goto skip;
 
                /*
                 * Hooray string manipulation, count is the size written NOT
@@ -946,8 +951,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                 */
                off += scnprintf(buf+off, size-off, "%s ", dname);
 
-               spin_lock_irq(&blkg->q->queue_lock);
-
                blkg_rwstat_recursive_sum(blkg, NULL,
                                offsetof(struct blkcg_gq, stat_bytes), &rwstat);
                rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
@@ -960,8 +963,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
                dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
 
-               spin_unlock_irq(&blkg->q->queue_lock);
-
                if (rbytes || wbytes || rios || wios) {
                        has_stats = true;
                        off += scnprintf(buf+off, size-off,
@@ -999,6 +1000,8 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                                seq_commit(sf, -1);
                        }
                }
+       skip:
+               spin_unlock_irq(&blkg->q->queue_lock);
        }
 
        rcu_read_unlock();
index 2a3db80..e01267f 100644 (file)
@@ -1057,9 +1057,12 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
        atomic64_set(&iocg->active_period, cur_period);
 
        /* already activated or breaking leaf-only constraint? */
-       for (i = iocg->level; i > 0; i--)
-               if (!list_empty(&iocg->active_list))
+       if (!list_empty(&iocg->active_list))
+               goto succeed_unlock;
+       for (i = iocg->level - 1; i > 0; i--)
+               if (!list_empty(&iocg->ancestors[i]->active_list))
                        goto fail_unlock;
+
        if (iocg->child_active_sum)
                goto fail_unlock;
 
@@ -1101,6 +1104,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
                ioc_start_period(ioc, now);
        }
 
+succeed_unlock:
        spin_unlock_irq(&ioc->lock);
        return true;
 
@@ -2110,10 +2114,10 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
                        goto einval;
        }
 
-       spin_lock_irq(&iocg->ioc->lock);
+       spin_lock(&iocg->ioc->lock);
        iocg->cfg_weight = v;
        weight_updated(iocg);
-       spin_unlock_irq(&iocg->ioc->lock);
+       spin_unlock(&iocg->ioc->lock);
 
        blkg_conf_finish(&ctx);
        return nbytes;
index 1413324..14e68f2 100644 (file)
@@ -1322,7 +1322,7 @@ static ssize_t scrub_show(struct device *dev,
        nfit_device_lock(dev);
        nd_desc = dev_get_drvdata(dev);
        if (!nd_desc) {
-               device_unlock(dev);
+               nfit_device_unlock(dev);
                return rc;
        }
        acpi_desc = to_acpi_desc(nd_desc);
index 08da9c2..62114a0 100644 (file)
@@ -290,14 +290,13 @@ static int acpi_processor_notifier(struct notifier_block *nb,
                                   unsigned long event, void *data)
 {
        struct cpufreq_policy *policy = data;
-       int cpu = policy->cpu;
 
        if (event == CPUFREQ_CREATE_POLICY) {
-               acpi_thermal_cpufreq_init(cpu);
-               acpi_processor_ppc_init(cpu);
+               acpi_thermal_cpufreq_init(policy);
+               acpi_processor_ppc_init(policy);
        } else if (event == CPUFREQ_REMOVE_POLICY) {
-               acpi_processor_ppc_exit(cpu);
-               acpi_thermal_cpufreq_exit(cpu);
+               acpi_processor_ppc_exit(policy);
+               acpi_thermal_cpufreq_exit(policy);
        }
 
        return 0;
index ed56c6d..2ae95df 100644 (file)
@@ -642,6 +642,19 @@ static int acpi_idle_bm_check(void)
        return bm_status;
 }
 
+static void wait_for_freeze(void)
+{
+#ifdef CONFIG_X86
+       /* No delay is needed if we are in guest */
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+               return;
+#endif
+       /* Dummy wait op - must do something useless after P_LVL2 read
+          because chipsets cannot guarantee that STPCLK# signal
+          gets asserted in time to freeze execution properly. */
+       inl(acpi_gbl_FADT.xpm_timer_block.address);
+}
+
 /**
  * acpi_idle_do_entry - enter idle state using the appropriate method
  * @cx: cstate data
@@ -658,10 +671,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
        } else {
                /* IO port based C-state */
                inb(cx->address);
-               /* Dummy wait op - must do something useless after P_LVL2 read
-                  because chipsets cannot guarantee that STPCLK# signal
-                  gets asserted in time to freeze execution properly. */
-               inl(acpi_gbl_FADT.xpm_timer_block.address);
+               wait_for_freeze();
        }
 }
 
@@ -682,8 +692,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
                        safe_halt();
                else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
                        inb(cx->address);
-                       /* See comment in acpi_idle_do_entry() */
-                       inl(acpi_gbl_FADT.xpm_timer_block.address);
+                       wait_for_freeze();
                } else
                        return -ENODEV;
        }
index 930a49f..5909e8f 100644 (file)
@@ -81,10 +81,10 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
        pr->performance_platform_limit = (int)ppc;
 
        if (ppc >= pr->performance->state_count ||
-           unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
+           unlikely(!freq_qos_request_active(&pr->perflib_req)))
                return 0;
 
-       ret = dev_pm_qos_update_request(&pr->perflib_req,
+       ret = freq_qos_update_request(&pr->perflib_req,
                        pr->performance->states[ppc].core_frequency * 1000);
        if (ret < 0) {
                pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
@@ -157,28 +157,36 @@ void acpi_processor_ignore_ppc_init(void)
                ignore_ppc = 0;
 }
 
-void acpi_processor_ppc_init(int cpu)
+void acpi_processor_ppc_init(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
-       int ret;
+       unsigned int cpu;
 
-       if (!pr)
-               return;
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
+               int ret;
+
+               if (!pr)
+                       continue;
 
-       ret = dev_pm_qos_add_request(get_cpu_device(cpu),
-                                    &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
-                                    INT_MAX);
-       if (ret < 0)
-               pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
-                      ret);
+               ret = freq_qos_add_request(&policy->constraints,
+                                          &pr->perflib_req,
+                                          FREQ_QOS_MAX, INT_MAX);
+               if (ret < 0)
+                       pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+                              cpu, ret);
+       }
 }
 
-void acpi_processor_ppc_exit(int cpu)
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
+       unsigned int cpu;
 
-       if (pr)
-               dev_pm_qos_remove_request(&pr->perflib_req);
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
+
+               if (pr)
+                       freq_qos_remove_request(&pr->perflib_req);
+       }
 }
 
 static int acpi_processor_get_performance_control(struct acpi_processor *pr)
index 8227c7d..41feb88 100644 (file)
@@ -105,7 +105,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
 
                pr = per_cpu(processors, i);
 
-               if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req)))
+               if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
                        continue;
 
                policy = cpufreq_cpu_get(i);
@@ -116,7 +116,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
 
                cpufreq_cpu_put(policy);
 
-               ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq);
+               ret = freq_qos_update_request(&pr->thermal_req, max_freq);
                if (ret < 0) {
                        pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
                                pr->id, ret);
@@ -125,28 +125,36 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
        return 0;
 }
 
-void acpi_thermal_cpufreq_init(int cpu)
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
-       int ret;
+       unsigned int cpu;
 
-       if (!pr)
-               return;
-
-       ret = dev_pm_qos_add_request(get_cpu_device(cpu),
-                                    &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
-                                    INT_MAX);
-       if (ret < 0)
-               pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
-                      ret);
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
+               int ret;
+
+               if (!pr)
+                       continue;
+
+               ret = freq_qos_add_request(&policy->constraints,
+                                          &pr->thermal_req,
+                                          FREQ_QOS_MAX, INT_MAX);
+               if (ret < 0)
+                       pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+                              cpu, ret);
+       }
 }
 
-void acpi_thermal_cpufreq_exit(int cpu)
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
+       unsigned int cpu;
+
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, policy->cpu);
 
-       if (pr)
-               dev_pm_qos_remove_request(&pr->thermal_req);
+               if (pr)
+                       freq_qos_remove_request(&pr->thermal_req);
+       }
 }
 #else                          /* ! CONFIG_CPU_FREQ */
 static int cpufreq_get_max_state(unsigned int cpu)
index f39f075..fe15236 100644 (file)
@@ -409,9 +409,11 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
                 */
                rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
                if (IS_ERR(rstc)) {
-                       if (PTR_ERR(rstc) != -EPROBE_DEFER)
-                               dev_err(&dev->dev, "Can't get amba reset!\n");
-                       return PTR_ERR(rstc);
+                       ret = PTR_ERR(rstc);
+                       if (ret != -EPROBE_DEFER)
+                               dev_err(&dev->dev, "can't get reset: %d\n",
+                                       ret);
+                       goto err_reset;
                }
                reset_control_deassert(rstc);
                reset_control_put(rstc);
@@ -472,6 +474,12 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
        release_resource(&dev->res);
  err_out:
        return ret;
+
+ err_reset:
+       amba_put_disable_pclk(dev);
+       iounmap(tmp);
+       dev_pm_domain_detach(&dev->dev, true);
+       goto err_release;
 }
 
 /*
index 5b9ac21..265d9dd 100644 (file)
@@ -97,10 +97,6 @@ DEFINE_SHOW_ATTRIBUTE(proc);
 #define SZ_1K                               0x400
 #endif
 
-#ifndef SZ_4M
-#define SZ_4M                               0x400000
-#endif
-
 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
 
 enum {
@@ -5177,9 +5173,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
        if (proc->tsk != current->group_leader)
                return -EINVAL;
 
-       if ((vma->vm_end - vma->vm_start) > SZ_4M)
-               vma->vm_end = vma->vm_start + SZ_4M;
-
        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
                     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
                     __func__, proc->pid, vma->vm_start, vma->vm_end,
index d42a8b2..eb76a82 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/cacheflush.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
+#include <linux/sizes.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
@@ -689,7 +690,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        alloc->buffer = (void __user *)vma->vm_start;
        mutex_unlock(&binder_alloc_mmap_lock);
 
-       alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+       alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+                                  SZ_4M);
+       alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
@@ -697,7 +700,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
-       alloc->buffer_size = vma->vm_end - vma->vm_start;
 
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
index e742780..8befce0 100644 (file)
@@ -153,17 +153,13 @@ int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
 {
        int rc, i;
 
-       if (hpriv->ahci_regulator) {
-               rc = regulator_enable(hpriv->ahci_regulator);
-               if (rc)
-                       return rc;
-       }
+       rc = regulator_enable(hpriv->ahci_regulator);
+       if (rc)
+               return rc;
 
-       if (hpriv->phy_regulator) {
-               rc = regulator_enable(hpriv->phy_regulator);
-               if (rc)
-                       goto disable_ahci_pwrs;
-       }
+       rc = regulator_enable(hpriv->phy_regulator);
+       if (rc)
+               goto disable_ahci_pwrs;
 
        for (i = 0; i < hpriv->nports; i++) {
                if (!hpriv->target_pwrs[i])
@@ -181,11 +177,9 @@ disable_target_pwrs:
                if (hpriv->target_pwrs[i])
                        regulator_disable(hpriv->target_pwrs[i]);
 
-       if (hpriv->phy_regulator)
-               regulator_disable(hpriv->phy_regulator);
+       regulator_disable(hpriv->phy_regulator);
 disable_ahci_pwrs:
-       if (hpriv->ahci_regulator)
-               regulator_disable(hpriv->ahci_regulator);
+       regulator_disable(hpriv->ahci_regulator);
        return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
@@ -207,10 +201,8 @@ void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
                regulator_disable(hpriv->target_pwrs[i]);
        }
 
-       if (hpriv->ahci_regulator)
-               regulator_disable(hpriv->ahci_regulator);
-       if (hpriv->phy_regulator)
-               regulator_disable(hpriv->phy_regulator);
+       regulator_disable(hpriv->ahci_regulator);
+       regulator_disable(hpriv->phy_regulator);
 }
 EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
 /**
@@ -359,7 +351,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
        struct regulator *target_pwr;
        int rc = 0;
 
-       target_pwr = regulator_get_optional(dev, "target");
+       target_pwr = regulator_get(dev, "target");
 
        if (!IS_ERR(target_pwr))
                hpriv->target_pwrs[port] = target_pwr;
@@ -436,16 +428,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
                hpriv->clks[i] = clk;
        }
 
-       hpriv->ahci_regulator = devm_regulator_get_optional(dev, "ahci");
+       hpriv->ahci_regulator = devm_regulator_get(dev, "ahci");
        if (IS_ERR(hpriv->ahci_regulator)) {
                rc = PTR_ERR(hpriv->ahci_regulator);
-               if (rc == -EPROBE_DEFER)
+               if (rc != 0)
                        goto err_out;
-               rc = 0;
-               hpriv->ahci_regulator = NULL;
        }
 
-       hpriv->phy_regulator = devm_regulator_get_optional(dev, "phy");
+       hpriv->phy_regulator = devm_regulator_get(dev, "phy");
        if (IS_ERR(hpriv->phy_regulator)) {
                rc = PTR_ERR(hpriv->phy_regulator);
                if (rc == -EPROBE_DEFER)
index cc37511..6265871 100644 (file)
@@ -554,12 +554,27 @@ ssize_t __weak cpu_show_mds(struct device *dev,
        return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
 static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
 static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
+static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
@@ -568,6 +583,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_spec_store_bypass.attr,
        &dev_attr_l1tf.attr,
        &dev_attr_mds.attr,
+       &dev_attr_tsx_async_abort.attr,
+       &dev_attr_itlb_multihit.attr,
        NULL
 };
 
index 55907c2..84c4e1f 100644 (file)
@@ -872,3 +872,39 @@ int walk_memory_blocks(unsigned long start, unsigned long size,
        }
        return ret;
 }
+
+struct for_each_memory_block_cb_data {
+       walk_memory_blocks_func_t func;
+       void *arg;
+};
+
+static int for_each_memory_block_cb(struct device *dev, void *data)
+{
+       struct memory_block *mem = to_memory_block(dev);
+       struct for_each_memory_block_cb_data *cb_data = data;
+
+       return cb_data->func(mem, cb_data->arg);
+}
+
+/**
+ * for_each_memory_block - walk through all present memory blocks
+ *
+ * @arg: argument passed to func
+ * @func: callback for each memory block walked
+ *
+ * This function walks through all present memory blocks, calling func on
+ * each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
+{
+       struct for_each_memory_block_cb_data cb_data = {
+               .func = func,
+               .arg = arg,
+       };
+
+       return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
+                               for_each_memory_block_cb);
+}
index 8db98a1..bbddb26 100644 (file)
@@ -188,6 +188,26 @@ void dev_pm_domain_detach(struct device *dev, bool power_off)
 EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
 
 /**
+ * dev_pm_domain_start - Start the device through its PM domain.
+ * @dev: Device to start.
+ *
+ * This function should typically be called during probe by a subsystem/driver,
+ * when it needs to start its device from the PM domain's perspective. Note
+ * that, it's assumed that the PM domain is already powered on when this
+ * function is called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_domain_start(struct device *dev)
+{
+       if (dev->pm_domain && dev->pm_domain->start)
+               return dev->pm_domain->start(dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_start);
+
+/**
  * dev_pm_domain_set - Set PM domain of a device.
  * @dev: Device whose PM domain is to be set.
  * @pd: PM domain to be set, or NULL.
index cc85e87..8e5725b 100644 (file)
@@ -634,6 +634,13 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
        return ret;
 }
 
+static int genpd_dev_pm_start(struct device *dev)
+{
+       struct generic_pm_domain *genpd = dev_to_genpd(dev);
+
+       return genpd_start_dev(genpd, dev);
+}
+
 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                     unsigned long val, void *ptr)
 {
@@ -922,24 +929,6 @@ static int __init genpd_power_off_unused(void)
 }
 late_initcall(genpd_power_off_unused);
 
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
-
-static bool genpd_present(const struct generic_pm_domain *genpd)
-{
-       const struct generic_pm_domain *gpd;
-
-       if (IS_ERR_OR_NULL(genpd))
-               return false;
-
-       list_for_each_entry(gpd, &gpd_list, gpd_list_node)
-               if (gpd == genpd)
-                       return true;
-
-       return false;
-}
-
-#endif
-
 #ifdef CONFIG_PM_SLEEP
 
 /**
@@ -1354,8 +1343,8 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
 {
        struct generic_pm_domain *genpd;
 
-       genpd = dev_to_genpd(dev);
-       if (!genpd_present(genpd))
+       genpd = dev_to_genpd_safe(dev);
+       if (!genpd)
                return;
 
        if (suspend) {
@@ -1805,6 +1794,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
        genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
        genpd->domain.ops.restore_noirq = genpd_restore_noirq;
        genpd->domain.ops.complete = genpd_complete;
+       genpd->domain.start = genpd_dev_pm_start;
 
        if (genpd->flags & GENPD_FLAG_PM_CLK) {
                genpd->dev_ops.stop = pm_clk_suspend;
@@ -2020,6 +2010,16 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
        return 0;
 }
 
+static bool genpd_present(const struct generic_pm_domain *genpd)
+{
+       const struct generic_pm_domain *gpd;
+
+       list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+               if (gpd == genpd)
+                       return true;
+       return false;
+}
+
 /**
  * of_genpd_add_provider_simple() - Register a simple PM domain provider
  * @np: Device node pointer associated with the PM domain provider.
index 6c90fd7..350dcaf 100644 (file)
@@ -115,20 +115,10 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
 
        spin_lock_irqsave(&dev->power.lock, flags);
 
-       switch (type) {
-       case DEV_PM_QOS_RESUME_LATENCY:
+       if (type == DEV_PM_QOS_RESUME_LATENCY) {
                ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
                        : pm_qos_read_value(&qos->resume_latency);
-               break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
-                       : pm_qos_read_value(&qos->min_frequency);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
-                       : pm_qos_read_value(&qos->max_frequency);
-               break;
-       default:
+       } else {
                WARN_ON(1);
                ret = 0;
        }
@@ -169,14 +159,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
                        req->dev->power.set_latency_tolerance(req->dev, value);
                }
                break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = pm_qos_update_target(&qos->min_frequency,
-                                          &req->data.pnode, action, value);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = pm_qos_update_target(&qos->max_frequency,
-                                          &req->data.pnode, action, value);
-               break;
        case DEV_PM_QOS_FLAGS:
                ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
                                          action, value);
@@ -227,24 +209,6 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
        c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
        c->type = PM_QOS_MIN;
 
-       c = &qos->min_frequency;
-       plist_head_init(&c->list);
-       c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       c->type = PM_QOS_MAX;
-       c->notifiers = ++n;
-       BLOCKING_INIT_NOTIFIER_HEAD(n);
-
-       c = &qos->max_frequency;
-       plist_head_init(&c->list);
-       c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-       c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-       c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-       c->type = PM_QOS_MIN;
-       c->notifiers = ++n;
-       BLOCKING_INIT_NOTIFIER_HEAD(n);
-
        INIT_LIST_HEAD(&qos->flags.list);
 
        spin_lock_irq(&dev->power.lock);
@@ -305,18 +269,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
                memset(req, 0, sizeof(*req));
        }
 
-       c = &qos->min_frequency;
-       plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
-               apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
-               memset(req, 0, sizeof(*req));
-       }
-
-       c = &qos->max_frequency;
-       plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
-               apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
-               memset(req, 0, sizeof(*req));
-       }
-
        f = &qos->flags;
        list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -428,8 +380,6 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
        switch(req->type) {
        case DEV_PM_QOS_RESUME_LATENCY:
        case DEV_PM_QOS_LATENCY_TOLERANCE:
-       case DEV_PM_QOS_MIN_FREQUENCY:
-       case DEV_PM_QOS_MAX_FREQUENCY:
                curr_value = req->data.pnode.prio;
                break;
        case DEV_PM_QOS_FLAGS:
@@ -557,14 +507,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
                ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
                                                       notifier);
                break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
-                                                      notifier);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
-                                                      notifier);
-               break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
@@ -604,14 +546,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
                ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
                                                         notifier);
                break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
-                                                        notifier);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
-                                                        notifier);
-               break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
index 5b24876..a18155c 100644 (file)
@@ -786,7 +786,6 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
 
        if (nc->tentative && connection->agreed_pro_version < 92) {
                rcu_read_unlock();
-               mutex_unlock(&sock->mutex);
                drbd_err(connection, "--dry-run is not supported by peer");
                return -EOPNOTSUPP;
        }
index 478aa86..a94ee45 100644 (file)
@@ -385,17 +385,16 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        struct nbd_device *nbd = cmd->nbd;
        struct nbd_config *config;
 
+       if (!mutex_trylock(&cmd->lock))
+               return BLK_EH_RESET_TIMER;
+
        if (!refcount_inc_not_zero(&nbd->config_refs)) {
                cmd->status = BLK_STS_TIMEOUT;
+               mutex_unlock(&cmd->lock);
                goto done;
        }
        config = nbd->config;
 
-       if (!mutex_trylock(&cmd->lock)) {
-               nbd_config_put(nbd);
-               return BLK_EH_RESET_TIMER;
-       }
-
        if (config->num_connections > 1) {
                dev_err_ratelimited(nbd_to_dev(nbd),
                                    "Connection timed out, retrying (%d/%d alive)\n",
@@ -711,6 +710,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                ret = -ENOENT;
                goto out;
        }
+       if (cmd->status != BLK_STS_OK) {
+               dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
+                       req);
+               ret = -ENOENT;
+               goto out;
+       }
        if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
                dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
                        req);
@@ -792,7 +797,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
+       mutex_lock(&cmd->lock);
        cmd->status = BLK_STS_IOERR;
+       mutex_unlock(&cmd->lock);
+
        blk_mq_complete_request(req);
        return true;
 }
@@ -972,6 +980,25 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
        return ret;
 }
 
+static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
+                                    int *err)
+{
+       struct socket *sock;
+
+       *err = 0;
+       sock = sockfd_lookup(fd, err);
+       if (!sock)
+               return NULL;
+
+       if (sock->ops->shutdown == sock_no_shutdown) {
+               dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
+               *err = -EINVAL;
+               return NULL;
+       }
+
+       return sock;
+}
+
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
                          bool netlink)
 {
@@ -981,7 +1008,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
        struct nbd_sock *nsock;
        int err;
 
-       sock = sockfd_lookup(arg, &err);
+       sock = nbd_get_socket(nbd, arg, &err);
        if (!sock)
                return err;
 
@@ -1033,7 +1060,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
        int i;
        int err;
 
-       sock = sockfd_lookup(arg, &err);
+       sock = nbd_get_socket(nbd, arg, &err);
        if (!sock)
                return err;
 
index 3913667..13527a0 100644 (file)
@@ -2087,7 +2087,7 @@ static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
        struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
        struct ceph_osd_data *osd_data;
        u64 objno;
-       u8 state, new_state, current_state;
+       u8 state, new_state, uninitialized_var(current_state);
        bool has_current_state;
        void *p;
 
index 76b73dd..10f6368 100644 (file)
@@ -1000,8 +1000,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
 
        cancel_work_sync(&card->event_work);
 
+       destroy_workqueue(card->event_wq);
        rsxx_destroy_dev(card);
        rsxx_dma_destroy(card);
+       destroy_workqueue(card->creg_ctrl.creg_wq);
 
        spin_lock_irqsave(&card->irq_lock, flags);
        rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
index ad50efb..2b6670d 100644 (file)
@@ -74,6 +74,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
  * @clk_disable_quirk: module specific clock disable quirk
  * @reset_done_quirk: module specific reset done quirk
  * @module_enable_quirk: module specific enable quirk
+ * @module_disable_quirk: module specific disable quirk
  */
 struct sysc {
        struct device *dev;
@@ -100,6 +101,7 @@ struct sysc {
        void (*clk_disable_quirk)(struct sysc *sysc);
        void (*reset_done_quirk)(struct sysc *sysc);
        void (*module_enable_quirk)(struct sysc *sysc);
+       void (*module_disable_quirk)(struct sysc *sysc);
 };
 
 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -959,6 +961,9 @@ static int sysc_disable_module(struct device *dev)
        if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
                return 0;
 
+       if (ddata->module_disable_quirk)
+               ddata->module_disable_quirk(ddata);
+
        regbits = ddata->cap->regbits;
        reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
 
@@ -1248,6 +1253,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_MODULE_QUIRK_SGX),
        SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
                   SYSC_MODULE_QUIRK_WDT),
+       /* Watchdog on am3 and am4 */
+       SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+                  SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
 
 #ifdef DEBUG
        SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
@@ -1440,14 +1448,14 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
                                   !(val & 0x10), 100,
                                   MAX_MODULE_SOFTRESET_WAIT);
        if (error)
-               dev_warn(ddata->dev, "wdt disable spr failed\n");
+               dev_warn(ddata->dev, "wdt disable step1 failed\n");
 
-       sysc_write(ddata, wps, 0x5555);
+       sysc_write(ddata, spr, 0x5555);
        error = readl_poll_timeout(ddata->module_va + wps, val,
                                   !(val & 0x10), 100,
                                   MAX_MODULE_SOFTRESET_WAIT);
        if (error)
-               dev_warn(ddata->dev, "wdt disable wps failed\n");
+               dev_warn(ddata->dev, "wdt disable step2 failed\n");
 }
 
 static void sysc_init_module_quirks(struct sysc *ddata)
@@ -1471,8 +1479,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
        if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
                ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
 
-       if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT)
+       if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
                ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
+               ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
+       }
 }
 
 static int sysc_clockdomain_init(struct sysc *ddata)
index 80b850e..8d53b8e 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/err.h>
-#include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/hw_random.h>
 #include <linux/kernel.h>
@@ -422,9 +421,7 @@ static int hwrng_fillfn(void *unused)
 {
        long rc;
 
-       set_freezable();
-
-       while (!kthread_freezable_should_stop(NULL)) {
+       while (!kthread_should_stop()) {
                struct hwrng *rng;
 
                rng = get_current_rng();
index de434fe..01b8868 100644 (file)
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
-#include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/workqueue.h>
 #include <linux/irq.h>
@@ -2500,8 +2499,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
         * We'll be woken up again once below random_write_wakeup_thresh,
         * or when the calling thread is about to terminate.
         */
-       wait_event_freezable(random_write_wait,
-                       kthread_should_stop() ||
+       wait_event_interruptible(random_write_wait, kthread_should_stop() ||
                        ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
        mix_pool_bytes(poolp, buffer, count);
        credit_entropy_bits(poolp, entropy);
index 87083b3..37c2266 100644 (file)
@@ -297,7 +297,10 @@ static int clk_main_probe_frequency(struct regmap *regmap)
                regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
                if (mcfr & AT91_PMC_MAINRDY)
                        return 0;
-               usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
+               if (system_state < SYSTEM_RUNNING)
+                       udelay(MAINF_LOOP_MIN_WAIT);
+               else
+                       usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
        } while (time_before(prep_time, timeout));
 
        return -ETIMEDOUT;
index 9790ddf..86238d5 100644 (file)
@@ -43,6 +43,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
 };
 
 static const struct clk_programmable_layout sam9x60_programmable_layout = {
+       .pres_mask = 0xff,
        .pres_shift = 8,
        .css_mask = 0x1f,
        .have_slck_mck = 0,
index 9bfe9a2..fac0ca5 100644 (file)
@@ -76,7 +76,10 @@ static int clk_slow_osc_prepare(struct clk_hw *hw)
 
        writel(tmp | osc->bits->cr_osc32en, sckcr);
 
-       usleep_range(osc->startup_usec, osc->startup_usec + 1);
+       if (system_state < SYSTEM_RUNNING)
+               udelay(osc->startup_usec);
+       else
+               usleep_range(osc->startup_usec, osc->startup_usec + 1);
 
        return 0;
 }
@@ -187,7 +190,10 @@ static int clk_slow_rc_osc_prepare(struct clk_hw *hw)
 
        writel(readl(sckcr) | osc->bits->cr_rcen, sckcr);
 
-       usleep_range(osc->startup_usec, osc->startup_usec + 1);
+       if (system_state < SYSTEM_RUNNING)
+               udelay(osc->startup_usec);
+       else
+               usleep_range(osc->startup_usec, osc->startup_usec + 1);
 
        return 0;
 }
@@ -288,7 +294,10 @@ static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
 
        writel(tmp, sckcr);
 
-       usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
+       if (system_state < SYSTEM_RUNNING)
+               udelay(SLOWCK_SW_TIME_USEC);
+       else
+               usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
 
        return 0;
 }
@@ -533,7 +542,10 @@ static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
                return 0;
        }
 
-       usleep_range(osc->startup_usec, osc->startup_usec + 1);
+       if (system_state < SYSTEM_RUNNING)
+               udelay(osc->startup_usec);
+       else
+               usleep_range(osc->startup_usec, osc->startup_usec + 1);
        osc->prepared = true;
 
        return 0;
index 1c1bb39..b1318e6 100644 (file)
@@ -266,10 +266,11 @@ static int aspeed_g6_clk_enable(struct clk_hw *hw)
 
        /* Enable clock */
        if (gate->flags & CLK_GATE_SET_TO_DISABLE) {
-               regmap_write(gate->map, get_clock_reg(gate), clk);
-       } else {
-               /* Use set to clear register */
+               /* Clock is clear to enable, so use set to clear register */
                regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk);
+       } else {
+               /* Clock is set to enable, so use write to set register */
+               regmap_write(gate->map, get_clock_reg(gate), clk);
        }
 
        if (gate->reset_idx >= 0) {
index 067ab87..172589e 100644 (file)
@@ -638,7 +638,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
                                           clks[IMX8MM_CLK_A53_DIV],
                                           clks[IMX8MM_CLK_A53_SRC],
                                           clks[IMX8MM_ARM_PLL_OUT],
-                                          clks[IMX8MM_CLK_24M]);
+                                          clks[IMX8MM_SYS_PLL1_800M]);
 
        imx_check_clocks(clks, ARRAY_SIZE(clks));
 
index 47a4b44..58b5ace 100644 (file)
@@ -610,7 +610,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
                                           clks[IMX8MN_CLK_A53_DIV],
                                           clks[IMX8MN_CLK_A53_SRC],
                                           clks[IMX8MN_ARM_PLL_OUT],
-                                          clks[IMX8MN_CLK_24M]);
+                                          clks[IMX8MN_SYS_PLL1_800M]);
 
        imx_check_clocks(clks, ARRAY_SIZE(clks));
 
index ea4c791..b3af61c 100644 (file)
@@ -343,6 +343,7 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
                .offset = HHI_SYS_CPU_CLK_CNTL0,
                .mask = 0x3,
                .shift = 0,
+               .flags = CLK_MUX_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "cpu_clk_dyn0_sel",
@@ -353,8 +354,7 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
                        { .hw = &g12a_fclk_div3.hw },
                },
                .num_parents = 3,
-               /* This sub-tree is used a parking clock */
-               .flags = CLK_SET_RATE_NO_REPARENT,
+               .flags = CLK_SET_RATE_PARENT,
        },
 };
 
@@ -410,6 +410,7 @@ static struct clk_regmap g12a_cpu_clk_postmux0 = {
                .offset = HHI_SYS_CPU_CLK_CNTL0,
                .mask = 0x1,
                .shift = 2,
+               .flags = CLK_MUX_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "cpu_clk_dyn0",
@@ -466,6 +467,7 @@ static struct clk_regmap g12a_cpu_clk_dyn = {
                .offset = HHI_SYS_CPU_CLK_CNTL0,
                .mask = 0x1,
                .shift = 10,
+               .flags = CLK_MUX_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "cpu_clk_dyn",
@@ -485,6 +487,7 @@ static struct clk_regmap g12a_cpu_clk = {
                .offset = HHI_SYS_CPU_CLK_CNTL0,
                .mask = 0x1,
                .shift = 11,
+               .flags = CLK_MUX_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "cpu_clk",
@@ -504,6 +507,7 @@ static struct clk_regmap g12b_cpu_clk = {
                .offset = HHI_SYS_CPU_CLK_CNTL0,
                .mask = 0x1,
                .shift = 11,
+               .flags = CLK_MUX_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "cpu_clk",
@@ -523,6 +527,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
                .offset = HHI_SYS_CPUB_CLK_CNTL,
                .mask = 0x3,
                .shift = 0,
+               .flags = CLK_MUX_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "cpub_clk_dyn0_sel",
@@ -533,6 +538,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
                        { .hw = &g12a_fclk_div3.hw },
                },
                .num_parents = 3,
+               .flags = CLK_SET_RATE_PARENT,
        },
 };
 
@@ -567,6 +573,7 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
                .offset = HHI_SYS_CPUB_CLK_CNTL,
                .mask = 0x1,
                .shift = 2,
+               .flags = CLK_MUX_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "cpub_clk_dyn0",
@@ -644,6 +651,7 @@ static struct clk_regmap g12b_cpub_clk_dyn = {
                .offset = HHI_SYS_CPUB_CLK_CNTL,
                .mask = 0x1,
                .shift = 10,
+               .flags = CLK_MUX_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "cpub_clk_dyn",
@@ -663,6 +671,7 @@ static struct clk_regmap g12b_cpub_clk = {
                .offset = HHI_SYS_CPUB_CLK_CNTL,
                .mask = 0x1,
                .shift = 11,
+               .flags = CLK_MUX_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "cpub_clk",
index 7cfb998..1f9c056 100644 (file)
@@ -935,6 +935,7 @@ static struct clk_regmap gxbb_sar_adc_clk_div = {
                        &gxbb_sar_adc_clk_sel.hw
                },
                .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT,
        },
 };
 
index 7670cc5..31466cd 100644 (file)
@@ -165,12 +165,18 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
        GATE_BUS_CPU,
        GATE_SCLK_CPU,
        CLKOUT_CMU_CPU,
+       CPLL_CON0,
+       DPLL_CON0,
        EPLL_CON0,
        EPLL_CON1,
        EPLL_CON2,
        RPLL_CON0,
        RPLL_CON1,
        RPLL_CON2,
+       IPLL_CON0,
+       SPLL_CON0,
+       VPLL_CON0,
+       MPLL_CON0,
        SRC_TOP0,
        SRC_TOP1,
        SRC_TOP2,
@@ -1172,8 +1178,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
        GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "dout_isp_sensor2",
                        GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0),
 
-       GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
-
        /* CDREX */
        GATE(CLK_CLKM_PHY0, "clkm_phy0", "dout_sclk_cdrex",
                        GATE_BUS_CDREX0, 0, 0, 0),
@@ -1248,6 +1252,15 @@ static struct exynos5_subcmu_reg_dump exynos5x_gsc_suspend_regs[] = {
        { DIV2_RATIO0, 0, 0x30 },       /* DIV dout_gscl_blk_300 */
 };
 
+static const struct samsung_gate_clock exynos5x_g3d_gate_clks[] __initconst = {
+       GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_g3d_suspend_regs[] = {
+       { GATE_IP_G3D, 0x3ff, 0x3ff },  /* G3D gates */
+       { SRC_TOP5, 0, BIT(16) },       /* MUX mout_user_aclk_g3d */
+};
+
 static const struct samsung_div_clock exynos5x_mfc_div_clks[] __initconst = {
        DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2),
 };
@@ -1320,6 +1333,14 @@ static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
        .pd_name        = "GSC",
 };
 
+static const struct exynos5_subcmu_info exynos5x_g3d_subcmu = {
+       .gate_clks      = exynos5x_g3d_gate_clks,
+       .nr_gate_clks   = ARRAY_SIZE(exynos5x_g3d_gate_clks),
+       .suspend_regs   = exynos5x_g3d_suspend_regs,
+       .nr_suspend_regs = ARRAY_SIZE(exynos5x_g3d_suspend_regs),
+       .pd_name        = "G3D",
+};
+
 static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
        .div_clks       = exynos5x_mfc_div_clks,
        .nr_div_clks    = ARRAY_SIZE(exynos5x_mfc_div_clks),
@@ -1351,6 +1372,7 @@ static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
 static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
        &exynos5x_disp_subcmu,
        &exynos5x_gsc_subcmu,
+       &exynos5x_g3d_subcmu,
        &exynos5x_mfc_subcmu,
        &exynos5x_mscl_subcmu,
 };
@@ -1358,6 +1380,7 @@ static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
 static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
        &exynos5x_disp_subcmu,
        &exynos5x_gsc_subcmu,
+       &exynos5x_g3d_subcmu,
        &exynos5x_mfc_subcmu,
        &exynos5x_mscl_subcmu,
        &exynos5800_mau_subcmu,
index 7824c2b..4b1aa93 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/slab.h>
 
 #include <dt-bindings/clock/exynos5433.h>
 
@@ -5584,6 +5585,8 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
 
        data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs,
                                                    info->nr_clk_regs);
+       if (!data->clk_save)
+               return -ENOMEM;
        data->nr_clk_save = info->nr_clk_regs;
        data->clk_suspend = info->suspend_regs;
        data->nr_clk_suspend = info->nr_suspend_regs;
@@ -5592,12 +5595,19 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
        if (data->nr_pclks > 0) {
                data->pclks = devm_kcalloc(dev, sizeof(struct clk *),
                                           data->nr_pclks, GFP_KERNEL);
-
+               if (!data->pclks) {
+                       kfree(data->clk_save);
+                       return -ENOMEM;
+               }
                for (i = 0; i < data->nr_pclks; i++) {
                        struct clk *clk = of_clk_get(dev->of_node, i);
 
-                       if (IS_ERR(clk))
+                       if (IS_ERR(clk)) {
+                               kfree(data->clk_save);
+                               while (--i >= 0)
+                                       clk_put(data->pclks[i]);
                                return PTR_ERR(clk);
+                       }
                        data->pclks[i] = clk;
                }
        }
index dcac139..ef29582 100644 (file)
@@ -1224,7 +1224,7 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev)
 
        /* Enforce d1 = 0, d2 = 0 for Audio PLL */
        val = readl(reg + SUN9I_A80_PLL_AUDIO_REG);
-       val &= (BIT(16) & BIT(18));
+       val &= ~(BIT(16) | BIT(18));
        writel(val, reg + SUN9I_A80_PLL_AUDIO_REG);
 
        /* Enforce P = 1 for both CPU cluster PLLs */
index d3a4338..27201fd 100644 (file)
@@ -1080,8 +1080,8 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
                                                 rate_hw, rate_ops,
                                                 gate_hw, &clk_gate_ops,
                                                 clkflags |
-                                                data->div[i].critical ?
-                                                       CLK_IS_CRITICAL : 0);
+                                                (data->div[i].critical ?
+                                                       CLK_IS_CRITICAL : 0));
 
                WARN_ON(IS_ERR(clk_data->clks[i]));
        }
index a01ca93..f65e16c 100644 (file)
@@ -174,7 +174,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
        struct clk_init_data init = { NULL };
        const char **parent_names = NULL;
        struct clk *clk;
-       int ret;
 
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        if (!clk_hw) {
@@ -207,11 +206,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
        clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
 
        if (!IS_ERR(clk)) {
-               ret = ti_clk_add_alias(NULL, clk, node->name);
-               if (ret) {
-                       clk_unregister(clk);
-                       goto cleanup;
-               }
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
                kfree(parent_names);
                return;
index 975995e..b0c0690 100644 (file)
@@ -100,11 +100,12 @@ static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
         * can be from a timer that requires pm_runtime access, which
         * will eventually bring us here with timekeeping_suspended,
         * during both suspend entry and resume paths. This happens
-        * at least on am43xx platform.
+        * at least on am43xx platform. Account for flakeyness
+        * with udelay() by multiplying the timeout value by 2.
         */
        if (unlikely(_early_timeout || timekeeping_suspended)) {
                if (time->cycles++ < timeout) {
-                       udelay(1);
+                       udelay(1 * 2);
                        return false;
                }
        } else {
index 354b27d..62812f8 100644 (file)
@@ -328,12 +328,13 @@ static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
        return 0;
 }
 
+static const unsigned int sh_mtu2_channel_offsets[] = {
+       0x300, 0x380, 0x000,
+};
+
 static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
                                 struct sh_mtu2_device *mtu)
 {
-       static const unsigned int channel_offsets[] = {
-               0x300, 0x380, 0x000,
-       };
        char name[6];
        int irq;
        int ret;
@@ -356,7 +357,7 @@ static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
                return ret;
        }
 
-       ch->base = mtu->mapbase + channel_offsets[index];
+       ch->base = mtu->mapbase + sh_mtu2_channel_offsets[index];
        ch->index = index;
 
        return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
@@ -408,7 +409,12 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
        }
 
        /* Allocate and setup the channels. */
-       mtu->num_channels = 3;
+       ret = platform_irq_count(pdev);
+       if (ret < 0)
+               goto err_unmap;
+
+       mtu->num_channels = min_t(unsigned int, ret,
+                                 ARRAY_SIZE(sh_mtu2_channel_offsets));
 
        mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
                                GFP_KERNEL);
index a562f49..9318edc 100644 (file)
@@ -268,15 +268,12 @@ static int __init mtk_syst_init(struct device_node *node)
 
        ret = timer_of_init(node, &to);
        if (ret)
-               goto err;
+               return ret;
 
        clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
                                        TIMER_SYNC_TICKS, 0xffffffff);
 
        return 0;
-err:
-       timer_of_cleanup(&to);
-       return ret;
 }
 
 static int __init mtk_gpt_init(struct device_node *node)
@@ -293,7 +290,7 @@ static int __init mtk_gpt_init(struct device_node *node)
 
        ret = timer_of_init(node, &to);
        if (ret)
-               goto err;
+               return ret;
 
        /* Configure clock source */
        mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
@@ -311,9 +308,6 @@ static int __init mtk_gpt_init(struct device_node *node)
        mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
 
        return 0;
-err:
-       timer_of_cleanup(&to);
-       return ret;
 }
 TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
 TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
index a905796..3858d86 100644 (file)
@@ -49,14 +49,6 @@ config ARM_ARMADA_8K_CPUFREQ
 
          If in doubt, say N.
 
-# big LITTLE core layer and glue drivers
-config ARM_BIG_LITTLE_CPUFREQ
-       tristate "Generic ARM big LITTLE CPUfreq driver"
-       depends on ARM_CPU_TOPOLOGY && HAVE_CLK
-       select PM_OPP
-       help
-         This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
-
 config ARM_SCPI_CPUFREQ
        tristate "SCPI based CPUfreq driver"
        depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
@@ -69,7 +61,9 @@ config ARM_SCPI_CPUFREQ
 
 config ARM_VEXPRESS_SPC_CPUFREQ
        tristate "Versatile Express SPC based CPUfreq driver"
-       depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+       depends on ARM_CPU_TOPOLOGY && HAVE_CLK
+       depends on ARCH_VEXPRESS_SPC
+       select PM_OPP
        help
          This add the CPUfreq driver support for Versatile Express
          big.LITTLE platforms using SPC for power management.
index 9a9f5cc..f6670c4 100644 (file)
@@ -47,8 +47,6 @@ obj-$(CONFIG_X86_SFI_CPUFREQ)         += sfi-cpufreq.o
 
 ##################################################################################
 # ARM SoC drivers
-obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)   += arm_big_little.o
-
 obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ)  += armada-37xx-cpufreq.o
 obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ)    += armada-8k-cpufreq.o
 obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ)  += brcmstb-avs-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
deleted file mode 100644 (file)
index 7fe52fc..0000000
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * ARM big.LITTLE Platforms CPUFreq support
- *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-#include <linux/pm_opp.h>
-#include <linux/slab.h>
-#include <linux/topology.h>
-#include <linux/types.h>
-
-#include "arm_big_little.h"
-
-/* Currently we support only two clusters */
-#define A15_CLUSTER    0
-#define A7_CLUSTER     1
-#define MAX_CLUSTERS   2
-
-#ifdef CONFIG_BL_SWITCHER
-#include <asm/bL_switcher.h>
-static bool bL_switching_enabled;
-#define is_bL_switching_enabled()      bL_switching_enabled
-#define set_switching_enabled(x)       (bL_switching_enabled = (x))
-#else
-#define is_bL_switching_enabled()      false
-#define set_switching_enabled(x)       do { } while (0)
-#define bL_switch_request(...)         do { } while (0)
-#define bL_switcher_put_enabled()      do { } while (0)
-#define bL_switcher_get_enabled()      do { } while (0)
-#endif
-
-#define ACTUAL_FREQ(cluster, freq)  ((cluster == A7_CLUSTER) ? freq << 1 : freq)
-#define VIRT_FREQ(cluster, freq)    ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
-
-static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
-static const struct cpufreq_arm_bL_ops *arm_bL_ops;
-static struct clk *clk[MAX_CLUSTERS];
-static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
-static atomic_t cluster_usage[MAX_CLUSTERS + 1];
-
-static unsigned int clk_big_min;       /* (Big) clock frequencies */
-static unsigned int clk_little_max;    /* Maximum clock frequency (Little) */
-
-static DEFINE_PER_CPU(unsigned int, physical_cluster);
-static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
-
-static struct mutex cluster_lock[MAX_CLUSTERS];
-
-static inline int raw_cpu_to_cluster(int cpu)
-{
-       return topology_physical_package_id(cpu);
-}
-
-static inline int cpu_to_cluster(int cpu)
-{
-       return is_bL_switching_enabled() ?
-               MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
-}
-
-static unsigned int find_cluster_maxfreq(int cluster)
-{
-       int j;
-       u32 max_freq = 0, cpu_freq;
-
-       for_each_online_cpu(j) {
-               cpu_freq = per_cpu(cpu_last_req_freq, j);
-
-               if ((cluster == per_cpu(physical_cluster, j)) &&
-                               (max_freq < cpu_freq))
-                       max_freq = cpu_freq;
-       }
-
-       pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
-                       max_freq);
-
-       return max_freq;
-}
-
-static unsigned int clk_get_cpu_rate(unsigned int cpu)
-{
-       u32 cur_cluster = per_cpu(physical_cluster, cpu);
-       u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
-
-       /* For switcher we use virtual A7 clock rates */
-       if (is_bL_switching_enabled())
-               rate = VIRT_FREQ(cur_cluster, rate);
-
-       pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
-                       cur_cluster, rate);
-
-       return rate;
-}
-
-static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
-{
-       if (is_bL_switching_enabled()) {
-               pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
-                                       cpu));
-
-               return per_cpu(cpu_last_req_freq, cpu);
-       } else {
-               return clk_get_cpu_rate(cpu);
-       }
-}
-
-static unsigned int
-bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
-{
-       u32 new_rate, prev_rate;
-       int ret;
-       bool bLs = is_bL_switching_enabled();
-
-       mutex_lock(&cluster_lock[new_cluster]);
-
-       if (bLs) {
-               prev_rate = per_cpu(cpu_last_req_freq, cpu);
-               per_cpu(cpu_last_req_freq, cpu) = rate;
-               per_cpu(physical_cluster, cpu) = new_cluster;
-
-               new_rate = find_cluster_maxfreq(new_cluster);
-               new_rate = ACTUAL_FREQ(new_cluster, new_rate);
-       } else {
-               new_rate = rate;
-       }
-
-       pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
-                       __func__, cpu, old_cluster, new_cluster, new_rate);
-
-       ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
-       if (!ret) {
-               /*
-                * FIXME: clk_set_rate hasn't returned an error here however it
-                * may be that clk_change_rate failed due to hardware or
-                * firmware issues and wasn't able to report that due to the
-                * current design of the clk core layer. To work around this
-                * problem we will read back the clock rate and check it is
-                * correct. This needs to be removed once clk core is fixed.
-                */
-               if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
-                       ret = -EIO;
-       }
-
-       if (WARN_ON(ret)) {
-               pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
-                               new_cluster);
-               if (bLs) {
-                       per_cpu(cpu_last_req_freq, cpu) = prev_rate;
-                       per_cpu(physical_cluster, cpu) = old_cluster;
-               }
-
-               mutex_unlock(&cluster_lock[new_cluster]);
-
-               return ret;
-       }
-
-       mutex_unlock(&cluster_lock[new_cluster]);
-
-       /* Recalc freq for old cluster when switching clusters */
-       if (old_cluster != new_cluster) {
-               pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
-                               __func__, cpu, old_cluster, new_cluster);
-
-               /* Switch cluster */
-               bL_switch_request(cpu, new_cluster);
-
-               mutex_lock(&cluster_lock[old_cluster]);
-
-               /* Set freq of old cluster if there are cpus left on it */
-               new_rate = find_cluster_maxfreq(old_cluster);
-               new_rate = ACTUAL_FREQ(old_cluster, new_rate);
-
-               if (new_rate) {
-                       pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
-                                       __func__, old_cluster, new_rate);
-
-                       if (clk_set_rate(clk[old_cluster], new_rate * 1000))
-                               pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
-                                               __func__, ret, old_cluster);
-               }
-               mutex_unlock(&cluster_lock[old_cluster]);
-       }
-
-       return 0;
-}
-
-/* Set clock frequency */
-static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
-               unsigned int index)
-{
-       u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
-       unsigned int freqs_new;
-       int ret;
-
-       cur_cluster = cpu_to_cluster(cpu);
-       new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
-
-       freqs_new = freq_table[cur_cluster][index].frequency;
-
-       if (is_bL_switching_enabled()) {
-               if ((actual_cluster == A15_CLUSTER) &&
-                               (freqs_new < clk_big_min)) {
-                       new_cluster = A7_CLUSTER;
-               } else if ((actual_cluster == A7_CLUSTER) &&
-                               (freqs_new > clk_little_max)) {
-                       new_cluster = A15_CLUSTER;
-               }
-       }
-
-       ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
-
-       if (!ret) {
-               arch_set_freq_scale(policy->related_cpus, freqs_new,
-                                   policy->cpuinfo.max_freq);
-       }
-
-       return ret;
-}
-
-static inline u32 get_table_count(struct cpufreq_frequency_table *table)
-{
-       int count;
-
-       for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
-               ;
-
-       return count;
-}
-
-/* get the minimum frequency in the cpufreq_frequency_table */
-static inline u32 get_table_min(struct cpufreq_frequency_table *table)
-{
-       struct cpufreq_frequency_table *pos;
-       uint32_t min_freq = ~0;
-       cpufreq_for_each_entry(pos, table)
-               if (pos->frequency < min_freq)
-                       min_freq = pos->frequency;
-       return min_freq;
-}
-
-/* get the maximum frequency in the cpufreq_frequency_table */
-static inline u32 get_table_max(struct cpufreq_frequency_table *table)
-{
-       struct cpufreq_frequency_table *pos;
-       uint32_t max_freq = 0;
-       cpufreq_for_each_entry(pos, table)
-               if (pos->frequency > max_freq)
-                       max_freq = pos->frequency;
-       return max_freq;
-}
-
-static int merge_cluster_tables(void)
-{
-       int i, j, k = 0, count = 1;
-       struct cpufreq_frequency_table *table;
-
-       for (i = 0; i < MAX_CLUSTERS; i++)
-               count += get_table_count(freq_table[i]);
-
-       table = kcalloc(count, sizeof(*table), GFP_KERNEL);
-       if (!table)
-               return -ENOMEM;
-
-       freq_table[MAX_CLUSTERS] = table;
-
-       /* Add in reverse order to get freqs in increasing order */
-       for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
-               for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
-                               j++) {
-                       table[k].frequency = VIRT_FREQ(i,
-                                       freq_table[i][j].frequency);
-                       pr_debug("%s: index: %d, freq: %d\n", __func__, k,
-                                       table[k].frequency);
-                       k++;
-               }
-       }
-
-       table[k].driver_data = k;
-       table[k].frequency = CPUFREQ_TABLE_END;
-
-       pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
-
-       return 0;
-}
-
-static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
-                                           const struct cpumask *cpumask)
-{
-       u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
-
-       if (!freq_table[cluster])
-               return;
-
-       clk_put(clk[cluster]);
-       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-       if (arm_bL_ops->free_opp_table)
-               arm_bL_ops->free_opp_table(cpumask);
-       dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
-}
-
-static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
-                                          const struct cpumask *cpumask)
-{
-       u32 cluster = cpu_to_cluster(cpu_dev->id);
-       int i;
-
-       if (atomic_dec_return(&cluster_usage[cluster]))
-               return;
-
-       if (cluster < MAX_CLUSTERS)
-               return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
-
-       for_each_present_cpu(i) {
-               struct device *cdev = get_cpu_device(i);
-               if (!cdev) {
-                       pr_err("%s: failed to get cpu%d device\n", __func__, i);
-                       return;
-               }
-
-               _put_cluster_clk_and_freq_table(cdev, cpumask);
-       }
-
-       /* free virtual table */
-       kfree(freq_table[cluster]);
-}
-
-static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
-                                          const struct cpumask *cpumask)
-{
-       u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
-       int ret;
-
-       if (freq_table[cluster])
-               return 0;
-
-       ret = arm_bL_ops->init_opp_table(cpumask);
-       if (ret) {
-               dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
-                               __func__, cpu_dev->id, ret);
-               goto out;
-       }
-
-       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
-       if (ret) {
-               dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
-                               __func__, cpu_dev->id, ret);
-               goto free_opp_table;
-       }
-
-       clk[cluster] = clk_get(cpu_dev, NULL);
-       if (!IS_ERR(clk[cluster])) {
-               dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
-                               __func__, clk[cluster], freq_table[cluster],
-                               cluster);
-               return 0;
-       }
-
-       dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
-                       __func__, cpu_dev->id, cluster);
-       ret = PTR_ERR(clk[cluster]);
-       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-
-free_opp_table:
-       if (arm_bL_ops->free_opp_table)
-               arm_bL_ops->free_opp_table(cpumask);
-out:
-       dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
-                       cluster);
-       return ret;
-}
-
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
-                                         const struct cpumask *cpumask)
-{
-       u32 cluster = cpu_to_cluster(cpu_dev->id);
-       int i, ret;
-
-       if (atomic_inc_return(&cluster_usage[cluster]) != 1)
-               return 0;
-
-       if (cluster < MAX_CLUSTERS) {
-               ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
-               if (ret)
-                       atomic_dec(&cluster_usage[cluster]);
-               return ret;
-       }
-
-       /*
-        * Get data for all clusters and fill virtual cluster with a merge of
-        * both
-        */
-       for_each_present_cpu(i) {
-               struct device *cdev = get_cpu_device(i);
-               if (!cdev) {
-                       pr_err("%s: failed to get cpu%d device\n", __func__, i);
-                       return -ENODEV;
-               }
-
-               ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
-               if (ret)
-                       goto put_clusters;
-       }
-
-       ret = merge_cluster_tables();
-       if (ret)
-               goto put_clusters;
-
-       /* Assuming 2 cluster, set clk_big_min and clk_little_max */
-       clk_big_min = get_table_min(freq_table[0]);
-       clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
-
-       pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
-                       __func__, cluster, clk_big_min, clk_little_max);
-
-       return 0;
-
-put_clusters:
-       for_each_present_cpu(i) {
-               struct device *cdev = get_cpu_device(i);
-               if (!cdev) {
-                       pr_err("%s: failed to get cpu%d device\n", __func__, i);
-                       return -ENODEV;
-               }
-
-               _put_cluster_clk_and_freq_table(cdev, cpumask);
-       }
-
-       atomic_dec(&cluster_usage[cluster]);
-
-       return ret;
-}
-
-/* Per-CPU initialization */
-static int bL_cpufreq_init(struct cpufreq_policy *policy)
-{
-       u32 cur_cluster = cpu_to_cluster(policy->cpu);
-       struct device *cpu_dev;
-       int ret;
-
-       cpu_dev = get_cpu_device(policy->cpu);
-       if (!cpu_dev) {
-               pr_err("%s: failed to get cpu%d device\n", __func__,
-                               policy->cpu);
-               return -ENODEV;
-       }
-
-       if (cur_cluster < MAX_CLUSTERS) {
-               int cpu;
-
-               cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
-
-               for_each_cpu(cpu, policy->cpus)
-                       per_cpu(physical_cluster, cpu) = cur_cluster;
-       } else {
-               /* Assumption: during init, we are always running on A15 */
-               per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
-       }
-
-       ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
-       if (ret)
-               return ret;
-
-       policy->freq_table = freq_table[cur_cluster];
-       policy->cpuinfo.transition_latency =
-                               arm_bL_ops->get_transition_latency(cpu_dev);
-
-       dev_pm_opp_of_register_em(policy->cpus);
-
-       if (is_bL_switching_enabled())
-               per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
-
-       dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
-       return 0;
-}
-
-static int bL_cpufreq_exit(struct cpufreq_policy *policy)
-{
-       struct device *cpu_dev;
-       int cur_cluster = cpu_to_cluster(policy->cpu);
-
-       if (cur_cluster < MAX_CLUSTERS) {
-               cpufreq_cooling_unregister(cdev[cur_cluster]);
-               cdev[cur_cluster] = NULL;
-       }
-
-       cpu_dev = get_cpu_device(policy->cpu);
-       if (!cpu_dev) {
-               pr_err("%s: failed to get cpu%d device\n", __func__,
-                               policy->cpu);
-               return -ENODEV;
-       }
-
-       put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
-       dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
-
-       return 0;
-}
-
-static void bL_cpufreq_ready(struct cpufreq_policy *policy)
-{
-       int cur_cluster = cpu_to_cluster(policy->cpu);
-
-       /* Do not register a cpu_cooling device if we are in IKS mode */
-       if (cur_cluster >= MAX_CLUSTERS)
-               return;
-
-       cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
-}
-
-static struct cpufreq_driver bL_cpufreq_driver = {
-       .name                   = "arm-big-little",
-       .flags                  = CPUFREQ_STICKY |
-                                       CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
-                                       CPUFREQ_NEED_INITIAL_FREQ_CHECK,
-       .verify                 = cpufreq_generic_frequency_table_verify,
-       .target_index           = bL_cpufreq_set_target,
-       .get                    = bL_cpufreq_get_rate,
-       .init                   = bL_cpufreq_init,
-       .exit                   = bL_cpufreq_exit,
-       .ready                  = bL_cpufreq_ready,
-       .attr                   = cpufreq_generic_attr,
-};
-
-#ifdef CONFIG_BL_SWITCHER
-static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
-                                       unsigned long action, void *_arg)
-{
-       pr_debug("%s: action: %ld\n", __func__, action);
-
-       switch (action) {
-       case BL_NOTIFY_PRE_ENABLE:
-       case BL_NOTIFY_PRE_DISABLE:
-               cpufreq_unregister_driver(&bL_cpufreq_driver);
-               break;
-
-       case BL_NOTIFY_POST_ENABLE:
-               set_switching_enabled(true);
-               cpufreq_register_driver(&bL_cpufreq_driver);
-               break;
-
-       case BL_NOTIFY_POST_DISABLE:
-               set_switching_enabled(false);
-               cpufreq_register_driver(&bL_cpufreq_driver);
-               break;
-
-       default:
-               return NOTIFY_DONE;
-       }
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block bL_switcher_notifier = {
-       .notifier_call = bL_cpufreq_switcher_notifier,
-};
-
-static int __bLs_register_notifier(void)
-{
-       return bL_switcher_register_notifier(&bL_switcher_notifier);
-}
-
-static int __bLs_unregister_notifier(void)
-{
-       return bL_switcher_unregister_notifier(&bL_switcher_notifier);
-}
-#else
-static int __bLs_register_notifier(void) { return 0; }
-static int __bLs_unregister_notifier(void) { return 0; }
-#endif
-
-int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops)
-{
-       int ret, i;
-
-       if (arm_bL_ops) {
-               pr_debug("%s: Already registered: %s, exiting\n", __func__,
-                               arm_bL_ops->name);
-               return -EBUSY;
-       }
-
-       if (!ops || !strlen(ops->name) || !ops->init_opp_table ||
-           !ops->get_transition_latency) {
-               pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
-               return -ENODEV;
-       }
-
-       arm_bL_ops = ops;
-
-       set_switching_enabled(bL_switcher_get_enabled());
-
-       for (i = 0; i < MAX_CLUSTERS; i++)
-               mutex_init(&cluster_lock[i]);
-
-       ret = cpufreq_register_driver(&bL_cpufreq_driver);
-       if (ret) {
-               pr_info("%s: Failed registering platform driver: %s, err: %d\n",
-                               __func__, ops->name, ret);
-               arm_bL_ops = NULL;
-       } else {
-               ret = __bLs_register_notifier();
-               if (ret) {
-                       cpufreq_unregister_driver(&bL_cpufreq_driver);
-                       arm_bL_ops = NULL;
-               } else {
-                       pr_info("%s: Registered platform driver: %s\n",
-                                       __func__, ops->name);
-               }
-       }
-
-       bL_switcher_put_enabled();
-       return ret;
-}
-EXPORT_SYMBOL_GPL(bL_cpufreq_register);
-
-void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops)
-{
-       if (arm_bL_ops != ops) {
-               pr_err("%s: Registered with: %s, can't unregister, exiting\n",
-                               __func__, arm_bL_ops->name);
-               return;
-       }
-
-       bL_switcher_get_enabled();
-       __bLs_unregister_notifier();
-       cpufreq_unregister_driver(&bL_cpufreq_driver);
-       bL_switcher_put_enabled();
-       pr_info("%s: Un-registered platform driver: %s\n", __func__,
-                       arm_bL_ops->name);
-       arm_bL_ops = NULL;
-}
-EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
-MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
deleted file mode 100644 (file)
index 88a176e..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * ARM big.LITTLE platform's CPUFreq header file
- *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef CPUFREQ_ARM_BIG_LITTLE_H
-#define CPUFREQ_ARM_BIG_LITTLE_H
-
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/types.h>
-
-struct cpufreq_arm_bL_ops {
-       char name[CPUFREQ_NAME_LEN];
-
-       /*
-        * This must set opp table for cpu_dev in a similar way as done by
-        * dev_pm_opp_of_add_table().
-        */
-       int (*init_opp_table)(const struct cpumask *cpumask);
-
-       /* Optional */
-       int (*get_transition_latency)(struct device *cpu_dev);
-       void (*free_opp_table)(const struct cpumask *cpumask);
-};
-
-int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops);
-void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops);
-
-#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
index bca8d1f..54bc767 100644 (file)
@@ -86,7 +86,6 @@ static const struct of_device_id whitelist[] __initconst = {
        { .compatible = "st-ericsson,u9540", },
 
        { .compatible = "ti,omap2", },
-       { .compatible = "ti,omap3", },
        { .compatible = "ti,omap4", },
        { .compatible = "ti,omap5", },
 
@@ -137,6 +136,7 @@ static const struct of_device_id blacklist[] __initconst = {
        { .compatible = "ti,am33xx", },
        { .compatible = "ti,am43", },
        { .compatible = "ti,dra7", },
+       { .compatible = "ti,omap3", },
 
        { }
 };
index bffc11b..7fc1a68 100644 (file)
@@ -720,7 +720,7 @@ static ssize_t store_##file_name                                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
-       ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
+       ret = freq_qos_update_request(policy->object##_freq_req, val);\
        return ret >= 0 ? count : ret;                                  \
 }
 
@@ -933,6 +933,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;
 
+       if (!fattr->show)
+               return -EIO;
+
        down_read(&policy->rwsem);
        ret = fattr->show(policy, buf);
        up_read(&policy->rwsem);
@@ -947,6 +950,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
 
+       if (!fattr->store)
+               return -EIO;
+
        /*
         * cpus_read_trylock() is used here to work around a circular lock
         * dependency problem with respect to the cpufreq_register_driver().
@@ -1202,19 +1208,21 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
                goto err_free_real_cpus;
        }
 
+       freq_constraints_init(&policy->constraints);
+
        policy->nb_min.notifier_call = cpufreq_notifier_min;
        policy->nb_max.notifier_call = cpufreq_notifier_max;
 
-       ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
-                                     DEV_PM_QOS_MIN_FREQUENCY);
+       ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
+                                   &policy->nb_min);
        if (ret) {
                dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
                goto err_kobj_remove;
        }
 
-       ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
-                                     DEV_PM_QOS_MAX_FREQUENCY);
+       ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
+                                   &policy->nb_max);
        if (ret) {
                dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
@@ -1232,8 +1240,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
        return policy;
 
 err_min_qos_notifier:
-       dev_pm_qos_remove_notifier(dev, &policy->nb_min,
-                                  DEV_PM_QOS_MIN_FREQUENCY);
+       freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+                                &policy->nb_min);
 err_kobj_remove:
        cpufreq_policy_put_kobj(policy);
 err_free_real_cpus:
@@ -1250,7 +1258,6 @@ err_free_policy:
 
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
-       struct device *dev = get_cpu_device(policy->cpu);
        unsigned long flags;
        int cpu;
 
@@ -1262,10 +1269,13 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       dev_pm_qos_remove_notifier(dev, &policy->nb_max,
-                                  DEV_PM_QOS_MAX_FREQUENCY);
-       dev_pm_qos_remove_notifier(dev, &policy->nb_min,
-                                  DEV_PM_QOS_MIN_FREQUENCY);
+       freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
+                                &policy->nb_max);
+       freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+                                &policy->nb_min);
+
+       /* Cancel any pending policy->update work before freeing the policy. */
+       cancel_work_sync(&policy->update);
 
        if (policy->max_freq_req) {
                /*
@@ -1274,10 +1284,10 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
                 */
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);
-               dev_pm_qos_remove_request(policy->max_freq_req);
+               freq_qos_remove_request(policy->max_freq_req);
        }
 
-       dev_pm_qos_remove_request(policy->min_freq_req);
+       freq_qos_remove_request(policy->min_freq_req);
        kfree(policy->min_freq_req);
 
        cpufreq_policy_put_kobj(policy);
@@ -1357,8 +1367,6 @@ static int cpufreq_online(unsigned int cpu)
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
        if (new_policy) {
-               struct device *dev = get_cpu_device(cpu);
-
                for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
                        add_cpu_dev_symlink(policy, j);
@@ -1369,36 +1377,31 @@ static int cpufreq_online(unsigned int cpu)
                if (!policy->min_freq_req)
                        goto out_destroy_policy;
 
-               ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
-                                            DEV_PM_QOS_MIN_FREQUENCY,
-                                            policy->min);
+               ret = freq_qos_add_request(&policy->constraints,
+                                          policy->min_freq_req, FREQ_QOS_MIN,
+                                          policy->min);
                if (ret < 0) {
                        /*
-                        * So we don't call dev_pm_qos_remove_request() for an
+                        * So we don't call freq_qos_remove_request() for an
                         * uninitialized request.
                         */
                        kfree(policy->min_freq_req);
                        policy->min_freq_req = NULL;
-
-                       dev_err(dev, "Failed to add min-freq constraint (%d)\n",
-                               ret);
                        goto out_destroy_policy;
                }
 
                /*
                 * This must be initialized right here to avoid calling
-                * dev_pm_qos_remove_request() on uninitialized request in case
+                * freq_qos_remove_request() on uninitialized request in case
                 * of errors.
                 */
                policy->max_freq_req = policy->min_freq_req + 1;
 
-               ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
-                                            DEV_PM_QOS_MAX_FREQUENCY,
-                                            policy->max);
+               ret = freq_qos_add_request(&policy->constraints,
+                                          policy->max_freq_req, FREQ_QOS_MAX,
+                                          policy->max);
                if (ret < 0) {
                        policy->max_freq_req = NULL;
-                       dev_err(dev, "Failed to add max-freq constraint (%d)\n",
-                               ret);
                        goto out_destroy_policy;
                }
 
@@ -2374,7 +2377,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
                       struct cpufreq_policy *new_policy)
 {
        struct cpufreq_governor *old_gov;
-       struct device *cpu_dev = get_cpu_device(policy->cpu);
        int ret;
 
        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
@@ -2386,10 +2388,13 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
         * PM QoS framework collects all the requests from users and provide us
         * the final aggregated value here.
         */
-       new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
-       new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
+       new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+       new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
 
-       /* verify the cpu speed can be set within this limit */
+       /*
+        * Verify that the CPU speed can be set within these limits and make sure
+        * that min <= max.
+        */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
                return ret;
@@ -2518,7 +2523,7 @@ static int cpufreq_boost_set_sw(int state)
                        break;
                }
 
-               ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
+               ret = freq_qos_update_request(policy->max_freq_req, policy->max);
                if (ret < 0)
                        break;
        }
@@ -2632,6 +2637,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        if (cpufreq_disabled())
                return -ENODEV;
 
+       /*
+        * The cpufreq core depends heavily on the availability of device
+        * structure, make sure they are available before proceeding further.
+        */
+       if (!get_cpu_device(0))
+               return -EPROBE_DEFER;
+
        if (!driver_data || !driver_data->verify || !driver_data->init ||
            !(driver_data->setpolicy || driver_data->target_index ||
                    driver_data->target) ||
index 35db14c..85a6efd 100644 (file)
@@ -44,19 +44,19 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
        mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
 
        /*
-        * Early samples without fuses written report "0 0" which means
-        * consumer segment and minimum speed grading.
-        *
-        * According to datasheet minimum speed grading is not supported for
-        * consumer parts so clamp to 1 to avoid warning for "no OPPs"
+        * Early samples without fuses written report "0 0" which may NOT
+        * match any OPP defined in DT. So clamp to minimum OPP defined in
+        * DT to avoid warning for "no OPPs".
         *
         * Applies to i.MX8M series SoCs.
         */
-       if (mkt_segment == 0 && speed_grade == 0 && (
-                       of_machine_is_compatible("fsl,imx8mm") ||
-                       of_machine_is_compatible("fsl,imx8mn") ||
-                       of_machine_is_compatible("fsl,imx8mq")))
-               speed_grade = 1;
+       if (mkt_segment == 0 && speed_grade == 0) {
+               if (of_machine_is_compatible("fsl,imx8mm") ||
+                   of_machine_is_compatible("fsl,imx8mq"))
+                       speed_grade = 1;
+               if (of_machine_is_compatible("fsl,imx8mn"))
+                       speed_grade = 0xb;
+       }
 
        supported_hw[0] = BIT(speed_grade);
        supported_hw[1] = BIT(mkt_segment);
index 9f02de9..d2fa3e9 100644 (file)
@@ -847,11 +847,9 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
        value |= HWP_MAX_PERF(min_perf);
        value |= HWP_MIN_PERF(min_perf);
 
-       /* Set EPP/EPB to min */
+       /* Set EPP to min */
        if (boot_cpu_has(X86_FEATURE_HWP_EPP))
                value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
-       else
-               intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
 
        wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }
@@ -1088,10 +1086,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 
 static struct cpufreq_driver intel_pstate;
 
-static void update_qos_request(enum dev_pm_qos_req_type type)
+static void update_qos_request(enum freq_qos_req_type type)
 {
        int max_state, turbo_max, freq, i, perf_pct;
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        struct cpufreq_policy *policy;
 
        for_each_possible_cpu(i) {
@@ -1112,7 +1110,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
                else
                        turbo_max = cpu->pstate.turbo_pstate;
 
-               if (type == DEV_PM_QOS_MIN_FREQUENCY) {
+               if (type == FREQ_QOS_MIN) {
                        perf_pct = global.min_perf_pct;
                } else {
                        req++;
@@ -1122,7 +1120,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
                freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
                freq *= cpu->pstate.scaling;
 
-               if (dev_pm_qos_update_request(req, freq) < 0)
+               if (freq_qos_update_request(req, freq) < 0)
                        pr_warn("Failed to update freq constraint: CPU%d\n", i);
        }
 }
@@ -1153,7 +1151,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
        if (intel_pstate_driver == &intel_pstate)
                intel_pstate_update_policies();
        else
-               update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
+               update_qos_request(FREQ_QOS_MAX);
 
        mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1187,7 +1185,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
        if (intel_pstate_driver == &intel_pstate)
                intel_pstate_update_policies();
        else
-               update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
+               update_qos_request(FREQ_QOS_MIN);
 
        mutex_unlock(&intel_pstate_driver_lock);
 
@@ -2381,7 +2379,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
        int max_state, turbo_max, min_freq, max_freq, ret;
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        struct cpudata *cpu;
        struct device *dev;
 
@@ -2416,15 +2414,15 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
        max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
        max_freq *= cpu->pstate.scaling;
 
-       ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
-                                    min_freq);
+       ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
+                                  min_freq);
        if (ret < 0) {
                dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
                goto free_req;
        }
 
-       ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
-                                    max_freq);
+       ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
+                                  max_freq);
        if (ret < 0) {
                dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
                goto remove_min_req;
@@ -2435,7 +2433,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
        return 0;
 
 remove_min_req:
-       dev_pm_qos_remove_request(req);
+       freq_qos_remove_request(req);
 free_req:
        kfree(req);
 pstate_exit:
@@ -2446,12 +2444,12 @@ pstate_exit:
 
 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
 
        req = policy->driver_data;
 
-       dev_pm_qos_remove_request(req + 1);
-       dev_pm_qos_remove_request(req);
+       freq_qos_remove_request(req + 1);
+       freq_qos_remove_request(req);
        kfree(req);
 
        return intel_pstate_cpu_exit(policy);
@@ -2664,21 +2662,21 @@ enum {
 
 /* Hardware vendor-specific info that has its own power management modes */
 static struct acpi_platform_list plat_info[] __initdata = {
-       {"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS},
-       {"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
-       {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+       {"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
+       {"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+       {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
        { } /* End */
 };
 
index 6061850..56f4bc0 100644 (file)
@@ -1041,9 +1041,14 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
 
 static int init_chip_info(void)
 {
-       unsigned int chip[256];
+       unsigned int *chip;
        unsigned int cpu, i;
        unsigned int prev_chip_id = UINT_MAX;
+       int ret = 0;
+
+       chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
 
        for_each_possible_cpu(cpu) {
                unsigned int id = cpu_to_chip_id(cpu);
@@ -1055,8 +1060,10 @@ static int init_chip_info(void)
        }
 
        chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
-       if (!chips)
-               return -ENOMEM;
+       if (!chips) {
+               ret = -ENOMEM;
+               goto free_and_return;
+       }
 
        for (i = 0; i < nr_chips; i++) {
                chips[i].id = chip[i];
@@ -1066,7 +1073,9 @@ static int init_chip_info(void)
                        per_cpu(chip_info, cpu) =  &chips[i];
        }
 
-       return 0;
+free_and_return:
+       kfree(chip);
+       return ret;
 }
 
 static inline void clean_chip_info(void)
index bc9dd30..037fe23 100644 (file)
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
 static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
 {
        struct cpufreq_policy *policy;
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        u8 node, slow_mode;
        int cpu, ret;
 
@@ -86,7 +86,7 @@ static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
 
        req = policy->driver_data;
 
-       ret = dev_pm_qos_update_request(req,
+       ret = freq_qos_update_request(req,
                        policy->freq_table[slow_mode].frequency);
        if (ret < 0)
                pr_warn("Failed to update freq constraint: %d\n", ret);
@@ -103,7 +103,7 @@ static struct pmi_handler cbe_pmi_handler = {
 
 void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
 {
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        int ret;
 
        if (!cbe_cpufreq_has_pmi)
@@ -113,9 +113,8 @@ void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
        if (!req)
                return;
 
-       ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
-                                    DEV_PM_QOS_MAX_FREQUENCY,
-                                    policy->freq_table[0].frequency);
+       ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
+                                  policy->freq_table[0].frequency);
        if (ret < 0) {
                pr_err("Failed to add freq constraint (%d)\n", ret);
                kfree(req);
@@ -128,10 +127,10 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
 
 void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
 {
-       struct dev_pm_qos_request *req = policy->driver_data;
+       struct freq_qos_request *req = policy->driver_data;
 
        if (cbe_cpufreq_has_pmi) {
-               dev_pm_qos_remove_request(req);
+               freq_qos_remove_request(req);
                kfree(req);
        }
 }
index af0c00d..c6bdfc3 100644 (file)
@@ -19,7 +19,6 @@
 static struct regulator *vddarm;
 static unsigned long regulator_latency;
 
-#ifdef CONFIG_CPU_S3C6410
 struct s3c64xx_dvfs {
        unsigned int vddarm_min;
        unsigned int vddarm_max;
@@ -48,7 +47,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
        { 0, 4, 800000 },
        { 0, 0, CPUFREQ_TABLE_END },
 };
-#endif
 
 static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
                                      unsigned int index)
@@ -149,11 +147,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
        if (policy->cpu != 0)
                return -EINVAL;
 
-       if (s3c64xx_freq_table == NULL) {
-               pr_err("No frequency information for this CPU\n");
-               return -ENODEV;
-       }
-
        policy->clk = clk_get(NULL, "armclk");
        if (IS_ERR(policy->clk)) {
                pr_err("Unable to obtain ARMCLK: %ld\n",
index 2b51e07..20d1f85 100644 (file)
@@ -1,8 +1,6 @@
 /*
  * System Control and Power Interface (SCPI) based CPUFreq Interface driver
  *
- * It provides necessary ops to arm_big_little cpufreq driver.
- *
  * Copyright (C) 2015 ARM Ltd.
  * Sudeep Holla <sudeep.holla@arm.com>
  *
index eca32e4..9907a16 100644 (file)
@@ -25,7 +25,7 @@
 static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
 
 /**
- * sun50i_cpufreq_get_efuse() - Parse and return efuse value present on SoC
+ * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
  * @versions: Set to the value parsed from efuse
  *
  * Returns 0 if success.
@@ -69,21 +69,16 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
                return PTR_ERR(speedbin);
 
        efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
-       switch (efuse_value) {
-       case 0b0001:
-               *versions = 1;
-               break;
-       case 0b0011:
-               *versions = 2;
-               break;
-       default:
-               /*
-                * For other situations, we treat it as bin0.
-                * This vf table can be run for any good cpu.
-                */
+
+       /*
+        * We treat unexpected efuse values as if the SoC was from
+        * the slowest bin. Expected efuse values are 1-3, slowest
+        * to fastest.
+        */
+       if (efuse_value >= 1 && efuse_value <= 3)
+               *versions = efuse_value - 1;
+       else
                *versions = 0;
-               break;
-       }
 
        kfree(speedbin);
        return 0;
index aeaa883..557cb51 100644 (file)
 #define DRA7_EFUSE_OD_MPU_OPP                  BIT(1)
 #define DRA7_EFUSE_HIGH_MPU_OPP                        BIT(2)
 
+#define OMAP3_CONTROL_DEVICE_STATUS            0x4800244C
+#define OMAP3_CONTROL_IDCODE                   0x4830A204
+#define OMAP34xx_ProdID_SKUID                  0x4830A20C
+#define OMAP3_SYSCON_BASE      (0x48000000 + 0x2000 + 0x270)
+
 #define VERSION_COUNT                          2
 
 struct ti_cpufreq_data;
 
 struct ti_cpufreq_soc_data {
+       const char * const *reg_names;
        unsigned long (*efuse_xlate)(struct ti_cpufreq_data *opp_data,
                                     unsigned long efuse);
        unsigned long efuse_fallback;
@@ -85,6 +91,13 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
        return calculated_efuse;
 }
 
+static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
+                                     unsigned long efuse)
+{
+       /* OPP enable bit ("Speed Binned") */
+       return BIT(efuse);
+}
+
 static struct ti_cpufreq_soc_data am3x_soc_data = {
        .efuse_xlate = amx3_efuse_xlate,
        .efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ,
@@ -112,6 +125,74 @@ static struct ti_cpufreq_soc_data dra7_soc_data = {
        .multi_regulator = true,
 };
 
+/*
+ * OMAP35x TRM (SPRUF98K):
+ *  CONTROL_IDCODE (0x4830 A204) describes Silicon revisions.
+ *  Control OMAP Status Register 15:0 (Address 0x4800 244C)
+ *    to separate between omap3503, omap3515, omap3525, omap3530
+ *    and feature presence.
+ *    There are encodings for versions limited to 400/266MHz
+ *    but we ignore.
+ *    Not clear if this also holds for omap34xx.
+ *  some eFuse values e.g. CONTROL_FUSE_OPP1_VDD1
+ *    are stored in the SYSCON register range
+ *  Register 0x4830A20C [ProdID.SKUID] [0:3]
+ *    0x0 for normal 600/430MHz device.
+ *    0x8 for 720/520MHz device.
+ *    Not clear what omap34xx value is.
+ */
+
+static struct ti_cpufreq_soc_data omap34xx_soc_data = {
+       .efuse_xlate = omap3_efuse_xlate,
+       .efuse_offset = OMAP34xx_ProdID_SKUID - OMAP3_SYSCON_BASE,
+       .efuse_shift = 3,
+       .efuse_mask = BIT(3),
+       .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+       .multi_regulator = false,
+};
+
+/*
+ * AM/DM37x TRM (SPRUGN4M)
+ *  CONTROL_IDCODE (0x4830 A204) describes Silicon revisions.
+ *  Control Device Status Register 15:0 (Address 0x4800 244C)
+ *    to separate between am3703, am3715, dm3725, dm3730
+ *    and feature presence.
+ *   Speed Binned = Bit 9
+ *     0 800/600 MHz
+ *     1 1000/800 MHz
+ *  some eFuse values e.g. CONTROL_FUSE_OPP 1G_VDD1
+ *    are stored in the SYSCON register range.
+ *  There is no 0x4830A20C [ProdID.SKUID] register (exists but
+ *    seems to always read as 0).
+ */
+
+static const char * const omap3_reg_names[] = {"cpu0", "vbb"};
+
+static struct ti_cpufreq_soc_data omap36xx_soc_data = {
+       .reg_names = omap3_reg_names,
+       .efuse_xlate = omap3_efuse_xlate,
+       .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE,
+       .efuse_shift = 9,
+       .efuse_mask = BIT(9),
+       .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+       .multi_regulator = true,
+};
+
+/*
+ * AM3517 is quite similar to AM/DM37x except that it has no
+ * high speed grade eFuse and no abb ldo
+ */
+
+static struct ti_cpufreq_soc_data am3517_soc_data = {
+       .efuse_xlate = omap3_efuse_xlate,
+       .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE,
+       .efuse_shift = 0,
+       .efuse_mask = 0,
+       .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+       .multi_regulator = false,
+};
+
+
 /**
  * ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
  * @opp_data: pointer to ti_cpufreq_data context
@@ -128,7 +209,17 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
 
        ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
                          &efuse);
-       if (ret) {
+       if (ret == -EIO) {
+               /* not a syscon register! */
+               void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+                               opp_data->soc_data->efuse_offset, 4);
+
+               if (!regs)
+                       return -ENOMEM;
+               efuse = readl(regs);
+               iounmap(regs);
+               }
+       else if (ret) {
                dev_err(dev,
                        "Failed to read the efuse value from syscon: %d\n",
                        ret);
@@ -159,7 +250,17 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
 
        ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
                          &revision);
-       if (ret) {
+       if (ret == -EIO) {
+               /* not a syscon register! */
+               void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+                               opp_data->soc_data->rev_offset, 4);
+
+               if (!regs)
+                       return -ENOMEM;
+               revision = readl(regs);
+               iounmap(regs);
+               }
+       else if (ret) {
                dev_err(dev,
                        "Failed to read the revision number from syscon: %d\n",
                        ret);
@@ -189,8 +290,14 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
 
 static const struct of_device_id ti_cpufreq_of_match[] = {
        { .compatible = "ti,am33xx", .data = &am3x_soc_data, },
+       { .compatible = "ti,am3517", .data = &am3517_soc_data, },
        { .compatible = "ti,am43", .data = &am4x_soc_data, },
        { .compatible = "ti,dra7", .data = &dra7_soc_data },
+       { .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
+       { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
+       /* legacy */
+       { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
+       { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
        {},
 };
 
@@ -212,7 +319,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
        const struct of_device_id *match;
        struct opp_table *ti_opp_table;
        struct ti_cpufreq_data *opp_data;
-       const char * const reg_names[] = {"vdd", "vbb"};
+       const char * const default_reg_names[] = {"vdd", "vbb"};
        int ret;
 
        match = dev_get_platdata(&pdev->dev);
@@ -268,9 +375,13 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
        opp_data->opp_table = ti_opp_table;
 
        if (opp_data->soc_data->multi_regulator) {
+               const char * const *reg_names = default_reg_names;
+
+               if (opp_data->soc_data->reg_names)
+                       reg_names = opp_data->soc_data->reg_names;
                ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
                                                         reg_names,
-                                                        ARRAY_SIZE(reg_names));
+                                                        ARRAY_SIZE(default_reg_names));
                if (IS_ERR(ti_opp_table)) {
                        dev_pm_opp_put_supported_hw(opp_data->opp_table);
                        ret =  PTR_ERR(ti_opp_table);
index 5323728..506e3f2 100644 (file)
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Versatile Express SPC CPUFreq Interface driver
  *
- * It provides necessary ops to arm_big_little cpufreq driver.
+ * Copyright (C) 2013 - 2019 ARM Ltd.
+ * Sudeep Holla <sudeep.holla@arm.com>
  *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/clk.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/device.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
 #include <linux/types.h>
 
-#include "arm_big_little.h"
+/* Currently we support only two clusters */
+#define A15_CLUSTER    0
+#define A7_CLUSTER     1
+#define MAX_CLUSTERS   2
+
+#ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled()      bL_switching_enabled
+#define set_switching_enabled(x)       (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled()      false
+#define set_switching_enabled(x)       do { } while (0)
+#define bL_switch_request(...)         do { } while (0)
+#define bL_switcher_put_enabled()      do { } while (0)
+#define bL_switcher_get_enabled()      do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq)  ((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq)    ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+
+static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min;       /* (Big) clock frequencies */
+static unsigned int clk_little_max;    /* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
+{
+       return topology_physical_package_id(cpu);
+}
+
+static inline int cpu_to_cluster(int cpu)
+{
+       return is_bL_switching_enabled() ?
+               MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
+}
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+       int j;
+       u32 max_freq = 0, cpu_freq;
+
+       for_each_online_cpu(j) {
+               cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+               if (cluster == per_cpu(physical_cluster, j) &&
+                   max_freq < cpu_freq)
+                       max_freq = cpu_freq;
+       }
+
+       return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+       u32 cur_cluster = per_cpu(physical_cluster, cpu);
+       u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+       /* For switcher we use virtual A7 clock rates */
+       if (is_bL_switching_enabled())
+               rate = VIRT_FREQ(cur_cluster, rate);
+
+       return rate;
+}
+
+static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
+{
+       if (is_bL_switching_enabled())
+               return per_cpu(cpu_last_req_freq, cpu);
+       else
+               return clk_get_cpu_rate(cpu);
+}
+
+static unsigned int
+ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+       u32 new_rate, prev_rate;
+       int ret;
+       bool bLs = is_bL_switching_enabled();
+
+       mutex_lock(&cluster_lock[new_cluster]);
+
+       if (bLs) {
+               prev_rate = per_cpu(cpu_last_req_freq, cpu);
+               per_cpu(cpu_last_req_freq, cpu) = rate;
+               per_cpu(physical_cluster, cpu) = new_cluster;
+
+               new_rate = find_cluster_maxfreq(new_cluster);
+               new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+       } else {
+               new_rate = rate;
+       }
+
+       ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+       if (!ret) {
+               /*
+                * FIXME: clk_set_rate hasn't returned an error here however it
+                * may be that clk_change_rate failed due to hardware or
+                * firmware issues and wasn't able to report that due to the
+                * current design of the clk core layer. To work around this
+                * problem we will read back the clock rate and check it is
+                * correct. This needs to be removed once clk core is fixed.
+                */
+               if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
+                       ret = -EIO;
+       }
+
+       if (WARN_ON(ret)) {
+               if (bLs) {
+                       per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+                       per_cpu(physical_cluster, cpu) = old_cluster;
+               }
+
+               mutex_unlock(&cluster_lock[new_cluster]);
+
+               return ret;
+       }
+
+       mutex_unlock(&cluster_lock[new_cluster]);
+
+       /* Recalc freq for old cluster when switching clusters */
+       if (old_cluster != new_cluster) {
+               /* Switch cluster */
+               bL_switch_request(cpu, new_cluster);
+
+               mutex_lock(&cluster_lock[old_cluster]);
+
+               /* Set freq of old cluster if there are cpus left on it */
+               new_rate = find_cluster_maxfreq(old_cluster);
+               new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+               if (new_rate &&
+                   clk_set_rate(clk[old_cluster], new_rate * 1000)) {
+                       pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+                              __func__, ret, old_cluster);
+               }
+               mutex_unlock(&cluster_lock[old_cluster]);
+       }
+
+       return 0;
+}
+
+/* Set clock frequency */
+static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
+                                    unsigned int index)
+{
+       u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+       unsigned int freqs_new;
+       int ret;
+
+       cur_cluster = cpu_to_cluster(cpu);
+       new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+       freqs_new = freq_table[cur_cluster][index].frequency;
+
+       if (is_bL_switching_enabled()) {
+               if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
+                       new_cluster = A7_CLUSTER;
+               else if (actual_cluster == A7_CLUSTER &&
+                        freqs_new > clk_little_max)
+                       new_cluster = A15_CLUSTER;
+       }
+
+       ret = ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
+                                     freqs_new);
+
+       if (!ret) {
+               arch_set_freq_scale(policy->related_cpus, freqs_new,
+                                   policy->cpuinfo.max_freq);
+       }
+
+       return ret;
+}
+
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+       int count;
+
+       for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+               ;
+
+       return count;
+}
+
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+       struct cpufreq_frequency_table *pos;
+       u32 min_freq = ~0;
+
+       cpufreq_for_each_entry(pos, table)
+               if (pos->frequency < min_freq)
+                       min_freq = pos->frequency;
+       return min_freq;
+}
+
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+       struct cpufreq_frequency_table *pos;
+       u32 max_freq = 0;
+
+       cpufreq_for_each_entry(pos, table)
+               if (pos->frequency > max_freq)
+                       max_freq = pos->frequency;
+       return max_freq;
+}
+
+static bool search_frequency(struct cpufreq_frequency_table *table, int size,
+                            unsigned int freq)
+{
+       int count;
+
+       for (count = 0; count < size; count++) {
+               if (table[count].frequency == freq)
+                       return true;
+       }
+
+       return false;
+}
+
+static int merge_cluster_tables(void)
+{
+       int i, j, k = 0, count = 1;
+       struct cpufreq_frequency_table *table;
+
+       for (i = 0; i < MAX_CLUSTERS; i++)
+               count += get_table_count(freq_table[i]);
+
+       table = kcalloc(count, sizeof(*table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+
+       freq_table[MAX_CLUSTERS] = table;
+
+       /* Add in reverse order to get freqs in increasing order */
+       for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
+               for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+                    j++) {
+                       if (i == A15_CLUSTER &&
+                           search_frequency(table, count, freq_table[i][j].frequency))
+                               continue; /* skip duplicates */
+                       table[k++].frequency =
+                               VIRT_FREQ(i, freq_table[i][j].frequency);
+               }
+       }
+
+       table[k].driver_data = k;
+       table[k].frequency = CPUFREQ_TABLE_END;
+
+       return 0;
+}
 
-static int ve_spc_init_opp_table(const struct cpumask *cpumask)
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+                                           const struct cpumask *cpumask)
 {
-       struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+       u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+       if (!freq_table[cluster])
+               return;
+
+       clk_put(clk[cluster]);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+}
+
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+                                          const struct cpumask *cpumask)
+{
+       u32 cluster = cpu_to_cluster(cpu_dev->id);
+       int i;
+
+       if (atomic_dec_return(&cluster_usage[cluster]))
+               return;
+
+       if (cluster < MAX_CLUSTERS)
+               return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
+
+       for_each_present_cpu(i) {
+               struct device *cdev = get_cpu_device(i);
+
+               if (!cdev)
+                       return;
+
+               _put_cluster_clk_and_freq_table(cdev, cpumask);
+       }
+
+       /* free virtual table */
+       kfree(freq_table[cluster]);
+}
+
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+                                          const struct cpumask *cpumask)
+{
+       u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+       int ret;
+
+       if (freq_table[cluster])
+               return 0;
+
        /*
         * platform specific SPC code must initialise the opp table
         * so just check if the OPP count is non-zero
         */
-       return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+       ret = dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+       if (ret)
+               goto out;
+
+       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+       if (ret)
+               goto out;
+
+       clk[cluster] = clk_get(cpu_dev, NULL);
+       if (!IS_ERR(clk[cluster]))
+               return 0;
+
+       dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
+               __func__, cpu_dev->id, cluster);
+       ret = PTR_ERR(clk[cluster]);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+
+out:
+       dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
+               cluster);
+       return ret;
 }
 
-static int ve_spc_get_transition_latency(struct device *cpu_dev)
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+                                         const struct cpumask *cpumask)
 {
-       return 1000000; /* 1 ms */
+       u32 cluster = cpu_to_cluster(cpu_dev->id);
+       int i, ret;
+
+       if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+               return 0;
+
+       if (cluster < MAX_CLUSTERS) {
+               ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
+               if (ret)
+                       atomic_dec(&cluster_usage[cluster]);
+               return ret;
+       }
+
+       /*
+        * Get data for all clusters and fill virtual cluster with a merge of
+        * both
+        */
+       for_each_present_cpu(i) {
+               struct device *cdev = get_cpu_device(i);
+
+               if (!cdev)
+                       return -ENODEV;
+
+               ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
+               if (ret)
+                       goto put_clusters;
+       }
+
+       ret = merge_cluster_tables();
+       if (ret)
+               goto put_clusters;
+
+       /* Assuming 2 cluster, set clk_big_min and clk_little_max */
+       clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
+       clk_little_max = VIRT_FREQ(A7_CLUSTER,
+                                  get_table_max(freq_table[A7_CLUSTER]));
+
+       return 0;
+
+put_clusters:
+       for_each_present_cpu(i) {
+               struct device *cdev = get_cpu_device(i);
+
+               if (!cdev)
+                       return -ENODEV;
+
+               _put_cluster_clk_and_freq_table(cdev, cpumask);
+       }
+
+       atomic_dec(&cluster_usage[cluster]);
+
+       return ret;
 }
 
-static const struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
-       .name   = "vexpress-spc",
-       .get_transition_latency = ve_spc_get_transition_latency,
-       .init_opp_table = ve_spc_init_opp_table,
+/* Per-CPU initialization */
+static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
+{
+       u32 cur_cluster = cpu_to_cluster(policy->cpu);
+       struct device *cpu_dev;
+       int ret;
+
+       cpu_dev = get_cpu_device(policy->cpu);
+       if (!cpu_dev) {
+               pr_err("%s: failed to get cpu%d device\n", __func__,
+                      policy->cpu);
+               return -ENODEV;
+       }
+
+       if (cur_cluster < MAX_CLUSTERS) {
+               int cpu;
+
+               cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+               for_each_cpu(cpu, policy->cpus)
+                       per_cpu(physical_cluster, cpu) = cur_cluster;
+       } else {
+               /* Assumption: during init, we are always running on A15 */
+               per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+       }
+
+       ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+       if (ret)
+               return ret;
+
+       policy->freq_table = freq_table[cur_cluster];
+       policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
+
+       dev_pm_opp_of_register_em(policy->cpus);
+
+       if (is_bL_switching_enabled())
+               per_cpu(cpu_last_req_freq, policy->cpu) =
+                                               clk_get_cpu_rate(policy->cpu);
+
+       dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
+       return 0;
+}
+
+static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+       struct device *cpu_dev;
+       int cur_cluster = cpu_to_cluster(policy->cpu);
+
+       if (cur_cluster < MAX_CLUSTERS) {
+               cpufreq_cooling_unregister(cdev[cur_cluster]);
+               cdev[cur_cluster] = NULL;
+       }
+
+       cpu_dev = get_cpu_device(policy->cpu);
+       if (!cpu_dev) {
+               pr_err("%s: failed to get cpu%d device\n", __func__,
+                      policy->cpu);
+               return -ENODEV;
+       }
+
+       put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
+       return 0;
+}
+
+static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
+{
+       int cur_cluster = cpu_to_cluster(policy->cpu);
+
+       /* Do not register a cpu_cooling device if we are in IKS mode */
+       if (cur_cluster >= MAX_CLUSTERS)
+               return;
+
+       cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
+}
+
+static struct cpufreq_driver ve_spc_cpufreq_driver = {
+       .name                   = "vexpress-spc",
+       .flags                  = CPUFREQ_STICKY |
+                                       CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+                                       CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+       .verify                 = cpufreq_generic_frequency_table_verify,
+       .target_index           = ve_spc_cpufreq_set_target,
+       .get                    = ve_spc_cpufreq_get_rate,
+       .init                   = ve_spc_cpufreq_init,
+       .exit                   = ve_spc_cpufreq_exit,
+       .ready                  = ve_spc_cpufreq_ready,
+       .attr                   = cpufreq_generic_attr,
 };
 
+#ifdef CONFIG_BL_SWITCHER
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+                                       unsigned long action, void *_arg)
+{
+       pr_debug("%s: action: %ld\n", __func__, action);
+
+       switch (action) {
+       case BL_NOTIFY_PRE_ENABLE:
+       case BL_NOTIFY_PRE_DISABLE:
+               cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+               break;
+
+       case BL_NOTIFY_POST_ENABLE:
+               set_switching_enabled(true);
+               cpufreq_register_driver(&ve_spc_cpufreq_driver);
+               break;
+
+       case BL_NOTIFY_POST_DISABLE:
+               set_switching_enabled(false);
+               cpufreq_register_driver(&ve_spc_cpufreq_driver);
+               break;
+
+       default:
+               return NOTIFY_DONE;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+       .notifier_call = bL_cpufreq_switcher_notifier,
+};
+
+static int __bLs_register_notifier(void)
+{
+       return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+       return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
+
 static int ve_spc_cpufreq_probe(struct platform_device *pdev)
 {
-       return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+       int ret, i;
+
+       set_switching_enabled(bL_switcher_get_enabled());
+
+       for (i = 0; i < MAX_CLUSTERS; i++)
+               mutex_init(&cluster_lock[i]);
+
+       ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
+       if (ret) {
+               pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+                       __func__, ve_spc_cpufreq_driver.name, ret);
+       } else {
+               ret = __bLs_register_notifier();
+               if (ret)
+                       cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+               else
+                       pr_info("%s: Registered platform driver: %s\n",
+                               __func__, ve_spc_cpufreq_driver.name);
+       }
+
+       bL_switcher_put_enabled();
+       return ret;
 }
 
 static int ve_spc_cpufreq_remove(struct platform_device *pdev)
 {
-       bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+       bL_switcher_get_enabled();
+       __bLs_unregister_notifier();
+       cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+       bL_switcher_put_enabled();
+       pr_info("%s: Un-registered platform driver: %s\n", __func__,
+               ve_spc_cpufreq_driver.name);
        return 0;
 }
 
@@ -68,4 +599,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = {
 };
 module_platform_driver(ve_spc_cpufreq_platdrv);
 
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL v2");
index 932390b..b0ce9bc 100644 (file)
@@ -95,6 +95,10 @@ static int __init haltpoll_init(void)
        int ret;
        struct cpuidle_driver *drv = &haltpoll_driver;
 
+       /* Do not load haltpoll if idle= is passed */
+       if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+               return -ENODEV;
+
        cpuidle_poll_state_init(drv);
 
        if (!kvm_para_available() ||
index 84b1ebe..1b299e8 100644 (file)
@@ -56,13 +56,10 @@ static u64 get_snooze_timeout(struct cpuidle_device *dev,
                return default_snooze_timeout;
 
        for (i = index + 1; i < drv->state_count; i++) {
-               struct cpuidle_state *s = &drv->states[i];
-               struct cpuidle_state_usage *su = &dev->states_usage[i];
-
-               if (s->disabled || su->disable)
+               if (dev->states_usage[i].disable)
                        continue;
 
-               return s->target_residency * tb_ticks_per_usec;
+               return drv->states[i].target_residency * tb_ticks_per_usec;
        }
 
        return default_snooze_timeout;
index 0895b98..569dbac 100644 (file)
@@ -75,44 +75,45 @@ int cpuidle_play_dead(void)
 
 static int find_deepest_state(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev,
-                             unsigned int max_latency,
+                             u64 max_latency_ns,
                              unsigned int forbidden_flags,
                              bool s2idle)
 {
-       unsigned int latency_req = 0;
+       u64 latency_req = 0;
        int i, ret = 0;
 
        for (i = 1; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
-               struct cpuidle_state_usage *su = &dev->states_usage[i];
 
-               if (s->disabled || su->disable || s->exit_latency <= latency_req
-                   || s->exit_latency > max_latency
-                   || (s->flags & forbidden_flags)
-                   || (s2idle && !s->enter_s2idle))
+               if (dev->states_usage[i].disable ||
+                   s->exit_latency_ns <= latency_req ||
+                   s->exit_latency_ns > max_latency_ns ||
+                   (s->flags & forbidden_flags) ||
+                   (s2idle && !s->enter_s2idle))
                        continue;
 
-               latency_req = s->exit_latency;
+               latency_req = s->exit_latency_ns;
                ret = i;
        }
        return ret;
 }
 
 /**
- * cpuidle_use_deepest_state - Set/clear governor override flag.
- * @enable: New value of the flag.
+ * cpuidle_use_deepest_state - Set/unset governor override mode.
+ * @latency_limit_ns: Idle state exit latency limit (or no override if 0).
  *
- * Set/unset the current CPU to use the deepest idle state (override governors
- * going forward if set).
+ * If @latency_limit_ns is nonzero, set the current CPU to use the deepest idle
+ * state with exit latency within @latency_limit_ns (override governors going
+ * forward), or do not override governors if it is zero.
  */
-void cpuidle_use_deepest_state(bool enable)
+void cpuidle_use_deepest_state(u64 latency_limit_ns)
 {
        struct cpuidle_device *dev;
 
        preempt_disable();
        dev = cpuidle_get_device();
        if (dev)
-               dev->use_deepest_state = enable;
+               dev->forced_idle_latency_limit_ns = latency_limit_ns;
        preempt_enable();
 }
 
@@ -122,9 +123,10 @@ void cpuidle_use_deepest_state(bool enable)
  * @dev: cpuidle device for the given CPU.
  */
 int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-                              struct cpuidle_device *dev)
+                              struct cpuidle_device *dev,
+                              u64 latency_limit_ns)
 {
-       return find_deepest_state(drv, dev, UINT_MAX, 0, false);
+       return find_deepest_state(drv, dev, latency_limit_ns, 0, false);
 }
 
 #ifdef CONFIG_SUSPEND
@@ -180,7 +182,7 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * that interrupts won't be enabled when it exits and allows the tick to
         * be frozen safely.
         */
-       index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
+       index = find_deepest_state(drv, dev, U64_MAX, 0, true);
        if (index > 0)
                enter_s2idle_proper(drv, dev, index);
 
@@ -209,7 +211,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
         * CPU as a broadcast timer, this call may fail if it is not available.
         */
        if (broadcast && tick_broadcast_enter()) {
-               index = find_deepest_state(drv, dev, target_state->exit_latency,
+               index = find_deepest_state(drv, dev, target_state->exit_latency_ns,
                                           CPUIDLE_FLAG_TIMER_STOP, false);
                if (index < 0) {
                        default_idle_call();
@@ -247,7 +249,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                local_irq_enable();
 
        if (entered_state >= 0) {
-               s64 diff, delay = drv->states[entered_state].exit_latency;
+               s64 diff, delay = drv->states[entered_state].exit_latency_ns;
                int i;
 
                /*
@@ -255,18 +257,15 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                 * This can be moved to within driver enter routine,
                 * but that results in multiple copies of same code.
                 */
-               diff = ktime_us_delta(time_end, time_start);
-               if (diff > INT_MAX)
-                       diff = INT_MAX;
+               diff = ktime_sub(time_end, time_start);
 
-               dev->last_residency = (int)diff;
-               dev->states_usage[entered_state].time += dev->last_residency;
+               dev->last_residency_ns = diff;
+               dev->states_usage[entered_state].time_ns += diff;
                dev->states_usage[entered_state].usage++;
 
-               if (diff < drv->states[entered_state].target_residency) {
+               if (diff < drv->states[entered_state].target_residency_ns) {
                        for (i = entered_state - 1; i >= 0; i--) {
-                               if (drv->states[i].disabled ||
-                                   dev->states_usage[i].disable)
+                               if (dev->states_usage[i].disable)
                                        continue;
 
                                /* Shallower states are enabled, so update. */
@@ -275,22 +274,21 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                        }
                } else if (diff > delay) {
                        for (i = entered_state + 1; i < drv->state_count; i++) {
-                               if (drv->states[i].disabled ||
-                                   dev->states_usage[i].disable)
+                               if (dev->states_usage[i].disable)
                                        continue;
 
                                /*
                                 * Update if a deeper state would have been a
                                 * better match for the observed idle duration.
                                 */
-                               if (diff - delay >= drv->states[i].target_residency)
+                               if (diff - delay >= drv->states[i].target_residency_ns)
                                        dev->states_usage[entered_state].below++;
 
                                break;
                        }
                }
        } else {
-               dev->last_residency = 0;
+               dev->last_residency_ns = 0;
        }
 
        return entered_state;
@@ -380,10 +378,10 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv,
 
        limit_ns = TICK_NSEC;
        for (i = 1; i < drv->state_count; i++) {
-               if (drv->states[i].disabled || dev->states_usage[i].disable)
+               if (dev->states_usage[i].disable)
                        continue;
 
-               limit_ns = (u64)drv->states[i].target_residency * NSEC_PER_USEC;
+               limit_ns = (u64)drv->states[i].target_residency_ns;
        }
 
        dev->poll_limit_ns = limit_ns;
@@ -554,7 +552,7 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
 static void __cpuidle_device_init(struct cpuidle_device *dev)
 {
        memset(dev->states_usage, 0, sizeof(dev->states_usage));
-       dev->last_residency = 0;
+       dev->last_residency_ns = 0;
        dev->next_hrtimer = 0;
 }
 
@@ -567,12 +565,16 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
  */
 static int __cpuidle_register_device(struct cpuidle_device *dev)
 {
-       int ret;
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+       int i, ret;
 
        if (!try_module_get(drv->owner))
                return -EINVAL;
 
+       for (i = 0; i < drv->state_count; i++)
+               if (drv->states[i].disabled)
+                       dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+
        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
 
index 80c1a83..c76423a 100644 (file)
@@ -62,24 +62,23 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
  * __cpuidle_set_driver - set per CPU driver variables for the given driver.
  * @drv: a valid pointer to a struct cpuidle_driver
  *
- * For each CPU in the driver's cpumask, unset the registered driver per CPU
- * to @drv.
- *
- * Returns 0 on success, -EBUSY if the CPUs have driver(s) already.
+ * Returns 0 on success, -EBUSY if any CPU in the cpumask have a driver
+ * different from drv already.
  */
 static inline int __cpuidle_set_driver(struct cpuidle_driver *drv)
 {
        int cpu;
 
        for_each_cpu(cpu, drv->cpumask) {
+               struct cpuidle_driver *old_drv;
 
-               if (__cpuidle_get_cpu_driver(cpu)) {
-                       __cpuidle_unset_driver(drv);
+               old_drv = __cpuidle_get_cpu_driver(cpu);
+               if (old_drv && old_drv != drv)
                        return -EBUSY;
-               }
+       }
 
+       for_each_cpu(cpu, drv->cpumask)
                per_cpu(cpuidle_drivers, cpu) = drv;
-       }
 
        return 0;
 }
@@ -166,16 +165,27 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
        if (!drv->cpumask)
                drv->cpumask = (struct cpumask *)cpu_possible_mask;
 
-       /*
-        * Look for the timer stop flag in the different states, so that we know
-        * if the broadcast timer has to be set up.  The loop is in the reverse
-        * order, because usually one of the deeper states have this flag set.
-        */
-       for (i = drv->state_count - 1; i >= 0 ; i--) {
-               if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) {
+       for (i = 0; i < drv->state_count; i++) {
+               struct cpuidle_state *s = &drv->states[i];
+
+               /*
+                * Look for the timer stop flag in the different states and if
+                * it is found, indicate that the broadcast timer has to be set
+                * up.
+                */
+               if (s->flags & CPUIDLE_FLAG_TIMER_STOP)
                        drv->bctimer = 1;
-                       break;
-               }
+
+               /*
+                * The core will use the target residency and exit latency
+                * values in nanoseconds, but allow drivers to provide them in
+                * microseconds too.
+                */
+               if (s->target_residency > 0)
+                       s->target_residency_ns = s->target_residency * NSEC_PER_USEC;
+
+               if (s->exit_latency > 0)
+                       s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
        }
 }
 
@@ -379,3 +389,31 @@ void cpuidle_driver_unref(void)
 
        spin_unlock(&cpuidle_driver_lock);
 }
+
+/**
+ * cpuidle_driver_state_disabled - Disable or enable an idle state
+ * @drv: cpuidle driver owning the state
+ * @idx: State index
+ * @disable: Whether or not to disable the state
+ */
+void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+                                bool disable)
+{
+       unsigned int cpu;
+
+       mutex_lock(&cpuidle_lock);
+
+       for_each_cpu(cpu, drv->cpumask) {
+               struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+
+               if (!dev)
+                       continue;
+
+               if (disable)
+                       dev->states_usage[idx].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+               else
+                       dev->states_usage[idx].disable &= ~CPUIDLE_STATE_DISABLED_BY_DRIVER;
+       }
+
+       mutex_unlock(&cpuidle_lock);
+}
index e9801f2..e48271e 100644 (file)
@@ -107,11 +107,14 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
  * cpuidle_governor_latency_req - Compute a latency constraint for CPU
  * @cpu: Target CPU
  */
-int cpuidle_governor_latency_req(unsigned int cpu)
+s64 cpuidle_governor_latency_req(unsigned int cpu)
 {
        int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        struct device *device = get_cpu_device(cpu);
        int device_req = dev_pm_qos_raw_resume_latency(device);
 
-       return device_req < global_req ? device_req : global_req;
+       if (device_req > global_req)
+               device_req = global_req;
+
+       return (s64)device_req * NSEC_PER_USEC;
 }
index 7a703d2..cb2a96e 100644 (file)
@@ -49,7 +49,7 @@ static int haltpoll_select(struct cpuidle_driver *drv,
                           struct cpuidle_device *dev,
                           bool *stop_tick)
 {
-       int latency_req = cpuidle_governor_latency_req(dev->cpu);
+       s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
 
        if (!drv->state_count || latency_req == 0) {
                *stop_tick = false;
@@ -75,10 +75,9 @@ static int haltpoll_select(struct cpuidle_driver *drv,
        return 0;
 }
 
-static void adjust_poll_limit(struct cpuidle_device *dev, unsigned int block_us)
+static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns)
 {
        unsigned int val;
-       u64 block_ns = block_us*NSEC_PER_USEC;
 
        /* Grow cpu_halt_poll_us if
         * cpu_halt_poll_us < block_ns < guest_halt_poll_us
@@ -115,7 +114,7 @@ static void haltpoll_reflect(struct cpuidle_device *dev, int index)
        dev->last_state_idx = index;
 
        if (index != 0)
-               adjust_poll_limit(dev, dev->last_residency);
+               adjust_poll_limit(dev, dev->last_residency_ns);
 }
 
 /**
index 428eeb8..8e9058c 100644 (file)
@@ -27,8 +27,8 @@ struct ladder_device_state {
        struct {
                u32 promotion_count;
                u32 demotion_count;
-               u32 promotion_time;
-               u32 demotion_time;
+               u64 promotion_time_ns;
+               u64 demotion_time_ns;
        } threshold;
        struct {
                int promotion_count;
@@ -68,9 +68,10 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 {
        struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
        struct ladder_device_state *last_state;
-       int last_residency, last_idx = dev->last_state_idx;
+       int last_idx = dev->last_state_idx;
        int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
-       int latency_req = cpuidle_governor_latency_req(dev->cpu);
+       s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+       s64 last_residency;
 
        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0)) {
@@ -80,14 +81,13 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
        last_state = &ldev->states[last_idx];
 
-       last_residency = dev->last_residency - drv->states[last_idx].exit_latency;
+       last_residency = dev->last_residency_ns - drv->states[last_idx].exit_latency_ns;
 
        /* consider promotion */
        if (last_idx < drv->state_count - 1 &&
-           !drv->states[last_idx + 1].disabled &&
            !dev->states_usage[last_idx + 1].disable &&
-           last_residency > last_state->threshold.promotion_time &&
-           drv->states[last_idx + 1].exit_latency <= latency_req) {
+           last_residency > last_state->threshold.promotion_time_ns &&
+           drv->states[last_idx + 1].exit_latency_ns <= latency_req) {
                last_state->stats.promotion_count++;
                last_state->stats.demotion_count = 0;
                if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -98,13 +98,12 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
        /* consider demotion */
        if (last_idx > first_idx &&
-           (drv->states[last_idx].disabled ||
-           dev->states_usage[last_idx].disable ||
-           drv->states[last_idx].exit_latency > latency_req)) {
+           (dev->states_usage[last_idx].disable ||
+           drv->states[last_idx].exit_latency_ns > latency_req)) {
                int i;
 
                for (i = last_idx - 1; i > first_idx; i--) {
-                       if (drv->states[i].exit_latency <= latency_req)
+                       if (drv->states[i].exit_latency_ns <= latency_req)
                                break;
                }
                ladder_do_selection(dev, ldev, last_idx, i);
@@ -112,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
        }
 
        if (last_idx > first_idx &&
-           last_residency < last_state->threshold.demotion_time) {
+           last_residency < last_state->threshold.demotion_time_ns) {
                last_state->stats.demotion_count++;
                last_state->stats.promotion_count = 0;
                if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
@@ -152,9 +151,9 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
                lstate->threshold.demotion_count = DEMOTION_COUNT;
 
                if (i < drv->state_count - 1)
-                       lstate->threshold.promotion_time = state->exit_latency;
+                       lstate->threshold.promotion_time_ns = state->exit_latency_ns;
                if (i > first_idx)
-                       lstate->threshold.demotion_time = state->exit_latency;
+                       lstate->threshold.demotion_time_ns = state->exit_latency_ns;
        }
 
        return 0;
index e5a5d0c..b0a7ad5 100644 (file)
 #include <linux/sched/stat.h>
 #include <linux/math64.h>
 
-/*
- * Please note when changing the tuning values:
- * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
- * a scaling operation multiplication may overflow on 32 bit platforms.
- * In that case, #define RESOLUTION as ULL to get 64 bit result:
- * #define RESOLUTION 1024ULL
- *
- * The default values do not overflow.
- */
 #define BUCKETS 12
 #define INTERVAL_SHIFT 3
 #define INTERVALS (1UL << INTERVAL_SHIFT)
 #define RESOLUTION 1024
 #define DECAY 8
-#define MAX_INTERESTING 50000
-
+#define MAX_INTERESTING (50000 * NSEC_PER_USEC)
 
 /*
  * Concepts and ideas behind the menu governor
@@ -120,14 +110,14 @@ struct menu_device {
        int             needs_update;
        int             tick_wakeup;
 
-       unsigned int    next_timer_us;
+       u64             next_timer_ns;
        unsigned int    bucket;
        unsigned int    correction_factor[BUCKETS];
        unsigned int    intervals[INTERVALS];
        int             interval_ptr;
 };
 
-static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
+static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters)
 {
        int bucket = 0;
 
@@ -140,15 +130,15 @@ static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters
        if (nr_iowaiters)
                bucket = BUCKETS/2;
 
-       if (duration < 10)
+       if (duration_ns < 10ULL * NSEC_PER_USEC)
                return bucket;
-       if (duration < 100)
+       if (duration_ns < 100ULL * NSEC_PER_USEC)
                return bucket + 1;
-       if (duration < 1000)
+       if (duration_ns < 1000ULL * NSEC_PER_USEC)
                return bucket + 2;
-       if (duration < 10000)
+       if (duration_ns < 10000ULL * NSEC_PER_USEC)
                return bucket + 3;
-       if (duration < 100000)
+       if (duration_ns < 100000ULL * NSEC_PER_USEC)
                return bucket + 4;
        return bucket + 5;
 }
@@ -276,13 +266,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                       bool *stop_tick)
 {
        struct menu_device *data = this_cpu_ptr(&menu_devices);
-       int latency_req = cpuidle_governor_latency_req(dev->cpu);
-       int i;
-       int idx;
-       unsigned int interactivity_req;
+       s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
        unsigned int predicted_us;
+       u64 predicted_ns;
+       u64 interactivity_req;
        unsigned long nr_iowaiters;
        ktime_t delta_next;
+       int i, idx;
 
        if (data->needs_update) {
                menu_update(drv, dev);
@@ -290,15 +280,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
        }
 
        /* determine the expected residency time, round up */
-       data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
+       data->next_timer_ns = tick_nohz_get_sleep_length(&delta_next);
 
        nr_iowaiters = nr_iowait_cpu(dev->cpu);
-       data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
+       data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
 
        if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
-           ((data->next_timer_us < drv->states[1].target_residency ||
-             latency_req < drv->states[1].exit_latency) &&
-            !drv->states[0].disabled && !dev->states_usage[0].disable)) {
+           ((data->next_timer_ns < drv->states[1].target_residency_ns ||
+             latency_req < drv->states[1].exit_latency_ns) &&
+            !dev->states_usage[0].disable)) {
                /*
                 * In this case state[0] will be used no matter what, so return
                 * it right away and keep the tick running if state[0] is a
@@ -308,18 +298,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                return 0;
        }
 
-       /*
-        * Force the result of multiplication to be 64 bits even if both
-        * operands are 32 bits.
-        * Make sure to round up for half microseconds.
-        */
-       predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
-                                        data->correction_factor[data->bucket],
-                                        RESOLUTION * DECAY);
-       /*
-        * Use the lowest expected idle interval to pick the idle state.
-        */
-       predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));
+       /* Round up the result for half microseconds. */
+       predicted_us = div_u64(data->next_timer_ns *
+                              data->correction_factor[data->bucket] +
+                              (RESOLUTION * DECAY * NSEC_PER_USEC) / 2,
+                              RESOLUTION * DECAY * NSEC_PER_USEC);
+       /* Use the lowest expected idle interval to pick the idle state. */
+       predicted_ns = (u64)min(predicted_us,
+                               get_typical_interval(data, predicted_us)) *
+                               NSEC_PER_USEC;
 
        if (tick_nohz_tick_stopped()) {
                /*
@@ -330,14 +317,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                 * the known time till the closest timer event for the idle
                 * state selection.
                 */
-               if (predicted_us < TICK_USEC)
-                       predicted_us = ktime_to_us(delta_next);
+               if (predicted_ns < TICK_NSEC)
+                       predicted_ns = delta_next;
        } else {
                /*
                 * Use the performance multiplier and the user-configurable
                 * latency_req to determine the maximum exit latency.
                 */
-               interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
+               interactivity_req = div64_u64(predicted_ns,
+                                             performance_multiplier(nr_iowaiters));
                if (latency_req > interactivity_req)
                        latency_req = interactivity_req;
        }
@@ -349,27 +337,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
        idx = -1;
        for (i = 0; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
-               struct cpuidle_state_usage *su = &dev->states_usage[i];
 
-               if (s->disabled || su->disable)
+               if (dev->states_usage[i].disable)
                        continue;
 
                if (idx == -1)
                        idx = i; /* first enabled state */
 
-               if (s->target_residency > predicted_us) {
+               if (s->target_residency_ns > predicted_ns) {
                        /*
                         * Use a physical idle state, not busy polling, unless
                         * a timer is going to trigger soon enough.
                         */
                        if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
-                           s->exit_latency <= latency_req &&
-                           s->target_residency <= data->next_timer_us) {
-                               predicted_us = s->target_residency;
+                           s->exit_latency_ns <= latency_req &&
+                           s->target_residency_ns <= data->next_timer_ns) {
+                               predicted_ns = s->target_residency_ns;
                                idx = i;
                                break;
                        }
-                       if (predicted_us < TICK_USEC)
+                       if (predicted_ns < TICK_NSEC)
                                break;
 
                        if (!tick_nohz_tick_stopped()) {
@@ -379,7 +366,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                                 * tick in that case and let the governor run
                                 * again in the next iteration of the loop.
                                 */
-                               predicted_us = drv->states[idx].target_residency;
+                               predicted_ns = drv->states[idx].target_residency_ns;
                                break;
                        }
 
@@ -389,13 +376,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                         * closest timer event, select this one to avoid getting
                         * stuck in the shallow one for too long.
                         */
-                       if (drv->states[idx].target_residency < TICK_USEC &&
-                           s->target_residency <= ktime_to_us(delta_next))
+                       if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+                           s->target_residency_ns <= delta_next)
                                idx = i;
 
                        return idx;
                }
-               if (s->exit_latency > latency_req)
+               if (s->exit_latency_ns > latency_req)
                        break;
 
                idx = i;
@@ -409,12 +396,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
         * expected idle duration is shorter than the tick period length.
         */
        if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
-            predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
-               unsigned int delta_next_us = ktime_to_us(delta_next);
-
+            predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
                *stop_tick = false;
 
-               if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
+               if (idx > 0 && drv->states[idx].target_residency_ns > delta_next) {
                        /*
                         * The tick is not going to be stopped and the target
                         * residency of the state to be returned is not within
@@ -422,12 +407,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                         * tick, so try to correct that.
                         */
                        for (i = idx - 1; i >= 0; i--) {
-                               if (drv->states[i].disabled ||
-                                   dev->states_usage[i].disable)
+                               if (dev->states_usage[i].disable)
                                        continue;
 
                                idx = i;
-                               if (drv->states[i].target_residency <= delta_next_us)
+                               if (drv->states[i].target_residency_ns <= delta_next)
                                        break;
                        }
                }
@@ -463,7 +447,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        struct menu_device *data = this_cpu_ptr(&menu_devices);
        int last_idx = dev->last_state_idx;
        struct cpuidle_state *target = &drv->states[last_idx];
-       unsigned int measured_us;
+       u64 measured_ns;
        unsigned int new_factor;
 
        /*
@@ -481,7 +465,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * assume the state was never reached and the exit latency is 0.
         */
 
-       if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
+       if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
                /*
                 * The nohz code said that there wouldn't be any events within
                 * the tick boundary (if the tick was stopped), but the idle
@@ -491,7 +475,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                 * have been idle long (but not forever) to help the idle
                 * duration predictor do a better job next time.
                 */
-               measured_us = 9 * MAX_INTERESTING / 10;
+               measured_ns = 9 * MAX_INTERESTING / 10;
        } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
                   dev->poll_time_limit) {
                /*
@@ -501,28 +485,29 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                 * the CPU might have been woken up from idle by the next timer.
                 * Assume that to be the case.
                 */
-               measured_us = data->next_timer_us;
+               measured_ns = data->next_timer_ns;
        } else {
                /* measured value */
-               measured_us = dev->last_residency;
+               measured_ns = dev->last_residency_ns;
 
                /* Deduct exit latency */
-               if (measured_us > 2 * target->exit_latency)
-                       measured_us -= target->exit_latency;
+               if (measured_ns > 2 * target->exit_latency_ns)
+                       measured_ns -= target->exit_latency_ns;
                else
-                       measured_us /= 2;
+                       measured_ns /= 2;
        }
 
        /* Make sure our coefficients do not exceed unity */
-       if (measured_us > data->next_timer_us)
-               measured_us = data->next_timer_us;
+       if (measured_ns > data->next_timer_ns)
+               measured_ns = data->next_timer_ns;
 
        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
        new_factor -= new_factor / DECAY;
 
-       if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
-               new_factor += RESOLUTION * measured_us / data->next_timer_us;
+       if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
+               new_factor += div64_u64(RESOLUTION * measured_ns,
+                                       data->next_timer_ns);
        else
                /*
                 * we were idle so long that we count it as a perfect
@@ -542,7 +527,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        data->correction_factor[data->bucket] = new_factor;
 
        /* update the repeating-pattern data */
-       data->intervals[data->interval_ptr++] = measured_us;
+       data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
 }
index b5a0e49..de7e706 100644 (file)
@@ -104,7 +104,7 @@ struct teo_cpu {
        u64 sleep_length_ns;
        struct teo_idle_state states[CPUIDLE_STATE_MAX];
        int interval_idx;
-       unsigned int intervals[INTERVALS];
+       u64 intervals[INTERVALS];
 };
 
 static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
@@ -117,9 +117,8 @@ static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
 static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
        struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
-       unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);
        int i, idx_hit = -1, idx_timer = -1;
-       unsigned int measured_us;
+       u64 measured_ns;
 
        if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
                /*
@@ -127,23 +126,28 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                 * enough to the closest timer event expected at the idle state
                 * selection time to be discarded.
                 */
-               measured_us = UINT_MAX;
+               measured_ns = U64_MAX;
        } else {
-               unsigned int lat;
+               u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
 
-               lat = drv->states[dev->last_state_idx].exit_latency;
-
-               measured_us = ktime_to_us(cpu_data->time_span_ns);
+               /*
+                * The computations below are to determine whether or not the
+                * (saved) time till the next timer event and the measured idle
+                * duration fall into the same "bin", so use last_residency_ns
+                * for that instead of time_span_ns which includes the cpuidle
+                * overhead.
+                */
+               measured_ns = dev->last_residency_ns;
                /*
                 * The delay between the wakeup and the first instruction
                 * executed by the CPU is not likely to be worst-case every
                 * time, so take 1/2 of the exit latency as a very rough
                 * approximation of the average of it.
                 */
-               if (measured_us >= lat)
-                       measured_us -= lat / 2;
+               if (measured_ns >= lat_ns)
+                       measured_ns -= lat_ns / 2;
                else
-                       measured_us /= 2;
+                       measured_ns /= 2;
        }
 
        /*
@@ -155,9 +159,9 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 
                cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
 
-               if (drv->states[i].target_residency <= sleep_length_us) {
+               if (drv->states[i].target_residency_ns <= cpu_data->sleep_length_ns) {
                        idx_timer = i;
-                       if (drv->states[i].target_residency <= measured_us)
+                       if (drv->states[i].target_residency_ns <= measured_ns)
                                idx_hit = i;
                }
        }
@@ -193,30 +197,35 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * Save idle duration values corresponding to non-timer wakeups for
         * pattern detection.
         */
-       cpu_data->intervals[cpu_data->interval_idx++] = measured_us;
+       cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
        if (cpu_data->interval_idx > INTERVALS)
                cpu_data->interval_idx = 0;
 }
 
+static bool teo_time_ok(u64 interval_ns)
+{
+       return !tick_nohz_tick_stopped() || interval_ns >= TICK_NSEC;
+}
+
 /**
  * teo_find_shallower_state - Find shallower idle state matching given duration.
  * @drv: cpuidle driver containing state data.
  * @dev: Target CPU.
  * @state_idx: Index of the capping idle state.
- * @duration_us: Idle duration value to match.
+ * @duration_ns: Idle duration value to match.
  */
 static int teo_find_shallower_state(struct cpuidle_driver *drv,
                                    struct cpuidle_device *dev, int state_idx,
-                                   unsigned int duration_us)
+                                   u64 duration_ns)
 {
        int i;
 
        for (i = state_idx - 1; i >= 0; i--) {
-               if (drv->states[i].disabled || dev->states_usage[i].disable)
+               if (dev->states_usage[i].disable)
                        continue;
 
                state_idx = i;
-               if (drv->states[i].target_residency <= duration_us)
+               if (drv->states[i].target_residency_ns <= duration_ns)
                        break;
        }
        return state_idx;
@@ -232,9 +241,10 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                      bool *stop_tick)
 {
        struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
-       int latency_req = cpuidle_governor_latency_req(dev->cpu);
-       unsigned int duration_us, count;
-       int max_early_idx, constraint_idx, idx, i;
+       s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+       u64 duration_ns;
+       unsigned int hits, misses, early_hits;
+       int max_early_idx, prev_max_early_idx, constraint_idx, idx, i;
        ktime_t delta_tick;
 
        if (dev->last_state_idx >= 0) {
@@ -244,50 +254,92 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 
        cpu_data->time_span_ns = local_clock();
 
-       cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick);
-       duration_us = ktime_to_us(cpu_data->sleep_length_ns);
+       duration_ns = tick_nohz_get_sleep_length(&delta_tick);
+       cpu_data->sleep_length_ns = duration_ns;
 
-       count = 0;
+       hits = 0;
+       misses = 0;
+       early_hits = 0;
        max_early_idx = -1;
+       prev_max_early_idx = -1;
        constraint_idx = drv->state_count;
        idx = -1;
 
        for (i = 0; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
-               struct cpuidle_state_usage *su = &dev->states_usage[i];
 
-               if (s->disabled || su->disable) {
+               if (dev->states_usage[i].disable) {
+                       /*
+                        * Ignore disabled states with target residencies beyond
+                        * the anticipated idle duration.
+                        */
+                       if (s->target_residency_ns > duration_ns)
+                               continue;
+
+                       /*
+                        * This state is disabled, so the range of idle duration
+                        * values corresponding to it is covered by the current
+                        * candidate state, but still the "hits" and "misses"
+                        * metrics of the disabled state need to be used to
+                        * decide whether or not the state covering the range in
+                        * question is good enough.
+                        */
+                       hits = cpu_data->states[i].hits;
+                       misses = cpu_data->states[i].misses;
+
+                       if (early_hits >= cpu_data->states[i].early_hits ||
+                           idx < 0)
+                               continue;
+
                        /*
-                        * If the "early hits" metric of a disabled state is
-                        * greater than the current maximum, it should be taken
-                        * into account, because it would be a mistake to select
-                        * a deeper state with lower "early hits" metric.  The
-                        * index cannot be changed to point to it, however, so
-                        * just increase the max count alone and let the index
-                        * still point to a shallower idle state.
+                        * If the current candidate state has been the one with
+                        * the maximum "early hits" metric so far, the "early
+                        * hits" metric of the disabled state replaces the
+                        * current "early hits" count to avoid selecting a
+                        * deeper state with lower "early hits" metric.
                         */
-                       if (max_early_idx >= 0 &&
-                           count < cpu_data->states[i].early_hits)
-                               count = cpu_data->states[i].early_hits;
+                       if (max_early_idx == idx) {
+                               early_hits = cpu_data->states[i].early_hits;
+                               continue;
+                       }
+
+                       /*
+                        * The current candidate state is closer to the disabled
+                        * one than the current maximum "early hits" state, so
+                        * replace the latter with it, but in case the maximum
+                        * "early hits" state index has not been set so far,
+                        * check if the current candidate state is not too
+                        * shallow for that role.
+                        */
+                       if (teo_time_ok(drv->states[idx].target_residency_ns)) {
+                               prev_max_early_idx = max_early_idx;
+                               early_hits = cpu_data->states[i].early_hits;
+                               max_early_idx = idx;
+                       }
 
                        continue;
                }
 
-               if (idx < 0)
+               if (idx < 0) {
                        idx = i; /* first enabled state */
+                       hits = cpu_data->states[i].hits;
+                       misses = cpu_data->states[i].misses;
+               }
 
-               if (s->target_residency > duration_us)
+               if (s->target_residency_ns > duration_ns)
                        break;
 
-               if (s->exit_latency > latency_req && constraint_idx > i)
+               if (s->exit_latency_ns > latency_req && constraint_idx > i)
                        constraint_idx = i;
 
                idx = i;
+               hits = cpu_data->states[i].hits;
+               misses = cpu_data->states[i].misses;
 
-               if (count < cpu_data->states[i].early_hits &&
-                   !(tick_nohz_tick_stopped() &&
-                     drv->states[i].target_residency < TICK_USEC)) {
-                       count = cpu_data->states[i].early_hits;
+               if (early_hits < cpu_data->states[i].early_hits &&
+                   teo_time_ok(drv->states[i].target_residency_ns)) {
+                       prev_max_early_idx = max_early_idx;
+                       early_hits = cpu_data->states[i].early_hits;
                        max_early_idx = i;
                }
        }
@@ -300,10 +352,19 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
         * "early hits" metric, but if that cannot be determined, just use the
         * state selected so far.
         */
-       if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses &&
-           max_early_idx >= 0) {
-               idx = max_early_idx;
-               duration_us = drv->states[idx].target_residency;
+       if (hits <= misses) {
+               /*
+                * The current candidate state is not suitable, so take the one
+                * whose "early hits" metric is the maximum for the range of
+                * shallower states.
+                */
+               if (idx == max_early_idx)
+                       max_early_idx = prev_max_early_idx;
+
+               if (max_early_idx >= 0) {
+                       idx = max_early_idx;
+                       duration_ns = drv->states[idx].target_residency_ns;
+               }
        }
 
        /*
@@ -316,18 +377,17 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
        if (idx < 0) {
                idx = 0; /* No states enabled. Must use 0. */
        } else if (idx > 0) {
+               unsigned int count = 0;
                u64 sum = 0;
 
-               count = 0;
-
                /*
                 * Count and sum the most recent idle duration values less than
                 * the current expected idle duration value.
                 */
                for (i = 0; i < INTERVALS; i++) {
-                       unsigned int val = cpu_data->intervals[i];
+                       u64 val = cpu_data->intervals[i];
 
-                       if (val >= duration_us)
+                       if (val >= duration_ns)
                                continue;
 
                        count++;
@@ -339,17 +399,17 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                 * values are in the interesting range.
                 */
                if (count > INTERVALS / 2) {
-                       unsigned int avg_us = div64_u64(sum, count);
+                       u64 avg_ns = div64_u64(sum, count);
 
                        /*
                         * Avoid spending too much time in an idle state that
                         * would be too shallow.
                         */
-                       if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
-                               duration_us = avg_us;
-                               if (drv->states[idx].target_residency > avg_us)
+                       if (teo_time_ok(avg_ns)) {
+                               duration_ns = avg_ns;
+                               if (drv->states[idx].target_residency_ns > avg_ns)
                                        idx = teo_find_shallower_state(drv, dev,
-                                                                      idx, avg_us);
+                                                                      idx, avg_ns);
                        }
                }
        }
@@ -359,9 +419,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
         * expected idle duration is shorter than the tick period length.
         */
        if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
-           duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
-               unsigned int delta_tick_us = ktime_to_us(delta_tick);
-
+           duration_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
                *stop_tick = false;
 
                /*
@@ -370,8 +428,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                 * till the closest timer including the tick, try to correct
                 * that.
                 */
-               if (idx > 0 && drv->states[idx].target_residency > delta_tick_us)
-                       idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us);
+               if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick)
+                       idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
        }
 
        return idx;
@@ -415,7 +473,7 @@ static int teo_enable_device(struct cpuidle_driver *drv,
        memset(cpu_data, 0, sizeof(*cpu_data));
 
        for (i = 0; i < INTERVALS; i++)
-               cpu_data->intervals[i] = UINT_MAX;
+               cpu_data->intervals[i] = U64_MAX;
 
        return 0;
 }
index c8fa5f4..9f1ace9 100644 (file)
@@ -49,6 +49,8 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv)
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
        state->exit_latency = 0;
        state->target_residency = 0;
+       state->exit_latency_ns = 0;
+       state->target_residency_ns = 0;
        state->power_usage = -1;
        state->enter = poll_idle;
        state->disabled = false;
index 2bb2683..38ef770 100644 (file)
@@ -255,25 +255,6 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
        return sprintf(buf, "%u\n", state->_name);\
 }
 
-#define define_store_state_ull_function(_name) \
-static ssize_t store_state_##_name(struct cpuidle_state *state, \
-                                  struct cpuidle_state_usage *state_usage, \
-                                  const char *buf, size_t size)        \
-{ \
-       unsigned long long value; \
-       int err; \
-       if (!capable(CAP_SYS_ADMIN)) \
-               return -EPERM; \
-       err = kstrtoull(buf, 0, &value); \
-       if (err) \
-               return err; \
-       if (value) \
-               state_usage->_name = 1; \
-       else \
-               state_usage->_name = 0; \
-       return size; \
-}
-
 #define define_show_state_ull_function(_name) \
 static ssize_t show_state_##_name(struct cpuidle_state *state, \
                                  struct cpuidle_state_usage *state_usage, \
@@ -292,18 +273,60 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
        return sprintf(buf, "%s\n", state->_name);\
 }
 
-define_show_state_function(exit_latency)
-define_show_state_function(target_residency)
+#define define_show_state_time_function(_name) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+                                 struct cpuidle_state_usage *state_usage, \
+                                 char *buf) \
+{ \
+       return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+}
+
+define_show_state_time_function(exit_latency)
+define_show_state_time_function(target_residency)
 define_show_state_function(power_usage)
 define_show_state_ull_function(usage)
-define_show_state_ull_function(time)
 define_show_state_str_function(name)
 define_show_state_str_function(desc)
-define_show_state_ull_function(disable)
-define_store_state_ull_function(disable)
 define_show_state_ull_function(above)
 define_show_state_ull_function(below)
 
+static ssize_t show_state_time(struct cpuidle_state *state,
+                              struct cpuidle_state_usage *state_usage,
+                              char *buf)
+{
+       return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+}
+
+static ssize_t show_state_disable(struct cpuidle_state *state,
+                                 struct cpuidle_state_usage *state_usage,
+                                 char *buf)
+{
+       return sprintf(buf, "%llu\n",
+                      state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
+}
+
+static ssize_t store_state_disable(struct cpuidle_state *state,
+                                  struct cpuidle_state_usage *state_usage,
+                                  const char *buf, size_t size)
+{
+       unsigned int value;
+       int err;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       err = kstrtouint(buf, 0, &value);
+       if (err)
+               return err;
+
+       if (value)
+               state_usage->disable |= CPUIDLE_STATE_DISABLED_BY_USER;
+       else
+               state_usage->disable &= ~CPUIDLE_STATE_DISABLED_BY_USER;
+
+       return size;
+}
+
 define_one_state_ro(name, show_state_name);
 define_one_state_ro(desc, show_state_desc);
 define_one_state_ro(latency, show_state_exit_latency);
index 774d991..aca7523 100644 (file)
@@ -1297,7 +1297,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
        tp->write_seq = snd_isn;
        tp->snd_nxt = snd_isn;
        tp->snd_una = snd_isn;
-       inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
+       inet_sk(sk)->inet_id = prandom_u32();
        assign_rxopt(sk, opt);
 
        if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
index 0891ab8..98bc5a4 100644 (file)
@@ -1702,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                return peekmsg(sk, msg, len, nonblock, flags);
 
        if (sk_can_busy_loop(sk) &&
-           skb_queue_empty(&sk->sk_receive_queue) &&
+           skb_queue_empty_lockless(&sk->sk_receive_queue) &&
            sk->sk_state == TCP_ESTABLISHED)
                sk_busy_loop(sk, nonblock);
 
index 446490c..f840e61 100644 (file)
@@ -160,6 +160,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
        int lev, prev_lev, ret = 0;
        unsigned long cur_time;
 
+       lockdep_assert_held(&devfreq->lock);
        cur_time = jiffies;
 
        /* Immediately exit if previous_freq is not initialized yet. */
@@ -409,6 +410,9 @@ static void devfreq_monitor(struct work_struct *work)
  */
 void devfreq_monitor_start(struct devfreq *devfreq)
 {
+       if (devfreq->governor->interrupt_driven)
+               return;
+
        INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
        if (devfreq->profile->polling_ms)
                queue_delayed_work(devfreq_wq, &devfreq->work,
@@ -426,6 +430,9 @@ EXPORT_SYMBOL(devfreq_monitor_start);
  */
 void devfreq_monitor_stop(struct devfreq *devfreq)
 {
+       if (devfreq->governor->interrupt_driven)
+               return;
+
        cancel_delayed_work_sync(&devfreq->work);
 }
 EXPORT_SYMBOL(devfreq_monitor_stop);
@@ -453,6 +460,10 @@ void devfreq_monitor_suspend(struct devfreq *devfreq)
        devfreq_update_status(devfreq, devfreq->previous_freq);
        devfreq->stop_polling = true;
        mutex_unlock(&devfreq->lock);
+
+       if (devfreq->governor->interrupt_driven)
+               return;
+
        cancel_delayed_work_sync(&devfreq->work);
 }
 EXPORT_SYMBOL(devfreq_monitor_suspend);
@@ -473,11 +484,15 @@ void devfreq_monitor_resume(struct devfreq *devfreq)
        if (!devfreq->stop_polling)
                goto out;
 
+       if (devfreq->governor->interrupt_driven)
+               goto out_update;
+
        if (!delayed_work_pending(&devfreq->work) &&
                        devfreq->profile->polling_ms)
                queue_delayed_work(devfreq_wq, &devfreq->work,
                        msecs_to_jiffies(devfreq->profile->polling_ms));
 
+out_update:
        devfreq->last_stat_updated = jiffies;
        devfreq->stop_polling = false;
 
@@ -509,6 +524,9 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
        if (devfreq->stop_polling)
                goto out;
 
+       if (devfreq->governor->interrupt_driven)
+               goto out;
+
        /* if new delay is zero, stop polling */
        if (!new_delay) {
                mutex_unlock(&devfreq->lock);
@@ -625,7 +643,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
        devfreq = find_device_devfreq(dev);
        mutex_unlock(&devfreq_list_lock);
        if (!IS_ERR(devfreq)) {
-               dev_err(dev, "%s: Unable to create devfreq for the device.\n",
+               dev_err(dev, "%s: devfreq device already exists!\n",
                        __func__);
                err = -EINVAL;
                goto err_out;
@@ -1195,7 +1213,7 @@ static ssize_t available_governors_show(struct device *d,
         * The devfreq with immutable governor (e.g., passive) shows
         * only own governor.
         */
-       if (df->governor->immutable) {
+       if (df->governor && df->governor->immutable) {
                count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
                                  "%s ", df->governor_name);
        /*
@@ -1397,12 +1415,17 @@ static ssize_t trans_stat_show(struct device *dev,
        int i, j;
        unsigned int max_state = devfreq->profile->max_state;
 
-       if (!devfreq->stop_polling &&
-                       devfreq_update_status(devfreq, devfreq->previous_freq))
-               return 0;
        if (max_state == 0)
                return sprintf(buf, "Not Supported.\n");
 
+       mutex_lock(&devfreq->lock);
+       if (!devfreq->stop_polling &&
+                       devfreq_update_status(devfreq, devfreq->previous_freq)) {
+               mutex_unlock(&devfreq->lock);
+               return 0;
+       }
+       mutex_unlock(&devfreq->lock);
+
        len = sprintf(buf, "     From  :   To\n");
        len += sprintf(buf + len, "           :");
        for (i = 0; i < max_state; i++)
index 87b4205..85c7a77 100644 (file)
@@ -673,7 +673,6 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
        for (i = 0; i < info->num_events; i++) {
                edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
                if (IS_ERR(edev[i])) {
-                       ret = PTR_ERR(edev[i]);
                        dev_err(&pdev->dev,
                                "failed to add devfreq-event device\n");
                        return PTR_ERR(edev[i]);
index bbe5ff9..dc7533c 100644 (file)
@@ -31,6 +31,8 @@
  * @name:              Governor's name
  * @immutable:         Immutable flag for governor. If the value is 1,
  *                     this govenror is never changeable to other governor.
+ * @interrupt_driven:  Devfreq core won't schedule polling work for this
+ *                     governor if value is set to 1.
  * @get_target_freq:   Returns desired operating frequency for the device.
  *                     Basically, get_target_freq will run
  *                     devfreq_dev_profile.get_dev_status() to get the
@@ -49,6 +51,7 @@ struct devfreq_governor {
 
        const char name[DEVFREQ_NAME_LEN];
        const unsigned int immutable;
+       const unsigned int interrupt_driven;
        int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
        int (*event_handler)(struct devfreq *devfreq,
                                unsigned int event, void *data);
index a6ba75f..0b65f89 100644 (file)
 #include <linux/devfreq.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 #include <linux/module.h>
-#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
 #include <linux/reset.h>
+#include <linux/workqueue.h>
 
 #include "governor.h"
 
@@ -33,6 +35,8 @@
 #define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN             BIT(30)
 #define ACTMON_DEV_CTRL_ENB                                    BIT(31)
 
+#define ACTMON_DEV_CTRL_STOP                                   0x00000000
+
 #define ACTMON_DEV_UPPER_WMARK                                 0x4
 #define ACTMON_DEV_LOWER_WMARK                                 0x8
 #define ACTMON_DEV_INIT_AVG                                    0xc
@@ -68,6 +72,8 @@
 
 #define KHZ                                                    1000
 
+#define KHZ_MAX                                                (ULONG_MAX / KHZ)
+
 /* Assume that the bus is saturated if the utilization is 25% */
 #define BUS_SATURATION_RATIO                                   25
 
@@ -90,9 +96,10 @@ struct tegra_devfreq_device_config {
        unsigned int    boost_down_threshold;
 
        /*
-        * Threshold of activity (cycles) below which the CPU frequency isn't
-        * to be taken into account. This is to avoid increasing the EMC
-        * frequency when the CPU is very busy but not accessing the bus often.
+        * Threshold of activity (cycles translated to kHz) below which the
+        * CPU frequency isn't to be taken into account. This is to avoid
+        * increasing the EMC frequency when the CPU is very busy but not
+        * accessing the bus often.
         */
        u32             avg_dependency_threshold;
 };
@@ -102,7 +109,7 @@ enum tegra_actmon_device {
        MCCPU,
 };
 
-static struct tegra_devfreq_device_config actmon_device_configs[] = {
+static const struct tegra_devfreq_device_config actmon_device_configs[] = {
        {
                /* MCALL: All memory accesses (including from the CPUs) */
                .offset = 0x1c0,
@@ -117,10 +124,10 @@ static struct tegra_devfreq_device_config actmon_device_configs[] = {
                .offset = 0x200,
                .irq_mask = 1 << 25,
                .boost_up_coeff = 800,
-               .boost_down_coeff = 90,
+               .boost_down_coeff = 40,
                .boost_up_threshold = 27,
                .boost_down_threshold = 10,
-               .avg_dependency_threshold = 50000,
+               .avg_dependency_threshold = 16000, /* 16MHz in kHz units */
        },
 };
 
@@ -156,11 +163,16 @@ struct tegra_devfreq {
        struct clk              *emc_clock;
        unsigned long           max_freq;
        unsigned long           cur_freq;
-       struct notifier_block   rate_change_nb;
+       struct notifier_block   clk_rate_change_nb;
+
+       struct delayed_work     cpufreq_update_work;
+       struct notifier_block   cpu_rate_change_nb;
 
        struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
 
-       int irq;
+       unsigned int            irq;
+
+       bool                    started;
 };
 
 struct tegra_actmon_emc_ratio {
@@ -168,8 +180,8 @@ struct tegra_actmon_emc_ratio {
        unsigned long emc_freq;
 };
 
-static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
-       { 1400000, ULONG_MAX },
+static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+       { 1400000,    KHZ_MAX },
        { 1200000,    750000 },
        { 1100000,    600000 },
        { 1000000,    500000 },
@@ -199,18 +211,26 @@ static void device_writel(struct tegra_devfreq_device *dev, u32 val,
        writel_relaxed(val, dev->regs + offset);
 }
 
-static unsigned long do_percent(unsigned long val, unsigned int pct)
+static unsigned long do_percent(unsigned long long val, unsigned int pct)
 {
-       return val * pct / 100;
+       val = val * pct;
+       do_div(val, 100);
+
+       /*
+        * High freq + high boosting percent + large polling interval are
+        * resulting in integer overflow when watermarks are calculated.
+        */
+       return min_t(u64, val, U32_MAX);
 }
 
 static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
                                           struct tegra_devfreq_device *dev)
 {
-       u32 avg = dev->avg_count;
        u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
-       u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
+       u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
+       u32 avg;
 
+       avg = min(dev->avg_count, U32_MAX - band);
        device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
 
        avg = max(dev->avg_count, band);
@@ -220,7 +240,7 @@ static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
 static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
                                       struct tegra_devfreq_device *dev)
 {
-       u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+       u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
 
        device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
                      ACTMON_DEV_UPPER_WMARK);
@@ -229,12 +249,6 @@ static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
                      ACTMON_DEV_LOWER_WMARK);
 }
 
-static void actmon_write_barrier(struct tegra_devfreq *tegra)
-{
-       /* ensure the update has reached the ACTMON */
-       readl(tegra->regs + ACTMON_GLB_STATUS);
-}
-
 static void actmon_isr_device(struct tegra_devfreq *tegra,
                              struct tegra_devfreq_device *dev)
 {
@@ -256,10 +270,10 @@ static void actmon_isr_device(struct tegra_devfreq *tegra,
 
                dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
 
-               if (dev->boost_freq >= tegra->max_freq)
+               if (dev->boost_freq >= tegra->max_freq) {
+                       dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
                        dev->boost_freq = tegra->max_freq;
-               else
-                       dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+               }
        } else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
                /*
                 * new_boost = old_boost * down_coef
@@ -270,31 +284,22 @@ static void actmon_isr_device(struct tegra_devfreq *tegra,
 
                dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
 
-               if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
-                       dev->boost_freq = 0;
-               else
-                       dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
-       }
-
-       if (dev->config->avg_dependency_threshold) {
-               if (dev->avg_count >= dev->config->avg_dependency_threshold)
-                       dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
-               else if (dev->boost_freq == 0)
+               if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
                        dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+                       dev->boost_freq = 0;
+               }
        }
 
        device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
 
        device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
-
-       actmon_write_barrier(tegra);
 }
 
 static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
                                            unsigned long cpu_freq)
 {
        unsigned int i;
-       struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
+       const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
 
        for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
                if (cpu_freq >= ratio->cpu_freq) {
@@ -308,25 +313,37 @@ static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
        return 0;
 }
 
+static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
+                                              struct tegra_devfreq_device *dev)
+{
+       unsigned int avg_sustain_coef;
+       unsigned long target_freq;
+
+       target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
+       avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
+       target_freq = do_percent(target_freq, avg_sustain_coef);
+
+       return target_freq;
+}
+
 static void actmon_update_target(struct tegra_devfreq *tegra,
                                 struct tegra_devfreq_device *dev)
 {
        unsigned long cpu_freq = 0;
        unsigned long static_cpu_emc_freq = 0;
-       unsigned int avg_sustain_coef;
 
-       if (dev->config->avg_dependency_threshold) {
-               cpu_freq = cpufreq_get(0);
-               static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
-       }
+       dev->target_freq = actmon_device_target_freq(tegra, dev);
 
-       dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
-       avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
-       dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
-       dev->target_freq += dev->boost_freq;
+       if (dev->config->avg_dependency_threshold &&
+           dev->config->avg_dependency_threshold <= dev->target_freq) {
+               cpu_freq = cpufreq_quick_get(0);
+               static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
 
-       if (dev->avg_count >= dev->config->avg_dependency_threshold)
+               dev->target_freq += dev->boost_freq;
                dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
+       } else {
+               dev->target_freq += dev->boost_freq;
+       }
 }
 
 static irqreturn_t actmon_thread_isr(int irq, void *data)
@@ -354,8 +371,8 @@ static irqreturn_t actmon_thread_isr(int irq, void *data)
        return handled ? IRQ_HANDLED : IRQ_NONE;
 }
 
-static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
-                                      unsigned long action, void *ptr)
+static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
+                                     unsigned long action, void *ptr)
 {
        struct clk_notifier_data *data = ptr;
        struct tegra_devfreq *tegra;
@@ -365,7 +382,7 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
        if (action != POST_RATE_CHANGE)
                return NOTIFY_OK;
 
-       tegra = container_of(nb, struct tegra_devfreq, rate_change_nb);
+       tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);
 
        tegra->cur_freq = data->new_rate / KHZ;
 
@@ -375,7 +392,79 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
                tegra_devfreq_update_wmark(tegra, dev);
        }
 
-       actmon_write_barrier(tegra);
+       return NOTIFY_OK;
+}
+
+static void tegra_actmon_delayed_update(struct work_struct *work)
+{
+       struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
+                                                  cpufreq_update_work.work);
+
+       mutex_lock(&tegra->devfreq->lock);
+       update_devfreq(tegra->devfreq);
+       mutex_unlock(&tegra->devfreq->lock);
+}
+
+static unsigned long
+tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
+                                 unsigned int cpu_freq)
+{
+       struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
+       unsigned long static_cpu_emc_freq, dev_freq;
+
+       dev_freq = actmon_device_target_freq(tegra, actmon_dev);
+
+       /* check whether CPU's freq is taken into account at all */
+       if (dev_freq < actmon_dev->config->avg_dependency_threshold)
+               return 0;
+
+       static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
+
+       if (dev_freq >= static_cpu_emc_freq)
+               return 0;
+
+       return static_cpu_emc_freq;
+}
+
+static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
+                                     unsigned long action, void *ptr)
+{
+       struct cpufreq_freqs *freqs = ptr;
+       struct tegra_devfreq *tegra;
+       unsigned long old, new, delay;
+
+       if (action != CPUFREQ_POSTCHANGE)
+               return NOTIFY_OK;
+
+       tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);
+
+       /*
+        * Quickly check whether CPU frequency should be taken into account
+        * at all, without blocking CPUFreq's core.
+        */
+       if (mutex_trylock(&tegra->devfreq->lock)) {
+               old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
+               new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
+               mutex_unlock(&tegra->devfreq->lock);
+
+               /*
+                * If CPU's frequency shouldn't be taken into account at
+                * the moment, then there is no need to update the devfreq's
+                * state because ISR will re-check CPU's frequency on the
+                * next interrupt.
+                */
+               if (old == new)
+                       return NOTIFY_OK;
+       }
+
+       /*
+        * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
+        * to allow asynchronous notifications. This means we can't block
+        * here for too long, otherwise CPUFreq's core will complain with a
+        * warning splat.
+        */
+       delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
+       schedule_delayed_work(&tegra->cpufreq_update_work, delay);
 
        return NOTIFY_OK;
 }
@@ -385,9 +474,12 @@ static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
 {
        u32 val = 0;
 
+       /* reset boosting on governor's restart */
+       dev->boost_freq = 0;
+
        dev->target_freq = tegra->cur_freq;
 
-       dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+       dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
        device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
 
        tegra_devfreq_update_avg_wmark(tegra, dev);
@@ -405,45 +497,116 @@ static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
                << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
        val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
        val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
-       val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
        val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
        val |= ACTMON_DEV_CTRL_ENB;
 
        device_writel(dev, val, ACTMON_DEV_CTRL);
 }
 
-static void tegra_actmon_start(struct tegra_devfreq *tegra)
+static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
 {
+       struct tegra_devfreq_device *dev = tegra->devices;
        unsigned int i;
 
-       disable_irq(tegra->irq);
+       for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
+               device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
+               device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
+                             ACTMON_DEV_INTR_STATUS);
+       }
+}
 
-       actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
+static int tegra_actmon_resume(struct tegra_devfreq *tegra)
+{
+       unsigned int i;
+       int err;
+
+       if (!tegra->devfreq->profile->polling_ms || !tegra->started)
+               return 0;
+
+       actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
                      ACTMON_GLB_PERIOD_CTRL);
 
+       /*
+        * CLK notifications are needed in order to reconfigure the upper
+        * consecutive watermark in accordance to the actual clock rate
+        * to avoid unnecessary upper interrupts.
+        */
+       err = clk_notifier_register(tegra->emc_clock,
+                                   &tegra->clk_rate_change_nb);
+       if (err) {
+               dev_err(tegra->devfreq->dev.parent,
+                       "Failed to register rate change notifier\n");
+               return err;
+       }
+
+       tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+
        for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
                tegra_actmon_configure_device(tegra, &tegra->devices[i]);
 
-       actmon_write_barrier(tegra);
+       /*
+        * We are estimating CPU's memory bandwidth requirement based on
+        * amount of memory accesses and system's load, judging by CPU's
+        * frequency. We also don't want to receive events about CPU's
+        * frequency transaction when governor is stopped, hence notifier
+        * is registered dynamically.
+        */
+       err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
+                                       CPUFREQ_TRANSITION_NOTIFIER);
+       if (err) {
+               dev_err(tegra->devfreq->dev.parent,
+                       "Failed to register rate change notifier: %d\n", err);
+               goto err_stop;
+       }
 
        enable_irq(tegra->irq);
+
+       return 0;
+
+err_stop:
+       tegra_actmon_stop_devices(tegra);
+
+       clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
+
+       return err;
 }
 
-static void tegra_actmon_stop(struct tegra_devfreq *tegra)
+static int tegra_actmon_start(struct tegra_devfreq *tegra)
 {
-       unsigned int i;
+       int ret = 0;
 
-       disable_irq(tegra->irq);
+       if (!tegra->started) {
+               tegra->started = true;
 
-       for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
-               device_writel(&tegra->devices[i], 0x00000000, ACTMON_DEV_CTRL);
-               device_writel(&tegra->devices[i], ACTMON_INTR_STATUS_CLEAR,
-                             ACTMON_DEV_INTR_STATUS);
+               ret = tegra_actmon_resume(tegra);
+               if (ret)
+                       tegra->started = false;
        }
 
-       actmon_write_barrier(tegra);
+       return ret;
+}
 
-       enable_irq(tegra->irq);
+static void tegra_actmon_pause(struct tegra_devfreq *tegra)
+{
+       if (!tegra->devfreq->profile->polling_ms || !tegra->started)
+               return;
+
+       disable_irq(tegra->irq);
+
+       cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
+                                   CPUFREQ_TRANSITION_NOTIFIER);
+
+       cancel_delayed_work_sync(&tegra->cpufreq_update_work);
+
+       tegra_actmon_stop_devices(tegra);
+
+       clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
+}
+
+static void tegra_actmon_stop(struct tegra_devfreq *tegra)
+{
+       tegra_actmon_pause(tegra);
+       tegra->started = false;
 }
 
 static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
@@ -463,7 +626,7 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
        rate = dev_pm_opp_get_freq(opp);
        dev_pm_opp_put(opp);
 
-       err = clk_set_min_rate(tegra->emc_clock, rate);
+       err = clk_set_min_rate(tegra->emc_clock, rate * KHZ);
        if (err)
                return err;
 
@@ -492,7 +655,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
        stat->private_data = tegra;
 
        /* The below are to be used by the other governors */
-       stat->current_frequency = cur_freq * KHZ;
+       stat->current_frequency = cur_freq;
 
        actmon_dev = &tegra->devices[MCALL];
 
@@ -503,7 +666,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
        stat->busy_time *= 100 / BUS_SATURATION_RATIO;
 
        /* Number of cycles in a sampling period */
-       stat->total_time = ACTMON_SAMPLING_PERIOD * cur_freq;
+       stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;
 
        stat->busy_time = min(stat->busy_time, stat->total_time);
 
@@ -511,7 +674,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
 }
 
 static struct devfreq_dev_profile tegra_devfreq_profile = {
-       .polling_ms     = 0,
+       .polling_ms     = ACTMON_SAMPLING_PERIOD,
        .target         = tegra_devfreq_target,
        .get_dev_status = tegra_devfreq_get_dev_status,
 };
@@ -542,7 +705,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
                target_freq = max(target_freq, dev->target_freq);
        }
 
-       *freq = target_freq * KHZ;
+       *freq = target_freq;
 
        return 0;
 }
@@ -551,11 +714,19 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
                                        unsigned int event, void *data)
 {
        struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
+       unsigned int *new_delay = data;
+       int ret = 0;
+
+       /*
+        * Couple devfreq-device with the governor early because it is
+        * needed at the moment of governor's start (used by ISR).
+        */
+       tegra->devfreq = devfreq;
 
        switch (event) {
        case DEVFREQ_GOV_START:
                devfreq_monitor_start(devfreq);
-               tegra_actmon_start(tegra);
+               ret = tegra_actmon_start(tegra);
                break;
 
        case DEVFREQ_GOV_STOP:
@@ -563,6 +734,21 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
                devfreq_monitor_stop(devfreq);
                break;
 
+       case DEVFREQ_GOV_INTERVAL:
+               /*
+                * ACTMON hardware supports up to 256 milliseconds for the
+                * sampling period.
+                */
+               if (*new_delay > 256) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               tegra_actmon_pause(tegra);
+               devfreq_interval_update(devfreq, new_delay);
+               ret = tegra_actmon_resume(tegra);
+               break;
+
        case DEVFREQ_GOV_SUSPEND:
                tegra_actmon_stop(tegra);
                devfreq_monitor_suspend(devfreq);
@@ -570,11 +756,11 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
 
        case DEVFREQ_GOV_RESUME:
                devfreq_monitor_resume(devfreq);
-               tegra_actmon_start(tegra);
+               ret = tegra_actmon_start(tegra);
                break;
        }
 
-       return 0;
+       return ret;
 }
 
 static struct devfreq_governor tegra_devfreq_governor = {
@@ -582,14 +768,16 @@ static struct devfreq_governor tegra_devfreq_governor = {
        .get_target_freq = tegra_governor_get_target,
        .event_handler = tegra_governor_event_handler,
        .immutable = true,
+       .interrupt_driven = true,
 };
 
 static int tegra_devfreq_probe(struct platform_device *pdev)
 {
-       struct tegra_devfreq *tegra;
        struct tegra_devfreq_device *dev;
+       struct tegra_devfreq *tegra;
+       struct devfreq *devfreq;
        unsigned int i;
-       unsigned long rate;
+       long rate;
        int err;
 
        tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
@@ -618,12 +806,22 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
                return PTR_ERR(tegra->emc_clock);
        }
 
-       tegra->irq = platform_get_irq(pdev, 0);
-       if (tegra->irq < 0) {
-               err = tegra->irq;
+       err = platform_get_irq(pdev, 0);
+       if (err < 0) {
                dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);
                return err;
        }
+       tegra->irq = err;
+
+       irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);
+
+       err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
+                                       actmon_thread_isr, IRQF_ONESHOT,
+                                       "tegra-devfreq", tegra);
+       if (err) {
+               dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
+               return err;
+       }
 
        reset_control_assert(tegra->reset);
 
@@ -636,8 +834,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
 
        reset_control_deassert(tegra->reset);
 
-       tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
-       tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+       rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
+       if (rate < 0) {
+               dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
+               return rate;
+       }
+
+       tegra->max_freq = rate / KHZ;
 
        for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
                dev = tegra->devices + i;
@@ -648,7 +851,14 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
        for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
                rate = clk_round_rate(tegra->emc_clock, rate);
 
-               err = dev_pm_opp_add(&pdev->dev, rate, 0);
+               if (rate < 0) {
+                       dev_err(&pdev->dev,
+                               "Failed to round clock rate: %ld\n", rate);
+                       err = rate;
+                       goto remove_opps;
+               }
+
+               err = dev_pm_opp_add(&pdev->dev, rate / KHZ, 0);
                if (err) {
                        dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);
                        goto remove_opps;
@@ -657,49 +867,33 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, tegra);
 
-       tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
-       err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
-       if (err) {
-               dev_err(&pdev->dev,
-                       "Failed to register rate change notifier\n");
-               goto remove_opps;
-       }
+       tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
+       tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;
+
+       INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
+                         tegra_actmon_delayed_update);
 
        err = devfreq_add_governor(&tegra_devfreq_governor);
        if (err) {
                dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
-               goto unreg_notifier;
+               goto remove_opps;
        }
 
        tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
-       tegra->devfreq = devfreq_add_device(&pdev->dev,
-                                           &tegra_devfreq_profile,
-                                           "tegra_actmon",
-                                           NULL);
-       if (IS_ERR(tegra->devfreq)) {
-               err = PTR_ERR(tegra->devfreq);
-               goto remove_governor;
-       }
+       tegra_devfreq_profile.initial_freq /= KHZ;
 
-       err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
-                                       actmon_thread_isr, IRQF_ONESHOT,
-                                       "tegra-devfreq", tegra);
-       if (err) {
-               dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
-               goto remove_devfreq;
+       devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
+                                    "tegra_actmon", NULL);
+       if (IS_ERR(devfreq)) {
+               err = PTR_ERR(devfreq);
+               goto remove_governor;
        }
 
        return 0;
 
-remove_devfreq:
-       devfreq_remove_device(tegra->devfreq);
-
 remove_governor:
        devfreq_remove_governor(&tegra_devfreq_governor);
 
-unreg_notifier:
-       clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
-
 remove_opps:
        dev_pm_opp_remove_all_dynamic(&pdev->dev);
 
@@ -716,7 +910,6 @@ static int tegra_devfreq_remove(struct platform_device *pdev)
        devfreq_remove_device(tegra->devfreq);
        devfreq_remove_governor(&tegra_devfreq_governor);
 
-       clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
        dev_pm_opp_remove_all_dynamic(&pdev->dev);
 
        reset_control_reset(tegra->reset);
index 9ba74ab..c27e206 100644 (file)
@@ -1707,6 +1707,14 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
        if (!sdma->script_number)
                sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
 
+       if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
+                                 / sizeof(s32)) {
+               dev_err(sdma->dev,
+                       "SDMA script number %d not match with firmware.\n",
+                       sdma->script_number);
+               return;
+       }
+
        for (i = 0; i < sdma->script_number; i++)
                if (addr_arr[i] > 0)
                        saddr_arr[i] = addr_arr[i];
index 8e90a40..ef73f65 100644 (file)
@@ -694,6 +694,25 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
 
        /* remove all transactions, including active transaction */
        spin_lock_irqsave(&bchan->vc.lock, flag);
+       /*
+        * If we have transactions queued, then some might be committed to the
+        * hardware in the desc fifo.  The only way to reset the desc fifo is
+        * to do a hardware reset (either by pipe or the entire block).
+        * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
+        * pipe.  If the pipe is left disabled (default state after pipe reset)
+        * and is accessed by a connected hardware engine, a fatal error in
+        * the BAM will occur.  There is a small window where this could happen
+        * with bam_chan_init_hw(), but it is assumed that the caller has
+        * stopped activity on any attached hardware engine.  Make sure to do
+        * this first so that the BAM hardware doesn't cause memory corruption
+        * by accessing freed resources.
+        */
+       if (!list_empty(&bchan->desc_list)) {
+               async_desc = list_first_entry(&bchan->desc_list,
+                                             struct bam_async_desc, desc_node);
+               bam_chan_init_hw(bchan, async_desc->dir);
+       }
+
        list_for_each_entry_safe(async_desc, tmp,
                                 &bchan->desc_list, desc_node) {
                list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
index 525dc73..8546ad0 100644 (file)
 #define SPRD_DMA_SRC_TRSF_STEP_OFFSET  0
 #define SPRD_DMA_TRSF_STEP_MASK                GENMASK(15, 0)
 
+/* SPRD DMA_SRC_BLK_STEP register definition */
+#define SPRD_DMA_LLIST_HIGH_MASK       GENMASK(31, 28)
+#define SPRD_DMA_LLIST_HIGH_SHIFT      28
+
 /* define DMA channel mode & trigger mode mask */
 #define SPRD_DMA_CHN_MODE_MASK         GENMASK(7, 0)
 #define SPRD_DMA_TRG_MODE_MASK         GENMASK(7, 0)
@@ -208,6 +212,7 @@ struct sprd_dma_dev {
        struct sprd_dma_chn     channels[0];
 };
 
+static void sprd_dma_free_desc(struct virt_dma_desc *vd);
 static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
 static struct of_dma_filter_info sprd_dma_info = {
        .filter_fn = sprd_dma_filter_fn,
@@ -609,12 +614,19 @@ static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
 static void sprd_dma_free_chan_resources(struct dma_chan *chan)
 {
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+       struct virt_dma_desc *cur_vd = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&schan->vc.lock, flags);
+       if (schan->cur_desc)
+               cur_vd = &schan->cur_desc->vd;
+
        sprd_dma_stop(schan);
        spin_unlock_irqrestore(&schan->vc.lock, flags);
 
+       if (cur_vd)
+               sprd_dma_free_desc(cur_vd);
+
        vchan_free_chan_resources(&schan->vc);
        pm_runtime_put(chan->device->dev);
 }
@@ -717,6 +729,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
        u32 int_mode = flags & SPRD_DMA_INT_MASK;
        int src_datawidth, dst_datawidth, src_step, dst_step;
        u32 temp, fix_mode = 0, fix_en = 0;
+       phys_addr_t llist_ptr;
 
        if (dir == DMA_MEM_TO_DEV) {
                src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
@@ -814,13 +827,16 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
                 * Set the link-list pointer point to next link-list
                 * configuration's physical address.
                 */
-               hw->llist_ptr = schan->linklist.phy_addr + temp;
+               llist_ptr = schan->linklist.phy_addr + temp;
+               hw->llist_ptr = lower_32_bits(llist_ptr);
+               hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
+                       SPRD_DMA_LLIST_HIGH_MASK;
        } else {
                hw->llist_ptr = 0;
+               hw->src_blk_step = 0;
        }
 
        hw->frg_step = 0;
-       hw->src_blk_step = 0;
        hw->des_blk_step = 0;
        return 0;
 }
@@ -1023,15 +1039,22 @@ static int sprd_dma_resume(struct dma_chan *chan)
 static int sprd_dma_terminate_all(struct dma_chan *chan)
 {
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+       struct virt_dma_desc *cur_vd = NULL;
        unsigned long flags;
        LIST_HEAD(head);
 
        spin_lock_irqsave(&schan->vc.lock, flags);
+       if (schan->cur_desc)
+               cur_vd = &schan->cur_desc->vd;
+
        sprd_dma_stop(schan);
 
        vchan_get_all_descriptors(&schan->vc, &head);
        spin_unlock_irqrestore(&schan->vc.lock, flags);
 
+       if (cur_vd)
+               sprd_dma_free_desc(cur_vd);
+
        vchan_dma_desc_free_list(&schan->vc, &head);
        return 0;
 }
index 5f8adf5..6e12685 100644 (file)
@@ -40,6 +40,7 @@
 #define ADMA_CH_CONFIG_MAX_BURST_SIZE                   16
 #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val)             ((val) & 0xf)
 #define ADMA_CH_CONFIG_MAX_BUFS                                8
+#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
 
 #define ADMA_CH_FIFO_CTRL                              0x2c
 #define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val)         (((val) & 0xf) << 8)
@@ -77,6 +78,7 @@ struct tegra_adma;
  * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
  * @ch_req_rx_shift: Register offset for AHUB receive channel select.
  * @ch_base_offset: Register offset of DMA channel registers.
+ * @has_outstanding_reqs: If DMA channel can have outstanding requests.
  * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
  * @ch_req_mask: Mask for Tx or Rx channel select.
  * @ch_req_max: Maximum number of Tx or Rx channels available.
@@ -95,6 +97,7 @@ struct tegra_adma_chip_data {
        unsigned int ch_req_max;
        unsigned int ch_reg_size;
        unsigned int nr_channels;
+       bool has_outstanding_reqs;
 };
 
 /*
@@ -594,6 +597,8 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
                         ADMA_CH_CTRL_FLOWCTRL_EN;
        ch_regs->config |= cdata->adma_get_burst_config(burst_size);
        ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
+       if (cdata->has_outstanding_reqs)
+               ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
        ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
        ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
 
@@ -778,6 +783,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
        .ch_req_tx_shift        = 28,
        .ch_req_rx_shift        = 24,
        .ch_base_offset         = 0,
+       .has_outstanding_reqs   = false,
        .ch_fifo_ctrl           = TEGRA210_FIFO_CTRL_DEFAULT,
        .ch_req_mask            = 0xf,
        .ch_req_max             = 10,
@@ -792,6 +798,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
        .ch_req_tx_shift        = 27,
        .ch_req_rx_shift        = 22,
        .ch_base_offset         = 0x10000,
+       .has_outstanding_reqs   = true,
        .ch_fifo_ctrl           = TEGRA186_FIFO_CTRL_DEFAULT,
        .ch_req_mask            = 0x1f,
        .ch_req_max             = 20,
index 2f946f5..8c2f7eb 100644 (file)
@@ -586,9 +586,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
 {
        struct cppi41_channel *c = to_cpp41_chan(chan);
+       struct dma_async_tx_descriptor *txd = NULL;
+       struct cppi41_dd *cdd = c->cdd;
        struct cppi41_desc *d;
        struct scatterlist *sg;
        unsigned int i;
+       int error;
+
+       error = pm_runtime_get(cdd->ddev.dev);
+       if (error < 0) {
+               pm_runtime_put_noidle(cdd->ddev.dev);
+
+               return NULL;
+       }
+
+       if (cdd->is_suspended)
+               goto err_out_not_ready;
 
        d = c->desc;
        for_each_sg(sgl, sg, sg_len, i) {
@@ -611,7 +624,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
                d++;
        }
 
-       return &c->txd;
+       txd = &c->txd;
+
+err_out_not_ready:
+       pm_runtime_mark_last_busy(cdd->ddev.dev);
+       pm_runtime_put_autosuspend(cdd->ddev.dev);
+
+       return txd;
 }
 
 static void cppi41_compute_td_desc(struct cppi41_desc *d)
index e7dc3c4..5d56f1e 100644 (file)
@@ -68,6 +68,9 @@
 #define XILINX_DMA_DMACR_CIRC_EN               BIT(1)
 #define XILINX_DMA_DMACR_RUNSTOP               BIT(0)
 #define XILINX_DMA_DMACR_FSYNCSRC_MASK         GENMASK(6, 5)
+#define XILINX_DMA_DMACR_DELAY_MASK            GENMASK(31, 24)
+#define XILINX_DMA_DMACR_FRAME_COUNT_MASK      GENMASK(23, 16)
+#define XILINX_DMA_DMACR_MASTER_MASK           GENMASK(11, 8)
 
 #define XILINX_DMA_REG_DMASR                   0x0004
 #define XILINX_DMA_DMASR_EOL_LATE_ERR          BIT(15)
@@ -1354,7 +1357,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
                                           node);
                hw = &segment->hw;
 
-               xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+               xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
+                            xilinx_prep_dma_addr_t(hw->buf_addr));
 
                /* Start the transfer */
                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -2117,8 +2121,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
        chan->config.gen_lock = cfg->gen_lock;
        chan->config.master = cfg->master;
 
+       dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
        if (cfg->gen_lock && chan->genlock) {
                dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+               dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
                dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
        }
 
@@ -2134,11 +2140,13 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
        chan->config.delay = cfg->delay;
 
        if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+               dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
                dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
                chan->config.coalesc = cfg->coalesc;
        }
 
        if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+               dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
                dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
                chan->config.delay = cfg->delay;
        }
index d413a0b..0bb6285 100644 (file)
@@ -553,7 +553,11 @@ void ghes_edac_unregister(struct ghes *ghes)
        if (!ghes_pvt)
                return;
 
+       if (atomic_dec_return(&ghes_init))
+               return;
+
        mci = ghes_pvt->mci;
+       ghes_pvt = NULL;
        edac_mc_del_mc(mci->pdev);
        edac_mc_free(mci);
 }
index 178ee81..b248870 100644 (file)
@@ -182,6 +182,7 @@ config RESET_ATTACK_MITIGATION
 
 config EFI_RCI2_TABLE
        bool "EFI Runtime Configuration Interface Table Version 2 Support"
+       depends on X86 || COMPILE_TEST
        help
          Displays the content of the Runtime Configuration Interface
          Table version 2 on Dell EMC PowerEdge systems as a binary
index 69f00f7..e98bbf8 100644 (file)
@@ -554,7 +554,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
                                              sizeof(*seed) + size);
                        if (seed != NULL) {
                                pr_notice("seeding entropy pool\n");
-                               add_device_randomness(seed->bits, seed->size);
+                               add_bootloader_randomness(seed->bits, seed->size);
                                early_memunmap(seed, sizeof(*seed) + size);
                        } else {
                                pr_err("Could not map UEFI random seed!\n");
index 0460c75..ee0661d 100644 (file)
@@ -52,6 +52,7 @@ lib-$(CONFIG_EFI_ARMSTUB)     += arm-stub.o fdt.o string.o random.o \
 
 lib-$(CONFIG_ARM)              += arm32-stub.o
 lib-$(CONFIG_ARM64)            += arm64-stub.o
+CFLAGS_arm32-stub.o            := -DTEXT_OFFSET=$(TEXT_OFFSET)
 CFLAGS_arm64-stub.o            := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
 #
index e8f7aef..41213bf 100644 (file)
@@ -195,6 +195,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
                                 unsigned long dram_base,
                                 efi_loaded_image_t *image)
 {
+       unsigned long kernel_base;
        efi_status_t status;
 
        /*
@@ -204,9 +205,18 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
         * loaded. These assumptions are made by the decompressor,
         * before any memory map is available.
         */
-       dram_base = round_up(dram_base, SZ_128M);
+       kernel_base = round_up(dram_base, SZ_128M);
 
-       status = reserve_kernel_base(sys_table, dram_base, reserve_addr,
+       /*
+        * Note that some platforms (notably, the Raspberry Pi 2) put
+        * spin-tables and other pieces of firmware at the base of RAM,
+        * abusing the fact that the window of TEXT_OFFSET bytes at the
+        * base of the kernel image is only partially used at the moment.
+        * (Up to 5 pages are used for the swapper page tables)
+        */
+       kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
+
+       status = reserve_kernel_base(sys_table, kernel_base, reserve_addr,
                                     reserve_size);
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
@@ -220,7 +230,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
        *image_size = image->image_size;
        status = efi_relocate_kernel(sys_table, image_addr, *image_size,
                                     *image_size,
-                                    dram_base + MAX_UNCOMP_KERNEL_SIZE, 0);
+                                    kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table, "Failed to relocate kernel.\n");
                efi_free(sys_table, *reserve_size, *reserve_addr);
index 3caae7f..35dbc27 100644 (file)
@@ -260,11 +260,11 @@ fail:
 }
 
 /*
- * Allocate at the lowest possible address.
+ * Allocate at the lowest possible address that is not below 'min'.
  */
-efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
-                          unsigned long size, unsigned long align,
-                          unsigned long *addr)
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+                                unsigned long size, unsigned long align,
+                                unsigned long *addr, unsigned long min)
 {
        unsigned long map_size, desc_size, buff_size;
        efi_memory_desc_t *map;
@@ -311,13 +311,8 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                start = desc->phys_addr;
                end = start + desc->num_pages * EFI_PAGE_SIZE;
 
-               /*
-                * Don't allocate at 0x0. It will confuse code that
-                * checks pointers against NULL. Skip the first 8
-                * bytes so we start at a nice even number.
-                */
-               if (start == 0x0)
-                       start += 8;
+               if (start < min)
+                       start = min;
 
                start = round_up(start, align);
                if ((start + size) > end)
@@ -698,7 +693,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
                                 unsigned long image_size,
                                 unsigned long alloc_size,
                                 unsigned long preferred_addr,
-                                unsigned long alignment)
+                                unsigned long alignment,
+                                unsigned long min_addr)
 {
        unsigned long cur_image_addr;
        unsigned long new_addr = 0;
@@ -731,8 +727,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
         * possible.
         */
        if (status != EFI_SUCCESS) {
-               status = efi_low_alloc(sys_table_arg, alloc_size, alignment,
-                                      &new_addr);
+               status = efi_low_alloc_above(sys_table_arg, alloc_size,
+                                            alignment, &new_addr, min_addr);
        }
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
index 877745c..7baf48c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/efi.h>
+#include <linux/security.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
@@ -717,6 +718,13 @@ static long efi_test_ioctl(struct file *file, unsigned int cmd,
 
 static int efi_test_open(struct inode *inode, struct file *file)
 {
+       int ret = security_locked_down(LOCKDOWN_EFI_TEST);
+
+       if (ret)
+               return ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
        /*
         * nothing special to do here
         * We do accept multiple open files at the same time as we
index ebd7977..31f9f0e 100644 (file)
@@ -88,6 +88,7 @@ int __init efi_tpm_eventlog_init(void)
 
        if (tbl_size < 0) {
                pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+               ret = -EINVAL;
                goto out_calc;
        }
 
index 2f1e9da..3302125 100644 (file)
@@ -362,9 +362,8 @@ static void mrfld_irq_handler(struct irq_desc *desc)
        chained_irq_exit(irqchip, desc);
 }
 
-static int mrfld_irq_init_hw(struct gpio_chip *chip)
+static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
 {
-       struct mrfld_gpio *priv = gpiochip_get_data(chip);
        void __iomem *reg;
        unsigned int base;
 
@@ -376,8 +375,6 @@ static int mrfld_irq_init_hw(struct gpio_chip *chip)
                reg = gpio_reg(&priv->chip, base, GFER);
                writel(0, reg);
        }
-
-       return 0;
 }
 
 static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
@@ -400,7 +397,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
 {
        const struct mrfld_gpio_pinrange *range;
        const char *pinctrl_dev_name;
-       struct gpio_irq_chip *girq;
        struct mrfld_gpio *priv;
        u32 gpio_base, irq_base;
        void __iomem *base;
@@ -448,21 +444,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
 
        raw_spin_lock_init(&priv->lock);
 
-       girq = &priv->chip.irq;
-       girq->chip = &mrfld_irqchip;
-       girq->init_hw = mrfld_irq_init_hw;
-       girq->parent_handler = mrfld_irq_handler;
-       girq->num_parents = 1;
-       girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
-                                    sizeof(*girq->parents),
-                                    GFP_KERNEL);
-       if (!girq->parents)
-               return -ENOMEM;
-       girq->parents[0] = pdev->irq;
-       girq->first = irq_base;
-       girq->default_type = IRQ_TYPE_NONE;
-       girq->handler = handle_bad_irq;
-
        pci_set_drvdata(pdev, priv);
        retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
        if (retval) {
@@ -484,6 +465,18 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
                }
        }
 
+       retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
+                                     handle_bad_irq, IRQ_TYPE_NONE);
+       if (retval) {
+               dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
+               return retval;
+       }
+
+       mrfld_irq_init_hw(priv);
+
+       gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq,
+                                    mrfld_irq_handler);
+
        return 0;
 }
 
index 61e38e4..85b0515 100644 (file)
@@ -140,7 +140,12 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
        return 0;
 
 error_free:
-       while (i--) {
+       for (i = 0; i < last_entry; ++i) {
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+               amdgpu_bo_unref(&bo);
+       }
+       for (i = first_userptr; i < num_entries; ++i) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
 
                amdgpu_bo_unref(&bo);
index 2e53fee..82823d9 100644 (file)
@@ -536,7 +536,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 
        list_for_each_entry(lobj, validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
-               bool binding_userptr = false;
                struct mm_struct *usermm;
 
                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
@@ -553,7 +552,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 
                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     lobj->user_pages);
-                       binding_userptr = true;
                }
 
                if (p->evictable == lobj)
@@ -563,10 +561,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                if (r)
                        return r;
 
-               if (binding_userptr) {
-                       kvfree(lobj->user_pages);
-                       lobj->user_pages = NULL;
-               }
+               kvfree(lobj->user_pages);
+               lobj->user_pages = NULL;
        }
        return 0;
 }
index 6614d8a..2cdaf3b 100644 (file)
@@ -604,8 +604,11 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
                        continue;
                }
 
-               for (i = 0; i < num_entities; i++)
+               for (i = 0; i < num_entities; i++) {
+                       mutex_lock(&ctx->adev->lock_reset);
                        drm_sched_entity_fini(&ctx->entities[0][i].entity);
+                       mutex_unlock(&ctx->adev->lock_reset);
+               }
        }
 }
 
index 5a1939d..7a6c837 100644 (file)
@@ -2885,6 +2885,13 @@ fence_driver_init:
                        DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
        }
 
+       /*
+        * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
+        * Otherwise the mgpu fan boost feature will be skipped due to the
+        * gpu instance is counted less.
+        */
+       amdgpu_register_gpu_instance(adev);
+
        /* enable clockgating, etc. after ib tests, etc. since some blocks require
         * explicit gating rather than handling it automatically.
         */
index 2a00a36..e1c1572 100644 (file)
@@ -1016,6 +1016,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
        {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
        {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
 
        /* Renoir */
        {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
index 6ee4021..6d19183 100644 (file)
@@ -289,6 +289,7 @@ struct amdgpu_gfx {
        uint32_t                        mec2_feature_version;
        bool                            mec_fw_write_wait;
        bool                            me_fw_write_wait;
+       bool                            cp_fw_write_wait;
        struct amdgpu_ring              gfx_ring[AMDGPU_MAX_GFX_RINGS];
        unsigned                        num_gfx_rings;
        struct amdgpu_ring              compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
index 9d76e09..96b2a31 100644 (file)
@@ -218,7 +218,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
-       int r;
+       int r = 0;
 
        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;
@@ -243,6 +243,8 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
        job->fence = dma_fence_get(fence);
 
        amdgpu_job_free_resources(job);
+
+       fence = r ? ERR_PTR(r) : fence;
        return fence;
 }
 
index d55f5ba..a042ef4 100644 (file)
@@ -190,7 +190,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
                pm_runtime_put_autosuspend(dev->dev);
        }
 
-       amdgpu_register_gpu_instance(adev);
 out:
        if (r) {
                /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
index 1fead0e..7289e1b 100644 (file)
@@ -453,7 +453,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                .interruptible = (bp->type != ttm_bo_type_kernel),
                .no_wait_gpu = false,
                .resv = bp->resv,
-               .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+               .flags = bp->type != ttm_bo_type_kernel ?
+                       TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
        };
        struct amdgpu_bo *bo;
        unsigned long page_align, size = bp->size;
index 4d71537..a460900 100644 (file)
@@ -950,21 +950,7 @@ static void psp_print_fw_hdr(struct psp_context *psp,
                             struct amdgpu_firmware_info *ucode)
 {
        struct amdgpu_device *adev = psp->adev;
-       const struct sdma_firmware_header_v1_0 *sdma_hdr =
-               (const struct sdma_firmware_header_v1_0 *)
-               adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
-       const struct gfx_firmware_header_v1_0 *ce_hdr =
-               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-       const struct gfx_firmware_header_v1_0 *pfp_hdr =
-               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-       const struct gfx_firmware_header_v1_0 *me_hdr =
-               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-       const struct gfx_firmware_header_v1_0 *mec_hdr =
-               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-       const struct rlc_firmware_header_v2_0 *rlc_hdr =
-               (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
-       const struct smc_firmware_header_v1_0 *smc_hdr =
-               (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+       struct common_firmware_header *hdr;
 
        switch (ucode->ucode_id) {
        case AMDGPU_UCODE_ID_SDMA0:
@@ -975,25 +961,33 @@ static void psp_print_fw_hdr(struct psp_context *psp,
        case AMDGPU_UCODE_ID_SDMA5:
        case AMDGPU_UCODE_ID_SDMA6:
        case AMDGPU_UCODE_ID_SDMA7:
-               amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header);
+               hdr = (struct common_firmware_header *)
+                       adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+               amdgpu_ucode_print_sdma_hdr(hdr);
                break;
        case AMDGPU_UCODE_ID_CP_CE:
-               amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+               hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
+               amdgpu_ucode_print_gfx_hdr(hdr);
                break;
        case AMDGPU_UCODE_ID_CP_PFP:
-               amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+               hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
+               amdgpu_ucode_print_gfx_hdr(hdr);
                break;
        case AMDGPU_UCODE_ID_CP_ME:
-               amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+               hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
+               amdgpu_ucode_print_gfx_hdr(hdr);
                break;
        case AMDGPU_UCODE_ID_CP_MEC1:
-               amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+               hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
+               amdgpu_ucode_print_gfx_hdr(hdr);
                break;
        case AMDGPU_UCODE_ID_RLC_G:
-               amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header);
+               hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
+               amdgpu_ucode_print_rlc_hdr(hdr);
                break;
        case AMDGPU_UCODE_ID_SMC:
-               amdgpu_ucode_print_smc_hdr(&smc_hdr->header);
+               hdr = (struct common_firmware_header *)adev->pm.fw->data;
+               amdgpu_ucode_print_smc_hdr(hdr);
                break;
        default:
                break;
index b70b3c4..65044b1 100644 (file)
@@ -429,13 +429,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
  * Open up a stream for HW test
  */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                             struct amdgpu_bo *bo,
                              struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 1024;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -444,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
        ib = &job->ibs[0];
 
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        /* stitch together an VCE create msg */
        ib->length_dw = 0;
@@ -476,8 +477,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
        ib->ptr[ib->length_dw++] = 0x00000014; /* len */
        ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x00000001;
 
        for (i = ib->length_dw; i < ib_size_dw; ++i)
@@ -1110,13 +1111,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
        /* skip vce ring1/2 ib test for now, since it's not reliable */
        if (ring != &ring->adev->vce.ring[0])
                return 0;
 
-       r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
@@ -1132,5 +1140,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
index 30ea54d..e802f7d 100644 (file)
@@ -59,6 +59,7 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                             struct amdgpu_bo *bo,
                              struct dma_fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               bool direct, struct dma_fence **fence);
index 7a6beb2..3199e4a 100644 (file)
@@ -569,13 +569,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 }
 
 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                             struct dma_fence **fence)
+                                        struct amdgpu_bo *bo,
+                                        struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -583,14 +584,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
@@ -621,13 +622,14 @@ err:
 }
 
 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                               struct dma_fence **fence)
+                                         struct amdgpu_bo *bo,
+                                         struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -635,14 +637,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = handle;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
@@ -675,13 +677,20 @@ err:
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
-       r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
-       r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
+       r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;
 
@@ -693,6 +702,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
 
index 957811b..53090ea 100644 (file)
@@ -93,7 +93,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -140,7 +140,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -179,7 +179,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -564,6 +564,32 @@ static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
        kfree(adev->gfx.rlc.register_list_format);
 }
 
+static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
+{
+       adev->gfx.cp_fw_write_wait = false;
+
+       switch (adev->asic_type) {
+       case CHIP_NAVI10:
+       case CHIP_NAVI12:
+       case CHIP_NAVI14:
+               if ((adev->gfx.me_fw_version >= 0x00000046) &&
+                   (adev->gfx.me_feature_version >= 27) &&
+                   (adev->gfx.pfp_fw_version >= 0x00000068) &&
+                   (adev->gfx.pfp_feature_version >= 27) &&
+                   (adev->gfx.mec_fw_version >= 0x0000005b) &&
+                   (adev->gfx.mec_feature_version >= 27))
+                       adev->gfx.cp_fw_write_wait = true;
+               break;
+       default:
+               break;
+       }
+
+       if (adev->gfx.cp_fw_write_wait == false)
+               DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
+                             GRBM requires 1-cycle delay in cp firmware\n");
+}
+
+
 static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
 {
        const struct rlc_firmware_header_v2_1 *rlc_hdr;
@@ -832,6 +858,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
                }
        }
 
+       gfx_v10_0_check_fw_write_wait(adev);
 out:
        if (err) {
                dev_err(adev->dev,
@@ -4765,6 +4792,24 @@ static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
        gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
 }
 
+static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+                                                  uint32_t reg0, uint32_t reg1,
+                                                  uint32_t ref, uint32_t mask)
+{
+       int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+       struct amdgpu_device *adev = ring->adev;
+       bool fw_version_ok = false;
+
+       fw_version_ok = adev->gfx.cp_fw_write_wait;
+
+       if (fw_version_ok)
+               gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
+                                      ref, mask, 0x20);
+       else
+               amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+                                                          ref, mask);
+}
+
 static void
 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
                                      uint32_t me, uint32_t pipe,
@@ -5155,6 +5200,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
        .emit_tmz = gfx_v10_0_ring_emit_tmz,
        .emit_wreg = gfx_v10_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+       .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5188,6 +5234,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_wreg = gfx_v10_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+       .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5218,6 +5265,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
        .emit_rreg = gfx_v10_0_ring_emit_rreg,
        .emit_wreg = gfx_v10_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+       .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };
 
 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
index dcadc73..dfca83a 100644 (file)
@@ -973,6 +973,13 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
        adev->gfx.me_fw_write_wait = false;
        adev->gfx.mec_fw_write_wait = false;
 
+       if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+           (adev->gfx.mec_feature_version < 46) ||
+           (adev->gfx.pfp_fw_version < 0x000000b7) ||
+           (adev->gfx.pfp_feature_version < 46))
+               DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
+                             GRBM requires 1-cycle delay in cp firmware\n");
+
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                if ((adev->gfx.me_fw_version >= 0x0000009c) &&
@@ -1044,6 +1051,12 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
                                AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_RLC_SMU_HS;
                break;
+       case CHIP_RENOIR:
+               if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+                       adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+                               AMD_PG_SUPPORT_CP |
+                               AMD_PG_SUPPORT_RLC_SMU_HS;
+               break;
        default:
                break;
        }
index 8b789f7..db10640 100644 (file)
@@ -151,6 +151,15 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
        WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);
 
        tmp = mmGCVM_L2_CNTL3_DEFAULT;
+       if (adev->gmc.translate_further) {
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+       } else {
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+       }
        WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);
 
        tmp = mmGCVM_L2_CNTL4_DEFAULT;
index 241a4e5..5c7d5f7 100644 (file)
@@ -309,6 +309,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
        job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
        job->vm_needs_flush = true;
+       job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
@@ -343,11 +344,9 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
                              upper_32_bits(pd_addr));
 
-       amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
-
-       /* wait for the invalidate to complete */
-       amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
-                                 1 << vmid, 1 << vmid);
+       amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+                                           hub->vm_inv_eng0_ack + eng,
+                                           req, 1 << vmid);
 
        return pd_addr;
 }
index 3542c20..b39bea6 100644 (file)
@@ -137,6 +137,15 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);
 
        tmp = mmMMVM_L2_CNTL3_DEFAULT;
+       if (adev->gmc.translate_further) {
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+       } else {
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+       }
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);
 
        tmp = mmMMVM_L2_CNTL4_DEFAULT;
index 0cf7ef4..9ed178f 100644 (file)
@@ -219,6 +219,15 @@ static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
 
        tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
+       if (adev->gmc.translate_further) {
+               tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
+               tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+       } else {
+               tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
+               tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+       }
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
 
index 78452cf..4554e72 100644 (file)
@@ -254,6 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
index f6e8168..8493bfb 100644 (file)
@@ -1173,6 +1173,16 @@ static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
 }
 
+static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+                                                  uint32_t reg0, uint32_t reg1,
+                                                  uint32_t ref, uint32_t mask)
+{
+       amdgpu_ring_emit_wreg(ring, reg0, ref);
+       /* wait for a cycle to reset vm_inv_eng*_ack */
+       amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
+       amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
+}
+
 static int sdma_v5_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1588,7 +1598,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
                6 + /* sdma_v5_0_ring_emit_pipeline_sync */
                /* sdma_v5_0_ring_emit_vm_flush */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
-               SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+               SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
                10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
        .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
        .emit_ib = sdma_v5_0_ring_emit_ib,
@@ -1602,6 +1612,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
        .pad_ib = sdma_v5_0_ring_pad_ib,
        .emit_wreg = sdma_v5_0_ring_emit_wreg,
        .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
+       .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
        .init_cond_exec = sdma_v5_0_ring_init_cond_exec,
        .patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
        .preempt_ib = sdma_v5_0_ring_preempt_ib,
index f8ab80c..4ccfcdf 100644 (file)
@@ -1186,11 +1186,6 @@ static int soc15_common_early_init(void *handle)
                                 AMD_PG_SUPPORT_VCN |
                                 AMD_PG_SUPPORT_VCN_DPG;
                adev->external_rev_id = adev->rev_id + 0x91;
-
-               if (adev->pm.pp_feature & PP_GFXOFF_MASK)
-                       adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
-                               AMD_PG_SUPPORT_CP |
-                               AMD_PG_SUPPORT_RLC_SMU_HS;
                break;
        default:
                /* FIXME: not supported yet */
index 670784a..217084d 100644 (file)
@@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
  * Open up a stream for HW test
  */
 static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                                      struct amdgpu_bo *bo,
                                       struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00010000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -268,13 +269,14 @@ err:
  */
 static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
                                        uint32_t handle,
+                                       struct amdgpu_bo *bo,
                                        struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00010000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -327,13 +329,20 @@ err:
 static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
-       r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
-       r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
+       r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;
 
@@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
 
index 01f658f..0995378 100644 (file)
@@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
  * Open up a stream for HW test
  */
 static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                                      struct amdgpu_bo *bo,
                                       struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00000000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -275,13 +276,14 @@ err:
  * Close up a stream for HW test or if userspace failed to do so
  */
 static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                               struct dma_fence **fence)
+                                       struct amdgpu_bo *bo,
+                                       struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00000000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002;
@@ -334,13 +336,20 @@ err:
 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
-       r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
-       r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
+       r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;
 
@@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
 
index 985633c..26c6d73 100644 (file)
 # It calculates Bandwidth and Watermarks values for HW programming
 #
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+calcs_ccflags := -mhard-float -msse
 
-calcs_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+calcs_ccflags += -mpreferred-stack-boundary=4
+else
 calcs_ccflags += -msse2
 endif
 
index 5d1aded..4b8819c 100644 (file)
@@ -580,6 +580,10 @@ static bool construct(struct dc *dc,
 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
        // Allocate memory for the vm_helper
        dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+       if (!dc->vm_helper) {
+               dm_error("%s: failed to create dc->vm_helper\n", __func__);
+               goto fail;
+       }
 
 #endif
        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
index 505967b..51991bf 100644 (file)
@@ -374,6 +374,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
        enum display_dongle_type *dongle = &sink_cap->dongle_type;
        uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
        bool is_type2_dongle = false;
+       int retry_count = 2;
        struct dp_hdmi_dongle_signature_data *dongle_signature;
 
        /* Assume we have no valid DP passive dongle connected */
@@ -386,13 +387,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
                DP_HDMI_DONGLE_ADDRESS,
                type2_dongle_buf,
                sizeof(type2_dongle_buf))) {
-               *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
-               sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+               /* Passive HDMI dongles can sometimes fail here without retrying*/
+               while (retry_count > 0) {
+                       if (i2c_read(ddc,
+                               DP_HDMI_DONGLE_ADDRESS,
+                               type2_dongle_buf,
+                               sizeof(type2_dongle_buf)))
+                               break;
+                       retry_count--;
+               }
+               if (retry_count == 0) {
+                       *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+                       sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
 
-               CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
-                               "DP-DVI passive dongle %dMhz: ",
-                               DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
-               return;
+                       CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+                                       "DP-DVI passive dongle %dMhz: ",
+                                       DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+                       return;
+               }
        }
 
        /* Check if Type 2 dongle.*/
index 8f70295..f25ac17 100644 (file)
@@ -404,6 +404,9 @@ bool resource_are_streams_timing_synchronizable(
        if (stream1->view_format != stream2->view_format)
                return false;
 
+       if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param)
+               return false;
+
        return true;
 }
 static bool is_dp_and_hdmi_sharable(
@@ -1540,6 +1543,9 @@ bool dc_is_stream_unchanged(
        if (!are_stream_backends_same(old_stream, stream))
                return false;
 
+       if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
+               return false;
+
        return true;
 }
 
index 01c7e30..bbd6e01 100644 (file)
@@ -393,6 +393,10 @@ bool cm_helper_translate_curve_to_hw_format(
        rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
        rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+       rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+       rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+       rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
        // All 3 color channels have same x
        corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
                                             dc_fixpt_from_int(region_start));
@@ -464,13 +468,6 @@ bool cm_helper_translate_curve_to_hw_format(
 
        i = 1;
        while (i != hw_points + 1) {
-               if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-                       rgb_plus_1->red = rgb->red;
-               if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-                       rgb_plus_1->green = rgb->green;
-               if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-                       rgb_plus_1->blue = rgb->blue;
-
                rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
                rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
                rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
@@ -562,6 +559,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
        rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
        rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+       rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+       rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+       rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
        corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
                                             dc_fixpt_from_int(region_start));
        corner_points[0].green.x = corner_points[0].red.x;
@@ -624,13 +625,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
        i = 1;
        while (i != hw_points + 1) {
-               if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-                       rgb_plus_1->red = rgb->red;
-               if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-                       rgb_plus_1->green = rgb->green;
-               if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-                       rgb_plus_1->blue = rgb->blue;
-
                rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
                rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
                rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
index ddb8d56..63f3bdd 100644 (file)
@@ -10,15 +10,20 @@ ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 DCN20 += dcn20_dsc.o
 endif
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
 
-CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
+else
 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
 endif
 
index 5a2763d..6b2f2f1 100644 (file)
@@ -814,7 +814,7 @@ static const struct resource_caps res_cap_nv14 = {
                .num_audio = 6,
                .num_stream_encoder = 5,
                .num_pll = 5,
-               .num_dwb = 0,
+               .num_dwb = 1,
                .num_ddc = 5,
 };
 
@@ -1107,6 +1107,11 @@ struct stream_encoder *dcn20_stream_encoder_create(
        if (!enc1)
                return NULL;
 
+       if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+               if (eng_id >= ENGINE_ID_DIGD)
+                       eng_id++;
+       }
+
        dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
                                        &stream_enc_regs[eng_id],
                                        &se_shift, &se_mask);
index ef673bf..ff50ae7 100644 (file)
@@ -3,15 +3,20 @@
 
 DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
 
-CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
+else
 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
 endif
 
index 5b2a65b..8df2516 100644 (file)
 # It provides the general basic services required by other DAL
 # subcomponents.
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+dml_ccflags := -mhard-float -msse
 
-dml_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dml_ccflags += -mpreferred-stack-boundary=4
+else
 dml_ccflags += -msse2
 endif
 
index 6498837..6c6c486 100644 (file)
@@ -2577,7 +2577,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
                        mode_lib->vba.MinActiveDRAMClockChangeMargin
                                        + mode_lib->vba.DRAMClockChangeLatency;
 
-       if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+       if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+               mode_lib->vba.DRAMClockChangeWatermark += 25;
                mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
        } else {
                if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
index b456cd2..9707372 100644 (file)
@@ -1,15 +1,20 @@
 #
 # Makefile for the 'dsc' sub-component of DAL.
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+dsc_ccflags := -mhard-float -msse
 
-dsc_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dsc_ccflags += -mpreferred-stack-boundary=4
+else
 dsc_ccflags += -msse2
 endif
 
index d08493b..beacfff 100644 (file)
@@ -5098,9 +5098,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
 
        if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
                podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
-               for (i = 0; i < podn_vdd_dep->count - 1; i++)
-                       od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
-               if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
+               for (i = 0; i < podn_vdd_dep->count; i++)
                        od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
        } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
                podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
index 0b46140..3ec5a10 100644 (file)
@@ -205,7 +205,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,          WORKLOAD_PPLIB_POWER_SAVING_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,                WORKLOAD_PPLIB_VIDEO_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,                   WORKLOAD_PPLIB_VR_BIT),
-       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,              WORKLOAD_PPLIB_CUSTOM_BIT),
+       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,              WORKLOAD_PPLIB_COMPUTE_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
 };
 
index bbd8ebd..92c393f 100644 (file)
@@ -219,7 +219,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,          WORKLOAD_PPLIB_POWER_SAVING_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,                WORKLOAD_PPLIB_VIDEO_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,                   WORKLOAD_PPLIB_VR_BIT),
-       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,              WORKLOAD_PPLIB_CUSTOM_BIT),
+       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,              WORKLOAD_PPLIB_COMPUTE_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
 };
 
index 8820ce1..ae27490 100644 (file)
@@ -82,7 +82,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
 
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
 
-       drm_atomic_helper_commit_planes(dev, old_state, 0);
+       drm_atomic_helper_commit_planes(dev, old_state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
index ea26bc9..b848270 100644 (file)
@@ -564,8 +564,8 @@ komeda_splitter_validate(struct komeda_splitter *splitter,
        }
 
        if (!in_range(&splitter->vsize, dflow->in_h)) {
-               DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n",
-                                dflow->in_w);
+               DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
+                                dflow->in_h);
                return -EINVAL;
        }
 
index 3ef2ac5..2dd2cd8 100644 (file)
@@ -1581,8 +1581,11 @@ static void commit_tail(struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = old_state->dev;
        const struct drm_mode_config_helper_funcs *funcs;
+       struct drm_crtc_state *new_crtc_state;
+       struct drm_crtc *crtc;
        ktime_t start;
        s64 commit_time_ms;
+       unsigned int i, new_self_refresh_mask = 0;
 
        funcs = dev->mode_config.helper_private;
 
@@ -1602,6 +1605,15 @@ static void commit_tail(struct drm_atomic_state *old_state)
 
        drm_atomic_helper_wait_for_dependencies(old_state);
 
+       /*
+        * We cannot safely access new_crtc_state after
+        * drm_atomic_helper_commit_hw_done() so figure out which crtc's have
+        * self-refresh active beforehand:
+        */
+       for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
+               if (new_crtc_state->self_refresh_active)
+                       new_self_refresh_mask |= BIT(i);
+
        if (funcs && funcs->atomic_commit_tail)
                funcs->atomic_commit_tail(old_state);
        else
@@ -1610,7 +1622,8 @@ static void commit_tail(struct drm_atomic_state *old_state)
        commit_time_ms = ktime_ms_delta(ktime_get(), start);
        if (commit_time_ms > 0)
                drm_self_refresh_helper_update_avg_times(old_state,
-                                                (unsigned long)commit_time_ms);
+                                                (unsigned long)commit_time_ms,
+                                                new_self_refresh_mask);
 
        drm_atomic_helper_commit_cleanup_done(old_state);
 
index 68f4765..dd33fec 100644 (file)
@@ -133,29 +133,33 @@ out_drop_locks:
  * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages
  * @state: the state which has just been applied to hardware
  * @commit_time_ms: the amount of time in ms that this commit took to complete
+ * @new_self_refresh_mask: bitmask of crtc's that have self_refresh_active in
+ *    new state
  *
  * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will
  * update the average entry/exit self refresh times on self refresh transitions.
  * These averages will be used when calculating how long to delay before
  * entering self refresh mode after activity.
  */
-void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
-                                             unsigned int commit_time_ms)
+void
+drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
+                                        unsigned int commit_time_ms,
+                                        unsigned int new_self_refresh_mask)
 {
        struct drm_crtc *crtc;
-       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+       struct drm_crtc_state *old_crtc_state;
        int i;
 
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
-                                     new_crtc_state, i) {
+       for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+               bool new_self_refresh_active = new_self_refresh_mask & BIT(i);
                struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
                struct ewma_psr_time *time;
 
                if (old_crtc_state->self_refresh_active ==
-                   new_crtc_state->self_refresh_active)
+                   new_self_refresh_active)
                        continue;
 
-               if (new_crtc_state->self_refresh_active)
+               if (new_self_refresh_active)
                        time = &sr_data->entry_avg_ms;
                else
                        time = &sr_data->exit_avg_ms;
index 698db54..648cf02 100644 (file)
@@ -180,6 +180,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
                              etnaviv_cmdbuf_get_va(&submit->cmdbuf,
                                        &gpu->mmu_context->cmdbuf_mapping));
 
+       mutex_unlock(&gpu->mmu_context->lock);
+
        /* Reserve space for the bomap */
        if (n_bomap_pages) {
                bomap_start = bomap = iter.data;
@@ -221,8 +223,6 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
                                         obj->base.size);
        }
 
-       mutex_unlock(&gpu->mmu_context->lock);
-
        etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
 
        dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
index 043111a..f8bf488 100644 (file)
@@ -155,9 +155,11 @@ static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *bu
 
        memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
        buf += SZ_4K;
-       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
-               if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+               if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
                        memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
+                       buf += SZ_4K;
+               }
 }
 
 static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
index 35ebae6..3607d34 100644 (file)
@@ -328,12 +328,23 @@ etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
 
        ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
                                          global->memory_base);
-       if (ret) {
-               global->ops->free(ctx);
-               return NULL;
+       if (ret)
+               goto out_free;
+
+       if (global->version == ETNAVIV_IOMMU_V1 &&
+           ctx->cmdbuf_mapping.iova > 0x80000000) {
+               dev_err(global->dev,
+                       "command buffer outside valid memory window\n");
+               goto out_unmap;
        }
 
        return ctx;
+
+out_unmap:
+       etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
+out_free:
+       global->ops->free(ctx);
+       return NULL;
 }
 
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
index e6e8d4a..0a08354 100644 (file)
@@ -864,6 +864,13 @@ load_detect:
 
 out:
        intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+
+       /*
+        * Make sure the refs for power wells enabled during detect are
+        * dropped to avoid a new detect cycle triggered by HPD polling.
+        */
+       intel_display_power_flush_work(dev_priv);
+
        return status;
 }
 
index aa54bb2..dfff6f4 100644 (file)
@@ -9315,7 +9315,6 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
 {
        struct intel_encoder *encoder;
-       bool pch_ssc_in_use = false;
        bool has_fdi = false;
 
        for_each_intel_encoder(&dev_priv->drm, encoder) {
@@ -9343,22 +9342,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
         * clock hierarchy. That would also allow us to do
         * clock bending finally.
         */
+       dev_priv->pch_ssc_use = 0;
+
        if (spll_uses_pch_ssc(dev_priv)) {
                DRM_DEBUG_KMS("SPLL using PCH SSC\n");
-               pch_ssc_in_use = true;
+               dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
        }
 
        if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
                DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
-               pch_ssc_in_use = true;
+               dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
        }
 
        if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
                DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
-               pch_ssc_in_use = true;
+               dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
        }
 
-       if (pch_ssc_in_use)
+       if (dev_priv->pch_ssc_use)
                return;
 
        if (has_fdi) {
index 1209976..c002f23 100644 (file)
@@ -4896,6 +4896,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
 
        power_domains->initializing = true;
 
+       /* Must happen before power domain init on VLV/CHV */
+       intel_update_rawclk(i915);
+
        if (INTEL_GEN(i915) >= 11) {
                icl_display_core_init(i915, resume);
        } else if (IS_CANNONLAKE(i915)) {
index 57e9f0b..9b15ac4 100644 (file)
@@ -1256,6 +1256,9 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                u32 unused)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *i915 =
+                       to_i915(intel_dig_port->base.base.dev);
+       enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
        u32 ret;
 
        ret = DP_AUX_CH_CTL_SEND_BUSY |
@@ -1268,7 +1271,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
              DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
 
-       if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+       if (intel_phy_is_tc(i915, phy) &&
+           intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
                ret |= DP_AUX_CH_CTL_TBT_IO;
 
        return ret;
@@ -5436,6 +5440,12 @@ out:
        if (status != connector_status_connected && !intel_dp->is_mst)
                intel_dp_unset_edid(intel_dp);
 
+       /*
+        * Make sure the refs for power wells enabled during detect are
+        * dropped to avoid a new detect cycle triggered by HPD polling.
+        */
+       intel_display_power_flush_work(dev_priv);
+
        return status;
 }
 
index b8148f8..d5a298c 100644 (file)
@@ -525,16 +525,31 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
        val = I915_READ(WRPLL_CTL(id));
        I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
        POSTING_READ(WRPLL_CTL(id));
+
+       /*
+        * Try to set up the PCH reference clock once all DPLLs
+        * that depend on it have been shut down.
+        */
+       if (dev_priv->pch_ssc_use & BIT(id))
+               intel_init_pch_refclk(dev_priv);
 }
 
 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
                                 struct intel_shared_dpll *pll)
 {
+       enum intel_dpll_id id = pll->info->id;
        u32 val;
 
        val = I915_READ(SPLL_CTL);
        I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
        POSTING_READ(SPLL_CTL);
+
+       /*
+        * Try to set up the PCH reference clock once all DPLLs
+        * that depend on it have been shut down.
+        */
+       if (dev_priv->pch_ssc_use & BIT(id))
+               intel_init_pch_refclk(dev_priv);
 }
 
 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
index e758879..104cf6d 100644 (file)
@@ -147,11 +147,11 @@ enum intel_dpll_id {
         */
        DPLL_ID_ICL_MGPLL4 = 6,
        /**
-        * @DPLL_ID_TGL_TCPLL5: TGL TC PLL port 5 (TC5)
+        * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
         */
        DPLL_ID_TGL_MGPLL5 = 7,
        /**
-        * @DPLL_ID_TGL_TCPLL6: TGL TC PLL port 6 (TC6)
+        * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
         */
        DPLL_ID_TGL_MGPLL6 = 8,
 };
index e02f0fa..b030f7a 100644 (file)
@@ -2565,6 +2565,12 @@ out:
        if (status != connector_status_connected)
                cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
 
+       /*
+        * Make sure the refs for power wells enabled during detect are
+        * dropped to avoid a new detect cycle triggered by HPD polling.
+        */
+       intel_display_power_flush_work(dev_priv);
+
        return status;
 }
 
index 1cdfe05..e41fd94 100644 (file)
@@ -319,6 +319,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
        free_engines(rcu_access_pointer(ctx->engines));
        mutex_destroy(&ctx->engines_mutex);
 
+       kfree(ctx->jump_whitelist);
+
        if (ctx->timeline)
                intel_timeline_put(ctx->timeline);
 
@@ -441,6 +443,9 @@ __create_context(struct drm_i915_private *i915)
        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
+       ctx->jump_whitelist = NULL;
+       ctx->jump_whitelist_cmds = 0;
+
        return ctx;
 
 err_free:
index 260d59c..00537b9 100644 (file)
@@ -192,6 +192,13 @@ struct i915_gem_context {
         * per vm, which may be one per context or shared with the global GTT)
         */
        struct radix_tree_root handles_vma;
+
+       /** jump_whitelist: Bit array for tracking cmds during cmdparsing
+        *  Guarded by struct_mutex
+        */
+       unsigned long *jump_whitelist;
+       /** jump_whitelist_cmds: No of cmd slots available */
+       u32 jump_whitelist_cmds;
 };
 
 #endif /* __I915_GEM_CONTEXT_TYPES_H__ */
index b5f6937..e635e1e 100644 (file)
@@ -296,7 +296,9 @@ static inline u64 gen8_noncanonical_addr(u64 address)
 
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
-       return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
+       return intel_engine_requires_cmd_parser(eb->engine) ||
+               (intel_engine_using_cmd_parser(eb->engine) &&
+                eb->args->batch_len);
 }
 
 static int eb_create(struct i915_execbuffer *eb)
@@ -1955,40 +1957,94 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
        return 0;
 }
 
-static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
+static struct i915_vma *
+shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = eb->i915;
+       struct i915_vma * const vma = *eb->vma;
+       struct i915_address_space *vm;
+       u64 flags;
+
+       /*
+        * PPGTT backed shadow buffers must be mapped RO, to prevent
+        * post-scan tampering
+        */
+       if (CMDPARSER_USES_GGTT(dev_priv)) {
+               flags = PIN_GLOBAL;
+               vm = &dev_priv->ggtt.vm;
+       } else if (vma->vm->has_read_only) {
+               flags = PIN_USER;
+               vm = vma->vm;
+               i915_gem_object_set_readonly(obj);
+       } else {
+               DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+}
+
+static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 {
        struct intel_engine_pool_node *pool;
        struct i915_vma *vma;
+       u64 batch_start;
+       u64 shadow_batch_start;
        int err;
 
        pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
        if (IS_ERR(pool))
                return ERR_CAST(pool);
 
-       err = intel_engine_cmd_parser(eb->engine,
+       vma = shadow_batch_pin(eb, pool->obj);
+       if (IS_ERR(vma))
+               goto err;
+
+       batch_start = gen8_canonical_addr(eb->batch->node.start) +
+                     eb->batch_start_offset;
+
+       shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+       err = intel_engine_cmd_parser(eb->gem_context,
+                                     eb->engine,
                                      eb->batch->obj,
-                                     pool->obj,
+                                     batch_start,
                                      eb->batch_start_offset,
                                      eb->batch_len,
-                                     is_master);
+                                     pool->obj,
+                                     shadow_batch_start);
+
        if (err) {
-               if (err == -EACCES) /* unhandled chained batch */
+               i915_vma_unpin(vma);
+
+               /*
+                * Unsafe GGTT-backed buffers can still be submitted safely
+                * as non-secure.
+                * For PPGTT backing however, we have no choice but to forcibly
+                * reject unsafe buffers
+                */
+               if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
+                       /* Execute original buffer non-secure */
                        vma = NULL;
                else
                        vma = ERR_PTR(err);
                goto err;
        }
 
-       vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
-       if (IS_ERR(vma))
-               goto err;
-
        eb->vma[eb->buffer_count] = i915_vma_get(vma);
        eb->flags[eb->buffer_count] =
                __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
        vma->exec_flags = &eb->flags[eb->buffer_count];
        eb->buffer_count++;
 
+       eb->batch_start_offset = 0;
+       eb->batch = vma;
+
+       if (CMDPARSER_USES_GGTT(eb->i915))
+               eb->batch_flags |= I915_DISPATCH_SECURE;
+
+       /* eb->batch_len unchanged */
+
        vma->private = pool;
        return vma;
 
@@ -2421,6 +2477,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
                       struct drm_i915_gem_exec_object2 *exec,
                       struct drm_syncobj **fences)
 {
+       struct drm_i915_private *i915 = to_i915(dev);
        struct i915_execbuffer eb;
        struct dma_fence *in_fence = NULL;
        struct dma_fence *exec_fence = NULL;
@@ -2432,7 +2489,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
                     ~__EXEC_OBJECT_UNKNOWN_FLAGS);
 
-       eb.i915 = to_i915(dev);
+       eb.i915 = i915;
        eb.file = file;
        eb.args = args;
        if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
@@ -2452,8 +2509,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 
        eb.batch_flags = 0;
        if (args->flags & I915_EXEC_SECURE) {
+               if (INTEL_GEN(i915) >= 11)
+                       return -ENODEV;
+
+               /* Return -EPERM to trigger fallback code on old binaries. */
+               if (!HAS_SECURE_BATCHES(i915))
+                       return -EPERM;
+
                if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
-                   return -EPERM;
+                       return -EPERM;
 
                eb.batch_flags |= I915_DISPATCH_SECURE;
        }
@@ -2530,34 +2594,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
                goto err_vma;
        }
 
+       if (eb.batch_len == 0)
+               eb.batch_len = eb.batch->size - eb.batch_start_offset;
+
        if (eb_use_cmdparser(&eb)) {
                struct i915_vma *vma;
 
-               vma = eb_parse(&eb, drm_is_current_master(file));
+               vma = eb_parse(&eb);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_vma;
                }
-
-               if (vma) {
-                       /*
-                        * Batch parsed and accepted:
-                        *
-                        * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-                        * bit from MI_BATCH_BUFFER_START commands issued in
-                        * the dispatch_execbuffer implementations. We
-                        * specifically don't want that set on batches the
-                        * command parser has accepted.
-                        */
-                       eb.batch_flags |= I915_DISPATCH_SECURE;
-                       eb.batch_start_offset = 0;
-                       eb.batch = vma;
-               }
        }
 
-       if (eb.batch_len == 0)
-               eb.batch_len = eb.batch->size - eb.batch_start_offset;
-
        /*
         * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
index a82cea9..9dd8c29 100644 (file)
@@ -475,12 +475,13 @@ struct intel_engine_cs {
 
        struct intel_engine_hangcheck hangcheck;
 
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_USING_CMD_PARSER BIT(0)
 #define I915_ENGINE_SUPPORTS_STATS   BIT(1)
 #define I915_ENGINE_HAS_PREEMPTION   BIT(2)
 #define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
 #define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
 #define I915_ENGINE_IS_VIRTUAL       BIT(5)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
        unsigned int flags;
 
        /*
@@ -541,9 +542,15 @@ struct intel_engine_cs {
 };
 
 static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
 {
-       return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+       return engine->flags & I915_ENGINE_USING_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
+{
+       return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
 }
 
 static inline bool
index 1363e06..fac75af 100644 (file)
@@ -38,6 +38,9 @@ static int __gt_unpark(struct intel_wakeref *wf)
        gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
        GEM_BUG_ON(!gt->awake);
 
+       if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+               intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+
        intel_enable_gt_powersave(i915);
 
        i915_update_gfx_val(i915);
@@ -67,6 +70,11 @@ static int __gt_park(struct intel_wakeref *wf)
        if (INTEL_GEN(i915) >= 6)
                gen6_rps_idle(i915);
 
+       if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
+               i915_rc6_ctx_wa_check(i915);
+               intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+       }
+
        /* Everything switched off, flush any residual interrupt just in case */
        intel_synchronize_irq(i915);
 
index 728704b..cea184a 100644 (file)
@@ -199,14 +199,6 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
        MOCS_ENTRY(15, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
                   L3_3_WB), \
-       /* Bypass LLC - Uncached (EHL+) */ \
-       MOCS_ENTRY(16, \
-                  LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
-                  L3_1_UC), \
-       /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
-       MOCS_ENTRY(17, \
-                  LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
-                  L3_3_WB), \
        /* Self-Snoop - L3 + LLC */ \
        MOCS_ENTRY(18, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
@@ -270,7 +262,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
                   L3_1_UC),
        /* HW Special Case (Displayable) */
        MOCS_ENTRY(61,
-                  LE_1_UC | LE_TC_1_LLC | LE_SCF(1),
+                  LE_1_UC | LE_TC_1_LLC,
                   L3_3_WB),
 };
 
index 13044c0..4bfaefd 100644 (file)
@@ -498,8 +498,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
                goto out_free_gem;
        }
 
-       i915_gem_object_put(obj);
-
        ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
        if (ret < 0) {
                gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
@@ -524,6 +522,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
                    file_count(dmabuf->file),
                    kref_read(&obj->base.refcount));
 
+       i915_gem_object_put(obj);
+
        return dmabuf_fd;
 
 out_free_dmabuf:
index 2455510..f24096e 100644 (file)
  * granting userspace undue privileges. There are three categories of privilege.
  *
  * First, commands which are explicitly defined as privileged or which should
- * only be used by the kernel driver. The parser generally rejects such
- * commands, though it may allow some from the drm master process.
+ * only be used by the kernel driver. The parser rejects such commands
  *
  * Second, commands which access registers. To support correct/enhanced
  * userspace functionality, particularly certain OpenGL extensions, the parser
- * provides a whitelist of registers which userspace may safely access (for both
- * normal and drm master processes).
+ * provides a whitelist of registers which userspace may safely access
  *
  * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
  * The parser always rejects such commands.
@@ -84,9 +82,9 @@
  * in the per-engine command tables.
  *
  * Other command table entries map fairly directly to high level categories
- * mentioned above: rejected, master-only, register whitelist. The parser
- * implements a number of checks, including the privileged memory checks, via a
- * general bitmasking mechanism.
+ * mentioned above: rejected, register whitelist. The parser implements a number
+ * of checks, including the privileged memory checks, via a general bitmasking
+ * mechanism.
  */
 
 /*
@@ -104,8 +102,6 @@ struct drm_i915_cmd_descriptor {
         * CMD_DESC_REJECT: The command is never allowed
         * CMD_DESC_REGISTER: The command should be checked against the
         *                    register whitelist for the appropriate ring
-        * CMD_DESC_MASTER: The command is allowed if the submitting process
-        *                  is the DRM master
         */
        u32 flags;
 #define CMD_DESC_FIXED    (1<<0)
@@ -113,7 +109,6 @@ struct drm_i915_cmd_descriptor {
 #define CMD_DESC_REJECT   (1<<2)
 #define CMD_DESC_REGISTER (1<<3)
 #define CMD_DESC_BITMASK  (1<<4)
-#define CMD_DESC_MASTER   (1<<5)
 
        /*
         * The command's unique identification bits and the bitmask to get them.
@@ -194,7 +189,7 @@ struct drm_i915_cmd_table {
 #define CMD(op, opm, f, lm, fl, ...)                           \
        {                                                       \
                .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),     \
-               .cmd = { (op), ~0u << (opm) },                  \
+               .cmd = { (op & ~0u << (opm)), ~0u << (opm) },   \
                .length = { (lm) },                             \
                __VA_ARGS__                                     \
        }
@@ -209,14 +204,13 @@ struct drm_i915_cmd_table {
 #define R CMD_DESC_REJECT
 #define W CMD_DESC_REGISTER
 #define B CMD_DESC_BITMASK
-#define M CMD_DESC_MASTER
 
 /*            Command                          Mask   Fixed Len   Action
              ---------------------------------------------------------- */
-static const struct drm_i915_cmd_descriptor common_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
        CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
        CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
-       CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
+       CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      R  ),
        CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
        CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
        CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
@@ -246,7 +240,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
        CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
 };
 
-static const struct drm_i915_cmd_descriptor render_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
        CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
@@ -313,7 +307,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
        CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
-       CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
+       CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   R  ),
        CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
        CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   W,
              .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),
@@ -330,7 +324,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
 };
 
-static const struct drm_i915_cmd_descriptor video_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
@@ -374,7 +368,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
        CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
 };
 
-static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
@@ -412,7 +406,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
              }},                                                      ),
 };
 
-static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
        CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
              .bits = {{
@@ -446,10 +440,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
 };
 
 static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
-       CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
+       CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   R  ),
        CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
 };
 
+/*
+ * For Gen9 we can still rely on the h/w to enforce cmd security, and only
+ * need to re-enforce the register access checks. We therefore only need to
+ * teach the cmdparser how to find the end of each command, and identify
+ * register accesses. The table doesn't need to reject any commands, and so
+ * the only commands listed here are:
+ *   1) Those that touch registers
+ *   2) Those that do not have the default 8-bit length
+ *
+ * Note that the default MI length mask chosen for this table is 0xFF, not
+ * the 0x3F used on older devices. This is because the vast majority of MI
+ * cmds on Gen9 use a standard 8-bit Length field.
+ * All the Gen9 blitter instructions are standard 0xFF length mask, and
+ * none allow access to non-general registers, so in fact no BLT cmds are
+ * included in the table at all.
+ *
+ */
+static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
+       CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
+       CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      S  ),
+       CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      S  ),
+       CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
+       CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
+       CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
+       CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      S  ),
+       CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
+       CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   S  ),
+       CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   S  ),
+       CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  S  ),
+       CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
+             .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
+       CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3FF,  S  ),
+       CMD(  MI_STORE_REGISTER_MEM_GEN8,       SMI,    F,  4,      W,
+             .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
+       CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   S  ),
+       CMD(  MI_LOAD_REGISTER_MEM_GEN8,        SMI,    F,  4,      W,
+             .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
+       CMD(  MI_LOAD_REGISTER_REG,             SMI,    !F,  0xFF,  W,
+             .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),
+
+       /*
+        * We allow BB_START but apply further checks. We just sanitize the
+        * basic fields here.
+        */
+#define MI_BB_START_OPERAND_MASK   GENMASK(SMI-1, 0)
+#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
+       CMD(  MI_BATCH_BUFFER_START_GEN8,       SMI,    !F,  0xFF,  B,
+             .bits = {{
+                       .offset = 0,
+                       .mask = MI_BB_START_OPERAND_MASK,
+                       .expected = MI_BB_START_OPERAND_EXPECT,
+             }},                                                      ),
+};
+
 static const struct drm_i915_cmd_descriptor noop_desc =
        CMD(MI_NOOP, SMI, F, 1, S);
 
@@ -463,40 +511,44 @@ static const struct drm_i915_cmd_descriptor noop_desc =
 #undef R
 #undef W
 #undef B
-#undef M
 
-static const struct drm_i915_cmd_table gen7_render_cmds[] = {
-       { common_cmds, ARRAY_SIZE(common_cmds) },
-       { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
+       { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+       { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
 };
 
-static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
-       { common_cmds, ARRAY_SIZE(common_cmds) },
-       { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
+       { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+       { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
        { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
 };
 
-static const struct drm_i915_cmd_table gen7_video_cmds[] = {
-       { common_cmds, ARRAY_SIZE(common_cmds) },
-       { video_cmds, ARRAY_SIZE(video_cmds) },
+static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
+       { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+       { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
 };
 
-static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
-       { common_cmds, ARRAY_SIZE(common_cmds) },
-       { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
+       { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+       { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
 };
 
-static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
-       { common_cmds, ARRAY_SIZE(common_cmds) },
-       { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
+       { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+       { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
 };
 
-static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
-       { common_cmds, ARRAY_SIZE(common_cmds) },
-       { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
+       { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+       { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
        { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
 };
 
+static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
+       { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
+};
+
+
 /*
  * Register whitelists, sorted by increasing register offset.
  */
@@ -612,17 +664,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
        REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
 };
 
-static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
-       REG32(FORCEWAKE_MT),
-       REG32(DERRMR),
-       REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
-       REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
-       REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
-};
-
-static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
-       REG32(FORCEWAKE_MT),
-       REG32(DERRMR),
+static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
+       REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+       REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
+       REG32(BCS_SWCTRL),
+       REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+       REG64_IDX(BCS_GPR, 0),
+       REG64_IDX(BCS_GPR, 1),
+       REG64_IDX(BCS_GPR, 2),
+       REG64_IDX(BCS_GPR, 3),
+       REG64_IDX(BCS_GPR, 4),
+       REG64_IDX(BCS_GPR, 5),
+       REG64_IDX(BCS_GPR, 6),
+       REG64_IDX(BCS_GPR, 7),
+       REG64_IDX(BCS_GPR, 8),
+       REG64_IDX(BCS_GPR, 9),
+       REG64_IDX(BCS_GPR, 10),
+       REG64_IDX(BCS_GPR, 11),
+       REG64_IDX(BCS_GPR, 12),
+       REG64_IDX(BCS_GPR, 13),
+       REG64_IDX(BCS_GPR, 14),
+       REG64_IDX(BCS_GPR, 15),
 };
 
 #undef REG64
@@ -631,28 +693,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
 struct drm_i915_reg_table {
        const struct drm_i915_reg_descriptor *regs;
        int num_regs;
-       bool master;
 };
 
 static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
-       { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
-       { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+       { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
 };
 
 static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
-       { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
-       { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+       { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
 };
 
 static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
-       { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
-       { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
-       { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+       { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
+       { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
 };
 
 static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
-       { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
-       { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+       { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+};
+
+static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
+       { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
 };
 
 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
@@ -710,6 +771,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
        return 0;
 }
 
+static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
+{
+       u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
+
+       if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
+               return 0xFF;
+
+       DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+       return 0;
+}
+
 static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
                                 const struct drm_i915_cmd_table *cmd_tables,
                                 int cmd_table_count)
@@ -867,18 +939,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
        int cmd_table_count;
        int ret;
 
-       if (!IS_GEN(engine->i915, 7))
+       if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
+                                         engine->class == COPY_ENGINE_CLASS))
                return;
 
        switch (engine->class) {
        case RENDER_CLASS:
                if (IS_HASWELL(engine->i915)) {
-                       cmd_tables = hsw_render_ring_cmds;
+                       cmd_tables = hsw_render_ring_cmd_table;
                        cmd_table_count =
-                               ARRAY_SIZE(hsw_render_ring_cmds);
+                               ARRAY_SIZE(hsw_render_ring_cmd_table);
                } else {
-                       cmd_tables = gen7_render_cmds;
-                       cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+                       cmd_tables = gen7_render_cmd_table;
+                       cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
                }
 
                if (IS_HASWELL(engine->i915)) {
@@ -888,36 +961,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
                        engine->reg_tables = ivb_render_reg_tables;
                        engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
                }
-
                engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
                break;
        case VIDEO_DECODE_CLASS:
-               cmd_tables = gen7_video_cmds;
-               cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+               cmd_tables = gen7_video_cmd_table;
+               cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
                engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        case COPY_ENGINE_CLASS:
-               if (IS_HASWELL(engine->i915)) {
-                       cmd_tables = hsw_blt_ring_cmds;
-                       cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+               engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+               if (IS_GEN(engine->i915, 9)) {
+                       cmd_tables = gen9_blt_cmd_table;
+                       cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
+                       engine->get_cmd_length_mask =
+                               gen9_blt_get_cmd_length_mask;
+
+                       /* BCS Engine unsafe without parser */
+                       engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
+               } else if (IS_HASWELL(engine->i915)) {
+                       cmd_tables = hsw_blt_ring_cmd_table;
+                       cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
                } else {
-                       cmd_tables = gen7_blt_cmds;
-                       cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+                       cmd_tables = gen7_blt_cmd_table;
+                       cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
                }
 
-               if (IS_HASWELL(engine->i915)) {
+               if (IS_GEN(engine->i915, 9)) {
+                       engine->reg_tables = gen9_blt_reg_tables;
+                       engine->reg_table_count =
+                               ARRAY_SIZE(gen9_blt_reg_tables);
+               } else if (IS_HASWELL(engine->i915)) {
                        engine->reg_tables = hsw_blt_reg_tables;
                        engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
                } else {
                        engine->reg_tables = ivb_blt_reg_tables;
                        engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
                }
-
-               engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
                break;
        case VIDEO_ENHANCEMENT_CLASS:
-               cmd_tables = hsw_vebox_cmds;
-               cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+               cmd_tables = hsw_vebox_cmd_table;
+               cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
                /* VECS can use the same length_mask function as VCS */
                engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
@@ -943,7 +1026,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
                return;
        }
 
-       engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
+       engine->flags |= I915_ENGINE_USING_CMD_PARSER;
 }
 
 /**
@@ -955,7 +1038,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
  */
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
 {
-       if (!intel_engine_needs_cmd_parser(engine))
+       if (!intel_engine_using_cmd_parser(engine))
                return;
 
        fini_hash_table(engine);
@@ -1029,22 +1112,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
 }
 
 static const struct drm_i915_reg_descriptor *
-find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, u32 addr)
 {
        const struct drm_i915_reg_table *table = engine->reg_tables;
+       const struct drm_i915_reg_descriptor *reg = NULL;
        int count = engine->reg_table_count;
 
-       for (; count > 0; ++table, --count) {
-               if (!table->master || is_master) {
-                       const struct drm_i915_reg_descriptor *reg;
+       for (; !reg && (count > 0); ++table, --count)
+               reg = __find_reg(table->regs, table->num_regs, addr);
 
-                       reg = __find_reg(table->regs, table->num_regs, addr);
-                       if (reg != NULL)
-                               return reg;
-               }
-       }
-
-       return NULL;
+       return reg;
 }
 
 /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
@@ -1128,8 +1205,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 
 static bool check_cmd(const struct intel_engine_cs *engine,
                      const struct drm_i915_cmd_descriptor *desc,
-                     const u32 *cmd, u32 length,
-                     const bool is_master)
+                     const u32 *cmd, u32 length)
 {
        if (desc->flags & CMD_DESC_SKIP)
                return true;
@@ -1139,12 +1215,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
                return false;
        }
 
-       if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
-               DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
-                                *cmd);
-               return false;
-       }
-
        if (desc->flags & CMD_DESC_REGISTER) {
                /*
                 * Get the distance between individual register offset
@@ -1158,7 +1228,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
                     offset += step) {
                        const u32 reg_addr = cmd[offset] & desc->reg.mask;
                        const struct drm_i915_reg_descriptor *reg =
-                               find_reg(engine, is_master, reg_addr);
+                               find_reg(engine, reg_addr);
 
                        if (!reg) {
                                DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
@@ -1236,16 +1306,112 @@ static bool check_cmd(const struct intel_engine_cs *engine,
        return true;
 }
 
+static int check_bbstart(const struct i915_gem_context *ctx,
+                        u32 *cmd, u32 offset, u32 length,
+                        u32 batch_len,
+                        u64 batch_start,
+                        u64 shadow_batch_start)
+{
+       u64 jump_offset, jump_target;
+       u32 target_cmd_offset, target_cmd_index;
+
+       /* For igt compatibility on older platforms */
+       if (CMDPARSER_USES_GGTT(ctx->i915)) {
+               DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+               return -EACCES;
+       }
+
+       if (length != 3) {
+               DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+                         length);
+               return -EINVAL;
+       }
+
+       jump_target = *(u64*)(cmd+1);
+       jump_offset = jump_target - batch_start;
+
+       /*
+        * Any underflow of jump_target is guaranteed to be outside the range
+        * of a u32, so >= test catches both too large and too small
+        */
+       if (jump_offset >= batch_len) {
+               DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+                         jump_target);
+               return -EINVAL;
+       }
+
+       /*
+        * This cannot overflow a u32 because we already checked jump_offset
+        * is within the BB, and the batch_len is a u32
+        */
+       target_cmd_offset = lower_32_bits(jump_offset);
+       target_cmd_index = target_cmd_offset / sizeof(u32);
+
+       *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+
+       if (target_cmd_index == offset)
+               return 0;
+
+       if (ctx->jump_whitelist_cmds <= target_cmd_index) {
+               DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
+               return -EINVAL;
+       } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+               DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+                         jump_target);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+{
+       const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
+       const u32 exact_size = BITS_TO_LONGS(batch_cmds);
+       u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
+       unsigned long *next_whitelist;
+
+       if (CMDPARSER_USES_GGTT(ctx->i915))
+               return;
+
+       if (batch_cmds <= ctx->jump_whitelist_cmds) {
+               bitmap_zero(ctx->jump_whitelist, batch_cmds);
+               return;
+       }
+
+again:
+       next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
+       if (next_whitelist) {
+               kfree(ctx->jump_whitelist);
+               ctx->jump_whitelist = next_whitelist;
+               ctx->jump_whitelist_cmds =
+                       next_size * BITS_PER_BYTE * sizeof(long);
+               return;
+       }
+
+       if (next_size > exact_size) {
+               next_size = exact_size;
+               goto again;
+       }
+
+       DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+       bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
+
+       return;
+}
+
 #define LENGTH_BIAS 2
 
 /**
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ctx: the context in which the batch is to execute
  * @engine: the engine on which the batch is to execute
  * @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start: Canonical base address of batch
  * @batch_start_offset: byte offset in the batch at which execution starts
  * @batch_len: length of the commands in batch_obj
- * @is_master: is the submitting process the drm master?
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @shadow_batch_start: Canonical base address of shadow_batch_obj
  *
  * Parses the specified batch buffer looking for privilege violations as
  * described in the overview.
@@ -1253,14 +1419,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+                           struct intel_engine_cs *engine,
                            struct drm_i915_gem_object *batch_obj,
-                           struct drm_i915_gem_object *shadow_batch_obj,
+                           u64 batch_start,
                            u32 batch_start_offset,
                            u32 batch_len,
-                           bool is_master)
+                           struct drm_i915_gem_object *shadow_batch_obj,
+                           u64 shadow_batch_start)
 {
-       u32 *cmd, *batch_end;
+       u32 *cmd, *batch_end, offset = 0;
        struct drm_i915_cmd_descriptor default_desc = noop_desc;
        const struct drm_i915_cmd_descriptor *desc = &default_desc;
        bool needs_clflush_after = false;
@@ -1274,6 +1443,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                return PTR_ERR(cmd);
        }
 
+       init_whitelist(ctx, batch_len);
+
        /*
         * We use the batch length as size because the shadow object is as
         * large or larger and copy_batch() will write MI_NOPs to the extra
@@ -1283,31 +1454,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
        do {
                u32 length;
 
-               if (*cmd == MI_BATCH_BUFFER_END) {
-                       if (needs_clflush_after) {
-                               void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
-                               drm_clflush_virt_range(ptr,
-                                                      (void *)(cmd + 1) - ptr);
-                       }
+               if (*cmd == MI_BATCH_BUFFER_END)
                        break;
-               }
 
                desc = find_cmd(engine, *cmd, desc, &default_desc);
                if (!desc) {
                        DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
                                         *cmd);
                        ret = -EINVAL;
-                       break;
-               }
-
-               /*
-                * If the batch buffer contains a chained batch, return an
-                * error that tells the caller to abort and dispatch the
-                * workload as a non-secure batch.
-                */
-               if (desc->cmd.value == MI_BATCH_BUFFER_START) {
-                       ret = -EACCES;
-                       break;
+                       goto err;
                }
 
                if (desc->flags & CMD_DESC_FIXED)
@@ -1321,22 +1476,43 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                                         length,
                                         batch_end - cmd);
                        ret = -EINVAL;
-                       break;
+                       goto err;
                }
 
-               if (!check_cmd(engine, desc, cmd, length, is_master)) {
+               if (!check_cmd(engine, desc, cmd, length)) {
                        ret = -EACCES;
+                       goto err;
+               }
+
+               if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+                       ret = check_bbstart(ctx, cmd, offset, length,
+                                           batch_len, batch_start,
+                                           shadow_batch_start);
+
+                       if (ret)
+                               goto err;
                        break;
                }
 
+               if (ctx->jump_whitelist_cmds > offset)
+                       set_bit(offset, ctx->jump_whitelist);
+
                cmd += length;
+               offset += length;
                if  (cmd >= batch_end) {
                        DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
                        ret = -EINVAL;
-                       break;
+                       goto err;
                }
        } while (1);
 
+       if (needs_clflush_after) {
+               void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+
+               drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
+       }
+
+err:
        i915_gem_object_unpin_map(shadow_batch_obj);
        return ret;
 }
@@ -1357,7 +1533,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 
        /* If the command parser is not enabled, report 0 - unsupported */
        for_each_uabi_engine(engine, dev_priv) {
-               if (intel_engine_needs_cmd_parser(engine)) {
+               if (intel_engine_using_cmd_parser(engine)) {
                        active = true;
                        break;
                }
@@ -1382,6 +1558,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
         *    the parser enabled.
         * 9. Don't whitelist or handle oacontrol specially, as ownership
         *    for oacontrol state is moving to i915-perf.
+        * 10. Support for Gen9 BCS Parsing
         */
-       return 9;
+       return 10;
 }
index bb6f86c..3d717e2 100644 (file)
@@ -364,9 +364,6 @@ static int i915_driver_modeset_probe(struct drm_device *dev)
        if (ret)
                goto cleanup_vga_client;
 
-       /* must happen before intel_power_domains_init_hw() on VLV/CHV */
-       intel_update_rawclk(dev_priv);
-
        intel_power_domains_init_hw(dev_priv, false);
 
        intel_csr_ucode_init(dev_priv);
@@ -1850,6 +1847,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
        i915_gem_suspend_late(dev_priv);
 
+       i915_rc6_ctx_wa_suspend(dev_priv);
+
        intel_uncore_suspend(&dev_priv->uncore);
 
        intel_power_domains_suspend(dev_priv,
@@ -2053,6 +2052,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_power_domains_resume(dev_priv);
 
+       i915_rc6_ctx_wa_resume(dev_priv);
+
        intel_gt_sanitize(&dev_priv->gt, true);
 
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
index 772154e..89b6112 100644 (file)
@@ -593,6 +593,8 @@ struct intel_rps {
 
 struct intel_rc6 {
        bool enabled;
+       bool ctx_corrupted;
+       intel_wakeref_t ctx_corrupted_wakeref;
        u64 prev_hw_residency[4];
        u64 cur_residency[4];
 };
@@ -1723,6 +1725,8 @@ struct drm_i915_private {
                struct work_struct idle_work;
        } gem;
 
+       u8 pch_ssc_use;
+
        /* For i945gm vblank irq vs. C3 workaround */
        struct {
                struct work_struct work;
@@ -2073,9 +2077,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define VEBOX_MASK(dev_priv) \
        ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
 
+/*
+ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
+ * All later gens can run the final buffer from the ppgtt
+ */
+#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
+
 #define HAS_LLC(dev_priv)      (INTEL_INFO(dev_priv)->has_llc)
 #define HAS_SNOOP(dev_priv)    (INTEL_INFO(dev_priv)->has_snoop)
 #define HAS_EDRAM(dev_priv)    ((dev_priv)->edram_size_mb)
+#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
 #define HAS_WT(dev_priv)       ((IS_HASWELL(dev_priv) || \
                                 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
 
@@ -2108,10 +2119,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)    (IS_I830(dev_priv) || IS_I845G(dev_priv))
 
+#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)  \
+       (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
+
 /* WaRsDisableCoarsePowerGating:skl,cnl */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
-       (IS_CANNONLAKE(dev_priv) || \
-        IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+       (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
 
 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
 #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
@@ -2282,6 +2295,14 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                           unsigned long flags);
 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
 
+struct i915_vma * __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                   struct i915_address_space *vm,
+                   const struct i915_ggtt_view *view,
+                   u64 size,
+                   u64 alignment,
+                   u64 flags);
+
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
 static inline int __must_check
@@ -2391,12 +2412,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *cxt,
+                           struct intel_engine_cs *engine,
                            struct drm_i915_gem_object *batch_obj,
-                           struct drm_i915_gem_object *shadow_batch_obj,
+                           u64 user_batch_start,
                            u32 batch_start_offset,
                            u32 batch_len,
-                           bool is_master);
+                           struct drm_i915_gem_object *shadow_batch_obj,
+                           u64 shadow_batch_start);
 
 /* intel_device_info.c */
 static inline struct intel_device_info *
index d0f94f2..98305d9 100644 (file)
@@ -964,6 +964,20 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_address_space *vm = &dev_priv->ggtt.vm;
+
+       return i915_gem_object_pin(obj, vm, view, size, alignment,
+                                  flags | PIN_GLOBAL);
+}
+
+struct i915_vma *
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                   struct i915_address_space *vm,
+                   const struct i915_ggtt_view *view,
+                   u64 size,
+                   u64 alignment,
+                   u64 flags)
+{
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_vma *vma;
        int ret;
 
@@ -1038,7 +1052,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                        return ERR_PTR(ret);
        }
 
-       ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+       ret = i915_vma_pin(vma, size, alignment, flags);
        if (ret)
                return ERR_PTR(ret);
 
index 5d91013..9f1517a 100644 (file)
@@ -62,7 +62,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
                value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
-               value = capable(CAP_SYS_ADMIN);
+               value = HAS_SECURE_BATCHES(i915) && capable(CAP_SYS_ADMIN);
                break;
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version(i915);
index 2abd199..f8ee9ab 100644 (file)
@@ -471,6 +471,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   ECOCHK_PPGTT_WT_HSW          (0x2 << 3)
 #define   ECOCHK_PPGTT_WB_HSW          (0x3 << 3)
 
+#define GEN8_RC6_CTX_INFO              _MMIO(0x8504)
+
 #define GAC_ECO_BITS                   _MMIO(0x14090)
 #define   ECOBITS_SNB_BIT              (1 << 13)
 #define   ECOBITS_PPGTT_CACHE64B       (3 << 8)
@@ -555,6 +557,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  */
 #define BCS_SWCTRL _MMIO(0x22200)
 
+/* There are 16 GPR registers */
+#define BCS_GPR(n)     _MMIO(0x22600 + (n) * 8)
+#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4)
+
 #define GPGPU_THREADS_DISPATCHED        _MMIO(0x2290)
 #define GPGPU_THREADS_DISPATCHED_UDW   _MMIO(0x2290 + 4)
 #define HS_INVOCATION_COUNT             _MMIO(0x2300)
@@ -7211,6 +7217,10 @@ enum {
 #define TGL_DMC_DEBUG_DC5_COUNT        _MMIO(0x101084)
 #define TGL_DMC_DEBUG_DC6_COUNT        _MMIO(0x101088)
 
+/* Display Internal Timeout Register */
+#define RM_TIMEOUT             _MMIO(0x42060)
+#define  MMIO_TIMEOUT_US(us)   ((us) << 0)
+
 /* interrupts */
 #define DE_MASTER_IRQ_CONTROL   (1 << 31)
 #define DE_SPRITEB_FLIP_DONE    (1 << 29)
index 75ee027..2efe1d1 100644 (file)
@@ -126,6 +126,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
         */
        I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
                   PWM1_GATING_DIS | PWM2_GATING_DIS);
+
+       /*
+        * Lower the display internal timeout.
+        * This is needed to avoid any hard hangs when DSI port PLL
+        * is off and a MMIO access is attempted by any privilege
+        * application, using batch buffers or any other means.
+        */
+       I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
 }
 
 static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -8544,6 +8552,100 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
        dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
+{
+       return !I915_READ(GEN8_RC6_CTX_INFO);
+}
+
+static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
+{
+       if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+               return;
+
+       if (i915_rc6_ctx_corrupted(i915)) {
+               DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
+               i915->gt_pm.rc6.ctx_corrupted = true;
+               i915->gt_pm.rc6.ctx_corrupted_wakeref =
+                       intel_runtime_pm_get(&i915->runtime_pm);
+       }
+}
+
+static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
+{
+       if (i915->gt_pm.rc6.ctx_corrupted) {
+               intel_runtime_pm_put(&i915->runtime_pm,
+                                    i915->gt_pm.rc6.ctx_corrupted_wakeref);
+               i915->gt_pm.rc6.ctx_corrupted = false;
+       }
+}
+
+/**
+ * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
+ */
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
+{
+       if (i915->gt_pm.rc6.ctx_corrupted)
+               intel_runtime_pm_put(&i915->runtime_pm,
+                                    i915->gt_pm.rc6.ctx_corrupted_wakeref);
+}
+
+/**
+ * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
+ */
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
+{
+       if (!i915->gt_pm.rc6.ctx_corrupted)
+               return;
+
+       if (i915_rc6_ctx_corrupted(i915)) {
+               i915->gt_pm.rc6.ctx_corrupted_wakeref =
+                       intel_runtime_pm_get(&i915->runtime_pm);
+               return;
+       }
+
+       DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
+       i915->gt_pm.rc6.ctx_corrupted = false;
+}
+
+static void intel_disable_rc6(struct drm_i915_private *dev_priv);
+
+/**
+ * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
+ * @i915: i915 device
+ *
+ * Check if an RC6 CTX corruption has happened since the last check and if so
+ * disable RC6 and runtime power management.
+ *
+ * Return false if no context corruption has happened since the last call of
+ * this function, true otherwise.
+*/
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
+{
+       if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+               return false;
+
+       if (i915->gt_pm.rc6.ctx_corrupted)
+               return false;
+
+       if (!i915_rc6_ctx_corrupted(i915))
+               return false;
+
+       DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+
+       intel_disable_rc6(i915);
+       i915->gt_pm.rc6.ctx_corrupted = true;
+       i915->gt_pm.rc6.ctx_corrupted_wakeref =
+               intel_runtime_pm_get_noresume(&i915->runtime_pm);
+
+       return true;
+}
+
 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -8557,6 +8659,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
                pm_runtime_get(&dev_priv->drm.pdev->dev);
        }
 
+       i915_rc6_ctx_wa_init(dev_priv);
+
        /* Initialize RPS limits (for userspace) */
        if (IS_CHERRYVIEW(dev_priv))
                cherryview_init_gt_powersave(dev_priv);
@@ -8595,6 +8699,8 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
        if (IS_VALLEYVIEW(dev_priv))
                valleyview_cleanup_gt_powersave(dev_priv);
 
+       i915_rc6_ctx_wa_cleanup(dev_priv);
+
        if (!HAS_RC6(dev_priv))
                pm_runtime_put(&dev_priv->drm.pdev->dev);
 }
@@ -8623,7 +8729,7 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
        i915->gt_pm.llc_pstate.enabled = false;
 }
 
-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
 {
        lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
 
@@ -8642,6 +8748,15 @@ static void intel_disable_rc6(struct drm_i915_private *dev_priv)
        dev_priv->gt_pm.rc6.enabled = false;
 }
 
+static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+{
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       mutex_lock(&rps->lock);
+       __intel_disable_rc6(dev_priv);
+       mutex_unlock(&rps->lock);
+}
+
 static void intel_disable_rps(struct drm_i915_private *dev_priv)
 {
        lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
@@ -8667,7 +8782,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
 {
        mutex_lock(&dev_priv->gt_pm.rps.lock);
 
-       intel_disable_rc6(dev_priv);
+       __intel_disable_rc6(dev_priv);
        intel_disable_rps(dev_priv);
        if (HAS_LLC(dev_priv))
                intel_disable_llc_pstate(dev_priv);
@@ -8694,6 +8809,9 @@ static void intel_enable_rc6(struct drm_i915_private *dev_priv)
        if (dev_priv->gt_pm.rc6.enabled)
                return;
 
+       if (dev_priv->gt_pm.rc6.ctx_corrupted)
+               return;
+
        if (IS_CHERRYVIEW(dev_priv))
                cherryview_enable_rc6(dev_priv);
        else if (IS_VALLEYVIEW(dev_priv))
index e3573e1..0f7390c 100644 (file)
@@ -36,6 +36,9 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct i915_request *rq);
index bc2ddeb..f21bc8a 100644 (file)
@@ -556,11 +556,11 @@ static int panfrost_probe(struct platform_device *pdev)
        return 0;
 
 err_out2:
+       pm_runtime_disable(pfdev->dev);
        panfrost_devfreq_fini(pfdev);
 err_out1:
        panfrost_device_fini(pfdev);
 err_out0:
-       pm_runtime_disable(pfdev->dev);
        drm_dev_put(ddev);
        return err;
 }
index bdd9905..a3ed64a 100644 (file)
@@ -224,9 +224,9 @@ static size_t get_pgsize(u64 addr, size_t size)
        return SZ_2M;
 }
 
-void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
-                             struct panfrost_mmu *mmu,
-                             u64 iova, size_t size)
+static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+                                    struct panfrost_mmu *mmu,
+                                    u64 iova, size_t size)
 {
        if (mmu->as < 0)
                return;
@@ -406,11 +406,11 @@ addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
        spin_lock(&pfdev->as_lock);
        list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
                if (as == mmu->as)
-                       break;
+                       goto found_mmu;
        }
-       if (as != mmu->as)
-               goto out;
+       goto out;
 
+found_mmu:
        priv = container_of(mmu, struct panfrost_file_priv, mmu);
 
        spin_lock(&priv->mm_lock);
@@ -432,7 +432,8 @@ out:
 
 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
 
-int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
+static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+                                      u64 addr)
 {
        int ret, i;
        struct panfrost_gem_object *bo;
index 83c57d3..2dba192 100644 (file)
@@ -16,6 +16,7 @@
 #include "panfrost_issues.h"
 #include "panfrost_job.h"
 #include "panfrost_mmu.h"
+#include "panfrost_perfcnt.h"
 #include "panfrost_regs.h"
 
 #define COUNTERS_PER_BLOCK             64
index 9e55076..4528f4d 100644 (file)
@@ -379,11 +379,25 @@ radeon_pci_remove(struct pci_dev *pdev)
 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
+#ifdef CONFIG_PPC64
+       struct drm_device *ddev = pci_get_drvdata(pdev);
+#endif
+
        /* if we are running in a VM, make sure the device
         * torn down properly on reboot/shutdown
         */
        if (radeon_device_is_virtual())
                radeon_pci_remove(pdev);
+
+#ifdef CONFIG_PPC64
+       /* Some adapters need to be suspended before a
+        * shutdown occurs in order to prevent an error
+        * during kexec.
+        * Make this power specific becauase it breaks
+        * some non-power boards.
+        */
+       radeon_suspend_kms(ddev, true, true, false);
+#endif
 }
 
 static int radeon_pmops_suspend(struct device *dev)
index 460fd98..a0b382a 100644 (file)
@@ -1958,6 +1958,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
                case 0x682C:
                        si_pi->cac_weights = cac_weights_cape_verde_pro;
                        si_pi->dte_data = dte_data_sun_xt;
+                       update_dte_from_pl2 = true;
                        break;
                case 0x6825:
                case 0x6827:
index 9a0ee74..f39b97e 100644 (file)
@@ -479,6 +479,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
        struct drm_sched_job *s_job, *tmp;
        uint64_t guilty_context;
        bool found_guilty = false;
+       struct dma_fence *fence;
 
        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct drm_sched_fence *s_fence = s_job->s_fence;
@@ -492,7 +493,16 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
                        dma_fence_set_error(&s_fence->finished, -ECANCELED);
 
                dma_fence_put(s_job->s_fence->parent);
-               s_job->s_fence->parent = sched->ops->run_job(s_job);
+               fence = sched->ops->run_job(s_job);
+
+               if (IS_ERR_OR_NULL(fence)) {
+                       s_job->s_fence->parent = NULL;
+                       dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+               } else {
+                       s_job->s_fence->parent = fence;
+               }
+
+
        }
 }
 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
@@ -720,7 +730,7 @@ static int drm_sched_main(void *param)
                fence = sched->ops->run_job(sched_job);
                drm_sched_fence_scheduled(s_fence);
 
-               if (fence) {
+               if (!IS_ERR_OR_NULL(fence)) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &sched_job->cb,
                                                   drm_sched_process_job);
@@ -730,8 +740,11 @@ static int drm_sched_main(void *param)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
-               } else
+               } else {
+
+                       dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
                        drm_sched_process_job(NULL, &sched_job->cb);
+               }
 
                wake_up(&sched->job_scheduled);
        }
index 04c721d..b89439e 100644 (file)
@@ -488,7 +488,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 
        WARN_ON(!tcon->quirks->has_channel_0);
 
-       tcon->dclk_min_div = 6;
+       tcon->dclk_min_div = 1;
        tcon->dclk_max_div = 127;
        sun4i_tcon0_mode_set_common(tcon, mode);
 
index 5d80507..19c092d 100644 (file)
@@ -557,13 +557,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
        if (args->bcl_start != args->bcl_end) {
                bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
-               if (!bin)
+               if (!bin) {
+                       v3d_job_put(&render->base);
                        return -ENOMEM;
+               }
 
                ret = v3d_job_init(v3d, file_priv, &bin->base,
                                   v3d_job_free, args->in_sync_bcl);
                if (ret) {
                        v3d_job_put(&render->base);
+                       kfree(bin);
                        return ret;
                }
 
index 6654c15..fbe4e16 100644 (file)
@@ -63,13 +63,20 @@ static int axff_init(struct hid_device *hid)
 {
        struct axff_device *axff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int field_count = 0;
        int i, j;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 3eaee2c..63fdbf0 100644 (file)
@@ -1139,6 +1139,7 @@ int hid_open_report(struct hid_device *device)
        __u8 *start;
        __u8 *buf;
        __u8 *end;
+       __u8 *next;
        int ret;
        static int (*dispatch_type[])(struct hid_parser *parser,
                                      struct hid_item *item) = {
@@ -1192,7 +1193,8 @@ int hid_open_report(struct hid_device *device)
        device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
 
        ret = -EINVAL;
-       while ((start = fetch_item(start, end, &item)) != NULL) {
+       while ((next = fetch_item(start, end, &item)) != NULL) {
+               start = next;
 
                if (item.format != HID_ITEM_FORMAT_SHORT) {
                        hid_err(device, "unexpected long global item\n");
@@ -1230,7 +1232,8 @@ int hid_open_report(struct hid_device *device)
                }
        }
 
-       hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
+       hid_err(device, "item fetching failed at offset %u/%u\n",
+               size - (unsigned int)(end - start), size);
 err:
        kfree(parser->collection_stack);
 alloc_err:
index 17e17f9..947f19f 100644 (file)
@@ -75,13 +75,19 @@ static int drff_init(struct hid_device *hid)
 {
        struct drff_device *drff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_first_entry(&hid->inputs,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 7cd5651..c34f2e5 100644 (file)
@@ -47,13 +47,19 @@ static int emsff_init(struct hid_device *hid)
 {
        struct emsff_device *emsff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_first_entry(&hid->inputs,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 0f95c96..ecbd399 100644 (file)
@@ -64,14 +64,20 @@ static int gaff_init(struct hid_device *hid)
 {
        struct gaff_device *gaff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
        struct list_head *report_ptr = report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 84f8c12..d86a918 100644 (file)
@@ -470,6 +470,10 @@ static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
index 10a7205..8619b80 100644 (file)
@@ -124,13 +124,19 @@ static int holtekff_init(struct hid_device *hid)
 {
        struct holtekff_device *holtekff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output report found\n");
                return -ENODEV;
index 76969a2..447e8db 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_STAFF     0x502b
 #define USB_DEVICE_ID_GOOGLE_WAND      0x502d
 #define USB_DEVICE_ID_GOOGLE_WHISKERS  0x5030
+#define USB_DEVICE_ID_GOOGLE_MASTERBALL        0x503c
+#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index dd1a6c3..73d07e3 100644 (file)
@@ -50,11 +50,17 @@ int lg2ff_init(struct hid_device *hid)
 {
        struct lg2ff_device *lg2ff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
        if (!report)
index 9ecb6fd..b7e1949 100644 (file)
@@ -117,12 +117,19 @@ static const signed short ff3_joystick_ac[] = {
 
 int lg3ff_init(struct hid_device *hid)
 {
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        const signed short *ff_bits = ff3_joystick_ac;
        int error;
        int i;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
                return -ENODEV;
index 03f0220..5e6a0ce 100644 (file)
@@ -1253,8 +1253,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc
 
 int lg4ff_init(struct hid_device *hid)
 {
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
        struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
        const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
@@ -1266,6 +1266,13 @@ int lg4ff_init(struct hid_device *hid)
        int mmode_ret, mmode_idx = -1;
        u16 real_product_id;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
                return -1;
index c79a6ec..aed4ddc 100644 (file)
@@ -115,12 +115,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
 
 int lgff_init(struct hid_device* hid)
 {
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        const signed short *ff_bits = ff_joystick;
        int error;
        int i;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
                return -ENODEV;
index 0179f7e..8e91e2f 100644 (file)
@@ -1669,6 +1669,7 @@ static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev,
 
 #define HIDPP_FF_EFFECTID_NONE         -1
 #define HIDPP_FF_EFFECTID_AUTOCENTER   -2
+#define HIDPP_AUTOCENTER_PARAMS_LENGTH 18
 
 #define HIDPP_FF_MAX_PARAMS    20
 #define HIDPP_FF_RESERVED_SLOTS        1
@@ -2009,7 +2010,7 @@ static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id)
 static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude)
 {
        struct hidpp_ff_private_data *data = dev->ff->private;
-       u8 params[18];
+       u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH];
 
        dbg_hid("Setting autocenter to %d.\n", magnitude);
 
@@ -2077,23 +2078,34 @@ static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp
 static void hidpp_ff_destroy(struct ff_device *ff)
 {
        struct hidpp_ff_private_data *data = ff->private;
+       struct hid_device *hid = data->hidpp->hid_dev;
 
+       hid_info(hid, "Unloading HID++ force feedback.\n");
+
+       device_remove_file(&hid->dev, &dev_attr_range);
+       destroy_workqueue(data->wq);
        kfree(data->effect_ids);
 }
 
-static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+static int hidpp_ff_init(struct hidpp_device *hidpp,
+                        struct hidpp_ff_private_data *data)
 {
        struct hid_device *hid = hidpp->hid_dev;
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
        const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
        struct ff_device *ff;
-       struct hidpp_report response;
-       struct hidpp_ff_private_data *data;
-       int error, j, num_slots;
+       int error, j, num_slots = data->num_effects;
        u8 version;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        if (!dev) {
                hid_err(hid, "Struct input_dev not set!\n");
                return -EINVAL;
@@ -2109,27 +2121,17 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
                for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++)
                        set_bit(hidpp_ff_effects_v2[j], dev->ffbit);
 
-       /* Read number of slots available in device */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_GET_INFO, NULL, 0, &response);
-       if (error) {
-               if (error < 0)
-                       return error;
-               hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
-                       __func__, error);
-               return -EPROTO;
-       }
-
-       num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
-
        error = input_ff_create(dev, num_slots);
 
        if (error) {
                hid_err(dev, "Failed to create FF device!\n");
                return error;
        }
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       /*
+        * Create a copy of passed data, so we can transfer memory
+        * ownership to FF core
+        */
+       data = kmemdup(data, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL);
@@ -2145,10 +2147,7 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        }
 
        data->hidpp = hidpp;
-       data->feature_index = feature_index;
        data->version = version;
-       data->slot_autocenter = 0;
-       data->num_effects = num_slots;
        for (j = 0; j < num_slots; j++)
                data->effect_ids[j] = -1;
 
@@ -2162,68 +2161,20 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        ff->set_autocenter = hidpp_ff_set_autocenter;
        ff->destroy = hidpp_ff_destroy;
 
-
-       /* reset all forces */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_RESET_ALL, NULL, 0, &response);
-
-       /* Read current Range */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_GET_APERTURE, NULL, 0, &response);
-       if (error)
-               hid_warn(hidpp->hid_dev, "Failed to read range from device!\n");
-       data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]);
-
        /* Create sysfs interface */
        error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
        if (error)
                hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error);
 
-       /* Read the current gain values */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response);
-       if (error)
-               hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n");
-       data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]);
-       /* ignore boost value at response.fap.params[2] */
-
        /* init the hardware command queue */
        atomic_set(&data->workqueue_size, 0);
 
-       /* initialize with zero autocenter to get wheel in usable state */
-       hidpp_ff_set_autocenter(dev, 0);
-
        hid_info(hid, "Force feedback support loaded (firmware release %d).\n",
                 version);
 
        return 0;
 }
 
-static int hidpp_ff_deinit(struct hid_device *hid)
-{
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
-       struct hidpp_ff_private_data *data;
-
-       if (!dev) {
-               hid_err(hid, "Struct input_dev not found!\n");
-               return -EINVAL;
-       }
-
-       hid_info(hid, "Unloading HID++ force feedback.\n");
-       data = dev->ff->private;
-       if (!data) {
-               hid_err(hid, "Private data not found!\n");
-               return -EINVAL;
-       }
-
-       destroy_workqueue(data->wq);
-       device_remove_file(&hid->dev, &dev_attr_range);
-
-       return 0;
-}
-
-
 /* ************************************************************************** */
 /*                                                                            */
 /* Device Support                                                             */
@@ -2725,24 +2676,93 @@ static int k400_connect(struct hid_device *hdev, bool connected)
 
 #define HIDPP_PAGE_G920_FORCE_FEEDBACK                 0x8123
 
-static int g920_get_config(struct hidpp_device *hidpp)
+static int g920_ff_set_autocenter(struct hidpp_device *hidpp,
+                                 struct hidpp_ff_private_data *data)
 {
+       struct hidpp_report response;
+       u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH] = {
+               [1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART,
+       };
+       int ret;
+
+       /* initialize with zero autocenter to get wheel in usable state */
+
+       dbg_hid("Setting autocenter to 0.\n");
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_DOWNLOAD_EFFECT,
+                                         params, ARRAY_SIZE(params),
+                                         &response);
+       if (ret)
+               hid_warn(hidpp->hid_dev, "Failed to autocenter device!\n");
+       else
+               data->slot_autocenter = response.fap.params[0];
+
+       return ret;
+}
+
+static int g920_get_config(struct hidpp_device *hidpp,
+                          struct hidpp_ff_private_data *data)
+{
+       struct hidpp_report response;
        u8 feature_type;
-       u8 feature_index;
        int ret;
 
+       memset(data, 0, sizeof(*data));
+
        /* Find feature and store for later use */
        ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
-               &feature_index, &feature_type);
+                                    &data->feature_index, &feature_type);
        if (ret)
                return ret;
 
-       ret = hidpp_ff_init(hidpp, feature_index);
+       /* Read number of slots available in device */
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_GET_INFO,
+                                         NULL, 0,
+                                         &response);
+       if (ret) {
+               if (ret < 0)
+                       return ret;
+               hid_err(hidpp->hid_dev,
+                       "%s: received protocol error 0x%02x\n", __func__, ret);
+               return -EPROTO;
+       }
+
+       data->num_effects = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
+
+       /* reset all forces */
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_RESET_ALL,
+                                         NULL, 0,
+                                         &response);
        if (ret)
-               hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n",
-                               ret);
+               hid_warn(hidpp->hid_dev, "Failed to reset all forces!\n");
 
-       return 0;
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_GET_APERTURE,
+                                         NULL, 0,
+                                         &response);
+       if (ret) {
+               hid_warn(hidpp->hid_dev,
+                        "Failed to read range from device!\n");
+       }
+       data->range = ret ?
+               900 : get_unaligned_be16(&response.fap.params[0]);
+
+       /* Read the current gain values */
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_GET_GLOBAL_GAINS,
+                                         NULL, 0,
+                                         &response);
+       if (ret)
+               hid_warn(hidpp->hid_dev,
+                        "Failed to read gain values from device!\n");
+       data->gain = ret ?
+               0xffff : get_unaligned_be16(&response.fap.params[0]);
+
+       /* ignore boost value at response.fap.params[2] */
+
+       return g920_ff_set_autocenter(hidpp, data);
 }
 
 /* -------------------------------------------------------------------------- */
@@ -3458,34 +3478,45 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id)
        return report->field[0]->report_count + 1;
 }
 
-static bool hidpp_validate_report(struct hid_device *hdev, int id,
-                                 int expected_length, bool optional)
+static bool hidpp_validate_device(struct hid_device *hdev)
 {
-       int report_length;
+       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+       int id, report_length, supported_reports = 0;
 
-       if (id >= HID_MAX_IDS || id < 0) {
-               hid_err(hdev, "invalid HID report id %u\n", id);
-               return false;
+       id = REPORT_ID_HIDPP_SHORT;
+       report_length = hidpp_get_report_length(hdev, id);
+       if (report_length) {
+               if (report_length < HIDPP_REPORT_SHORT_LENGTH)
+                       goto bad_device;
+
+               supported_reports++;
        }
 
+       id = REPORT_ID_HIDPP_LONG;
        report_length = hidpp_get_report_length(hdev, id);
-       if (!report_length)
-               return optional;
+       if (report_length) {
+               if (report_length < HIDPP_REPORT_LONG_LENGTH)
+                       goto bad_device;
 
-       if (report_length < expected_length) {
-               hid_warn(hdev, "not enough values in hidpp report %d\n", id);
-               return false;
+               supported_reports++;
        }
 
-       return true;
-}
+       id = REPORT_ID_HIDPP_VERY_LONG;
+       report_length = hidpp_get_report_length(hdev, id);
+       if (report_length) {
+               if (report_length < HIDPP_REPORT_LONG_LENGTH ||
+                   report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
+                       goto bad_device;
 
-static bool hidpp_validate_device(struct hid_device *hdev)
-{
-       return hidpp_validate_report(hdev, REPORT_ID_HIDPP_SHORT,
-                                    HIDPP_REPORT_SHORT_LENGTH, false) &&
-              hidpp_validate_report(hdev, REPORT_ID_HIDPP_LONG,
-                                    HIDPP_REPORT_LONG_LENGTH, true);
+               supported_reports++;
+               hidpp->very_long_report_length = report_length;
+       }
+
+       return supported_reports;
+
+bad_device:
+       hid_warn(hdev, "not enough values in hidpp report %d\n", id);
+       return false;
 }
 
 static bool hidpp_application_equals(struct hid_device *hdev,
@@ -3505,6 +3536,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
        int ret;
        bool connected;
        unsigned int connect_mask = HID_CONNECT_DEFAULT;
+       struct hidpp_ff_private_data data;
 
        /* report_fixup needs drvdata to be set before we call hid_parse */
        hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
@@ -3531,11 +3563,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        }
 
-       hidpp->very_long_report_length =
-               hidpp_get_report_length(hdev, REPORT_ID_HIDPP_VERY_LONG);
-       if (hidpp->very_long_report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
-               hidpp->very_long_report_length = HIDPP_REPORT_VERY_LONG_MAX_LENGTH;
-
        if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE)
                hidpp->quirks |= HIDPP_QUIRK_UNIFYING;
 
@@ -3614,7 +3641,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                if (ret)
                        goto hid_hw_init_fail;
        } else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
-               ret = g920_get_config(hidpp);
+               ret = g920_get_config(hidpp, &data);
                if (ret)
                        goto hid_hw_init_fail;
        }
@@ -3636,6 +3663,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto hid_hw_start_fail;
        }
 
+       if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+               ret = hidpp_ff_init(hidpp, &data);
+               if (ret)
+                       hid_warn(hidpp->hid_dev,
+                    "Unable to initialize force feedback support, errno %d\n",
+                                ret);
+       }
+
        return ret;
 
 hid_hw_init_fail:
@@ -3658,9 +3693,6 @@ static void hidpp_remove(struct hid_device *hdev)
 
        sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
 
-       if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)
-               hidpp_ff_deinit(hdev);
-
        hid_hw_stop(hdev);
        cancel_work_sync(&hidpp->work);
        mutex_destroy(&hidpp->send_mutex);
index 2cf8385..2d8b589 100644 (file)
@@ -328,11 +328,17 @@ static int ms_play_effect(struct input_dev *dev, void *data,
 
 static int ms_init_ff(struct hid_device *hdev)
 {
-       struct hid_input *hidinput = list_entry(hdev->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *input_dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *input_dev;
        struct ms_data *ms = hid_get_drvdata(hdev);
 
+       if (list_empty(&hdev->inputs)) {
+               hid_err(hdev, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+       input_dev = hidinput->input;
+
        if (!(ms->quirks & MS_QUIRK_FF))
                return 0;
 
index 5a3b3d9..2666af0 100644 (file)
@@ -516,7 +516,7 @@ static void pcmidi_setup_extra_keys(
                MY PICTURES =>  KEY_WORDPROCESSOR
                MY MUSIC=>      KEY_SPREADSHEET
        */
-       unsigned int keys[] = {
+       static const unsigned int keys[] = {
                KEY_FN,
                KEY_MESSENGER, KEY_CALENDAR,
                KEY_ADDRESSBOOK, KEY_DOCUMENTS,
@@ -532,7 +532,7 @@ static void pcmidi_setup_extra_keys(
                0
        };
 
-       unsigned int *pkeys = &keys[0];
+       const unsigned int *pkeys = &keys[0];
        unsigned short i;
 
        if (pm->ifnum != 1)  /* only set up ONCE for interace 1 */
index 73c0f7a..4c6ed6e 100644 (file)
@@ -2254,9 +2254,15 @@ static int sony_play_effect(struct input_dev *dev, void *data,
 
 static int sony_init_ff(struct sony_sc *sc)
 {
-       struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *input_dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *input_dev;
+
+       if (list_empty(&sc->hdev->inputs)) {
+               hid_err(sc->hdev, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list);
+       input_dev = hidinput->input;
 
        input_set_capability(input_dev, EV_FF, FF_RUMBLE);
        return input_ff_create_memless(input_dev, NULL, sony_play_effect);
index bdfc5ff..90acef3 100644 (file)
@@ -124,12 +124,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
        struct tmff_device *tmff;
        struct hid_report *report;
        struct list_head *report_list;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                                       struct hid_input, list);
-       struct input_dev *input_dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *input_dev;
        int error;
        int i;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       input_dev = hidinput->input;
+
        tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
        if (!tmff)
                return -ENOMEM;
index f90959e..3abaca0 100644 (file)
@@ -54,11 +54,17 @@ static int zpff_init(struct hid_device *hid)
 {
        struct zpff_device *zpff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        int i, error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        for (i = 0; i < 4; i++) {
                report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
                if (!report)
index 2a7c6e3..04c0881 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/pm.h>
-#include <linux/pm_runtime.h>
 #include <linux/device.h>
 #include <linux/wait.h>
 #include <linux/err.h>
@@ -48,8 +47,6 @@
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV       BIT(0)
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
-#define I2C_HID_QUIRK_NO_RUNTIME_PM            BIT(2)
-#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP                BIT(3)
 #define I2C_HID_QUIRK_BOGUS_IRQ                        BIT(4)
 
 /* flags */
@@ -172,14 +169,7 @@ static const struct i2c_hid_quirks {
        { USB_VENDOR_ID_WEIDA, HID_ANY_ID,
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
-               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
-               I2C_HID_QUIRK_NO_RUNTIME_PM },
-       { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
-               I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
-       { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
-               I2C_HID_QUIRK_NO_RUNTIME_PM },
-       { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
-               I2C_HID_QUIRK_NO_RUNTIME_PM },
+               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID,
                 I2C_HID_QUIRK_BOGUS_IRQ },
        { 0, 0 }
@@ -397,7 +387,6 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
 {
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        int ret;
-       unsigned long now, delay;
 
        i2c_hid_dbg(ihid, "%s\n", __func__);
 
@@ -415,22 +404,9 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
                        goto set_pwr_exit;
        }
 
-       if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
-           power_state == I2C_HID_PWR_ON) {
-               now = jiffies;
-               if (time_after(ihid->sleep_delay, now)) {
-                       delay = jiffies_to_usecs(ihid->sleep_delay - now);
-                       usleep_range(delay, delay + 1);
-               }
-       }
-
        ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
                0, NULL, 0, NULL, 0);
 
-       if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
-           power_state == I2C_HID_PWR_SLEEP)
-               ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
-
        if (ret)
                dev_err(&client->dev, "failed to change power setting.\n");
 
@@ -471,8 +447,12 @@ static int i2c_hid_hwreset(struct i2c_client *client)
        if (ret) {
                dev_err(&client->dev, "failed to reset device.\n");
                i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+               goto out_unlock;
        }
 
+       /* At least some SIS devices need this after reset */
+       ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+
 out_unlock:
        mutex_unlock(&ihid->reset_lock);
        return ret;
@@ -791,11 +771,6 @@ static int i2c_hid_open(struct hid_device *hid)
 {
        struct i2c_client *client = hid->driver_data;
        struct i2c_hid *ihid = i2c_get_clientdata(client);
-       int ret = 0;
-
-       ret = pm_runtime_get_sync(&client->dev);
-       if (ret < 0)
-               return ret;
 
        set_bit(I2C_HID_STARTED, &ihid->flags);
        return 0;
@@ -807,27 +782,6 @@ static void i2c_hid_close(struct hid_device *hid)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
 
        clear_bit(I2C_HID_STARTED, &ihid->flags);
-
-       /* Save some power */
-       pm_runtime_put(&client->dev);
-}
-
-static int i2c_hid_power(struct hid_device *hid, int lvl)
-{
-       struct i2c_client *client = hid->driver_data;
-       struct i2c_hid *ihid = i2c_get_clientdata(client);
-
-       i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
-
-       switch (lvl) {
-       case PM_HINT_FULLON:
-               pm_runtime_get_sync(&client->dev);
-               break;
-       case PM_HINT_NORMAL:
-               pm_runtime_put(&client->dev);
-               break;
-       }
-       return 0;
 }
 
 struct hid_ll_driver i2c_hid_ll_driver = {
@@ -836,7 +790,6 @@ struct hid_ll_driver i2c_hid_ll_driver = {
        .stop = i2c_hid_stop,
        .open = i2c_hid_open,
        .close = i2c_hid_close,
-       .power = i2c_hid_power,
        .output_report = i2c_hid_output_report,
        .raw_request = i2c_hid_raw_request,
 };
@@ -1104,9 +1057,6 @@ static int i2c_hid_probe(struct i2c_client *client,
 
        i2c_hid_acpi_fix_up_power(&client->dev);
 
-       pm_runtime_get_noresume(&client->dev);
-       pm_runtime_set_active(&client->dev);
-       pm_runtime_enable(&client->dev);
        device_enable_async_suspend(&client->dev);
 
        /* Make sure there is something at this address */
@@ -1114,16 +1064,16 @@ static int i2c_hid_probe(struct i2c_client *client,
        if (ret < 0) {
                dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
                ret = -ENXIO;
-               goto err_pm;
+               goto err_regulator;
        }
 
        ret = i2c_hid_fetch_hid_descriptor(ihid);
        if (ret < 0)
-               goto err_pm;
+               goto err_regulator;
 
        ret = i2c_hid_init_irq(client);
        if (ret < 0)
-               goto err_pm;
+               goto err_regulator;
 
        hid = hid_allocate_device();
        if (IS_ERR(hid)) {
@@ -1154,9 +1104,6 @@ static int i2c_hid_probe(struct i2c_client *client,
                goto err_mem_free;
        }
 
-       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
-               pm_runtime_put(&client->dev);
-
        return 0;
 
 err_mem_free:
@@ -1165,10 +1112,6 @@ err_mem_free:
 err_irq:
        free_irq(client->irq, ihid);
 
-err_pm:
-       pm_runtime_put_noidle(&client->dev);
-       pm_runtime_disable(&client->dev);
-
 err_regulator:
        regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
                               ihid->pdata.supplies);
@@ -1181,12 +1124,6 @@ static int i2c_hid_remove(struct i2c_client *client)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        struct hid_device *hid;
 
-       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
-               pm_runtime_get_sync(&client->dev);
-       pm_runtime_disable(&client->dev);
-       pm_runtime_set_suspended(&client->dev);
-       pm_runtime_put_noidle(&client->dev);
-
        hid = ihid->hid;
        hid_destroy_device(hid);
 
@@ -1219,25 +1156,15 @@ static int i2c_hid_suspend(struct device *dev)
        int wake_status;
 
        if (hid->driver && hid->driver->suspend) {
-               /*
-                * Wake up the device so that IO issues in
-                * HID driver's suspend code can succeed.
-                */
-               ret = pm_runtime_resume(dev);
-               if (ret < 0)
-                       return ret;
-
                ret = hid->driver->suspend(hid, PMSG_SUSPEND);
                if (ret < 0)
                        return ret;
        }
 
-       if (!pm_runtime_suspended(dev)) {
-               /* Save some power */
-               i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+       /* Save some power */
+       i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
 
-               disable_irq(client->irq);
-       }
+       disable_irq(client->irq);
 
        if (device_may_wakeup(&client->dev)) {
                wake_status = enable_irq_wake(client->irq);
@@ -1279,11 +1206,6 @@ static int i2c_hid_resume(struct device *dev)
                                wake_status);
        }
 
-       /* We'll resume to full power */
-       pm_runtime_disable(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
-
        enable_irq(client->irq);
 
        /* Instead of resetting device, simply powers the device on. This
@@ -1304,30 +1226,8 @@ static int i2c_hid_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
-static int i2c_hid_runtime_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-
-       i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
-       disable_irq(client->irq);
-       return 0;
-}
-
-static int i2c_hid_runtime_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-
-       enable_irq(client->irq);
-       i2c_hid_set_power(client, I2C_HID_PWR_ON);
-       return 0;
-}
-#endif
-
 static const struct dev_pm_ops i2c_hid_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
-       SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
-                          NULL)
 };
 
 static const struct i2c_device_id i2c_hid_id_table[] = {
index 75078c8..d31ea82 100644 (file)
@@ -323,6 +323,25 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
                .driver_data = (void *)&sipodev_desc
        },
        {
+               /*
+                * There are at least 2 Primebook C11B versions, the older
+                * version has a product-name of "Primebook C11B", and a
+                * bios version / release / firmware revision of:
+                * V2.1.2 / 05/03/2018 / 18.2
+                * The new version has "PRIMEBOOK C11B" as product-name and a
+                * bios version / release / firmware revision of:
+                * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2
+                * Only the older version needs this quirk, note the newer
+                * version will not match as it has a different product-name.
+                */
+               .ident = "Trekstor Primebook C11B",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"),
+               },
+               .driver_data = (void *)&sipodev_desc
+       },
+       {
                .ident = "Direkt-Tek DTLAPY116-2",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
index 1b0a0cc..513d7a4 100644 (file)
@@ -84,7 +84,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
        return  0;
 out:
        dev_err(&cl->device->dev, "error in allocating Tx pool\n");
-       ishtp_cl_free_rx_ring(cl);
+       ishtp_cl_free_tx_ring(cl);
        return  -ENOMEM;
 }
 
index 4a7f8d3..203d27d 100644 (file)
@@ -202,6 +202,21 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac,
        }
 }
 
+/*
+ * Convert a signed 32-bit integer to an unsigned n-bit integer. Undoes
+ * the normally-helpful work of 'hid_snto32' for fields that use signed
+ * ranges for questionable reasons.
+ */
+static inline __u32 wacom_s32tou(s32 value, __u8 n)
+{
+       switch (n) {
+       case 8:  return ((__u8)value);
+       case 16: return ((__u16)value);
+       case 32: return ((__u32)value);
+       }
+       return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value;
+}
+
 extern const struct hid_device_id wacom_ids[];
 
 void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
index 2b0a5b8..ccb7452 100644 (file)
@@ -2303,7 +2303,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
        case HID_DG_TOOLSERIALNUMBER:
                if (value) {
                        wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
-                       wacom_wac->serial[0] |= (__u32)value;
+                       wacom_wac->serial[0] |= wacom_s32tou(value, field->report_size);
                }
                return;
        case HID_DG_TWIST:
@@ -2319,15 +2319,17 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
                return;
        case WACOM_HID_WD_SERIALHI:
                if (value) {
+                       __u32 raw_value = wacom_s32tou(value, field->report_size);
+
                        wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
-                       wacom_wac->serial[0] |= ((__u64)value) << 32;
+                       wacom_wac->serial[0] |= ((__u64)raw_value) << 32;
                        /*
                         * Non-USI EMR devices may contain additional tool type
                         * information here. See WACOM_HID_WD_TOOLTYPE case for
                         * more details.
                         */
                        if (value >> 20 == 1) {
-                               wacom_wac->id[0] |= value & 0xFFFFF;
+                               wacom_wac->id[0] |= raw_value & 0xFFFFF;
                        }
                }
                return;
@@ -2339,7 +2341,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
                 * bitwise OR so the complete value can be built
                 * up over time :(
                 */
-               wacom_wac->id[0] |= value;
+               wacom_wac->id[0] |= wacom_s32tou(value, field->report_size);
                return;
        case WACOM_HID_WD_OFFSETLEFT:
                if (features->offset_left && value != features->offset_left)
index 0037e2b..8a51dcf 100644 (file)
@@ -170,7 +170,7 @@ static inline int ina3221_wait_for_data(struct ina3221_data *ina)
 
        /* Polling the CVRF bit to make sure read data is ready */
        return regmap_field_read_poll_timeout(ina->fields[F_CVRF],
-                                             cvrf, cvrf, wait, 100000);
+                                             cvrf, cvrf, wait, wait * 2);
 }
 
 static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
index b26419d..281c81e 100644 (file)
 #define FANCTL1_FMR_REG                0x00    /* Bank 3; 1 reg per channel */
 #define FANCTL1_OUT_REG                0x10    /* Bank 3; 1 reg per channel */
 
+#define VOLT_MONITOR_MODE      0x0
+#define THERMAL_DIODE_MODE     0x1
+#define THERMISTOR_MODE                0x3
+
 #define ENABLE_TSI     BIT(1)
 
 static const unsigned short normal_i2c[] = {
@@ -935,11 +939,16 @@ static int nct7904_probe(struct i2c_client *client,
        for (i = 0; i < 4; i++) {
                val = (ret >> (i * 2)) & 0x03;
                bit = (1 << i);
-               if (val == 0) {
+               if (val == VOLT_MONITOR_MODE) {
                        data->tcpu_mask &= ~bit;
+               } else if (val == THERMAL_DIODE_MODE && i < 2) {
+                       data->temp_mode |= bit;
+                       data->vsen_mask &= ~(0x06 << (i * 2));
+               } else if (val == THERMISTOR_MODE) {
+                       data->vsen_mask &= ~(0x02 << (i * 2));
                } else {
-                       if (val == 0x1 || val == 0x2)
-                               data->temp_mode |= bit;
+                       /* Reserved */
+                       data->tcpu_mask &= ~bit;
                        data->vsen_mask &= ~(0x06 << (i * 2));
                }
        }
index fa9d34a..f72803a 100644 (file)
@@ -626,6 +626,9 @@ static void intel_th_gth_switch(struct intel_th_device *thdev,
        if (!count)
                dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");
 
+       /* De-assert the trigger */
+       iowrite32(0, gth->base + REG_CTS_CTL);
+
        intel_th_gth_stop(gth, output, false);
        intel_th_gth_start(gth, output);
 }
index fc9f15f..6d240df 100644 (file)
@@ -164,7 +164,7 @@ struct msc {
 };
 
 static LIST_HEAD(msu_buffer_list);
-static struct mutex msu_buffer_mutex;
+static DEFINE_MUTEX(msu_buffer_mutex);
 
 /**
  * struct msu_buffer_entry - internal MSU buffer bookkeeping
@@ -327,7 +327,7 @@ static size_t msc_win_total_sz(struct msc_window *win)
                struct msc_block_desc *bdesc = sg_virt(sg);
 
                if (msc_block_wrapped(bdesc))
-                       return win->nr_blocks << PAGE_SHIFT;
+                       return (size_t)win->nr_blocks << PAGE_SHIFT;
 
                size += msc_total_sz(bdesc);
                if (msc_block_last_written(bdesc))
@@ -1848,9 +1848,14 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
                len = cp - buf;
 
        mode = kstrndup(buf, len, GFP_KERNEL);
+       if (!mode)
+               return -ENOMEM;
+
        i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
-       if (i >= 0)
+       if (i >= 0) {
+               kfree(mode);
                goto found;
+       }
 
        /* Buffer sinks only work with a usable IRQ */
        if (!msc->do_irq) {
index 91dfeba..03ca5b1 100644 (file)
@@ -200,6 +200,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
        {
+               /* Comet Lake PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
+       {
                /* Ice Lake NNPI */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -209,6 +214,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Jasper Lake PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        { 0 },
 };
 
index fa66951..7b098ff 100644 (file)
 #define ASPEED_I2CD_S_TX_CMD                           BIT(2)
 #define ASPEED_I2CD_M_TX_CMD                           BIT(1)
 #define ASPEED_I2CD_M_START_CMD                                BIT(0)
+#define ASPEED_I2CD_MASTER_CMDS_MASK                                          \
+               (ASPEED_I2CD_M_STOP_CMD |                                      \
+                ASPEED_I2CD_M_S_RX_CMD_LAST |                                 \
+                ASPEED_I2CD_M_RX_CMD |                                        \
+                ASPEED_I2CD_M_TX_CMD |                                        \
+                ASPEED_I2CD_M_START_CMD)
 
 /* 0x18 : I2CD Slave Device Address Register   */
 #define ASPEED_I2CD_DEV_ADDR_MASK                      GENMASK(6, 0)
@@ -336,18 +342,19 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
        struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
        u8 slave_addr = i2c_8bit_addr_from_msg(msg);
 
-       bus->master_state = ASPEED_I2C_MASTER_START;
-
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
        /*
         * If it's requested in the middle of a slave session, set the master
         * state to 'pending' then H/W will continue handling this master
         * command when the bus comes back to the idle state.
         */
-       if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
+       if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
                bus->master_state = ASPEED_I2C_MASTER_PENDING;
+               return;
+       }
 #endif /* CONFIG_I2C_SLAVE */
 
+       bus->master_state = ASPEED_I2C_MASTER_START;
        bus->buf_index = 0;
 
        if (msg->flags & I2C_M_RD) {
@@ -422,20 +429,6 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
                }
        }
 
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
-       /*
-        * A pending master command will be started by H/W when the bus comes
-        * back to idle state after completing a slave operation so change the
-        * master state from 'pending' to 'start' at here if slave is inactive.
-        */
-       if (bus->master_state == ASPEED_I2C_MASTER_PENDING) {
-               if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
-                       goto out_no_complete;
-
-               bus->master_state = ASPEED_I2C_MASTER_START;
-       }
-#endif /* CONFIG_I2C_SLAVE */
-
        /* Master is not currently active, irq was for someone else. */
        if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
            bus->master_state == ASPEED_I2C_MASTER_PENDING)
@@ -462,11 +455,15 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
                /*
                 * If a peer master starts a xfer immediately after it queues a
-                * master command, change its state to 'pending' then H/W will
-                * continue the queued master xfer just after completing the
-                * slave mode session.
+                * master command, clear the queued master command and change
+                * its state to 'pending'. To simplify handling of pending
+                * cases, it uses S/W solution instead of H/W command queue
+                * handling.
                 */
                if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
+                       writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
+                               ~ASPEED_I2CD_MASTER_CMDS_MASK,
+                              bus->base + ASPEED_I2C_CMD_REG);
                        bus->master_state = ASPEED_I2C_MASTER_PENDING;
                        dev_dbg(bus->dev,
                                "master goes pending due to a slave start\n");
@@ -629,6 +626,14 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
                        irq_handled |= aspeed_i2c_master_irq(bus,
                                                             irq_remaining);
        }
+
+       /*
+        * Start a pending master command at here if a slave operation is
+        * completed.
+        */
+       if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
+           bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
+               aspeed_i2c_do_start(bus);
 #else
        irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
 #endif /* CONFIG_I2C_SLAVE */
@@ -691,6 +696,15 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
                     ASPEED_I2CD_BUS_BUSY_STS))
                        aspeed_i2c_recover_bus(bus);
 
+               /*
+                * If timed out and the state is still pending, drop the pending
+                * master command.
+                */
+               spin_lock_irqsave(&bus->lock, flags);
+               if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
+                       bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
+               spin_unlock_irqrestore(&bus->lock, flags);
+
                return -ETIMEDOUT;
        }
 
index 29eae1b..2152ec5 100644 (file)
@@ -875,7 +875,7 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
 
 static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
 {
-       if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+       if (i2c_check_quirks(adap, I2C_AQ_NO_ZERO_LEN))
                return I2C_FUNC_I2C |
                        (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
        else
index d36cf08..b24e7b9 100644 (file)
@@ -305,7 +305,7 @@ struct stm32f7_i2c_dev {
        struct regmap *regmap;
 };
 
-/**
+/*
  * All these values are coming from I2C Specification, Version 6.0, 4th of
  * April 2014.
  *
@@ -1192,6 +1192,8 @@ static void stm32f7_i2c_slave_start(struct stm32f7_i2c_dev *i2c_dev)
                        STM32F7_I2C_CR1_TXIE;
                stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask);
 
+               /* Write 1st data byte */
+               writel_relaxed(value, base + STM32F7_I2C_TXDR);
        } else {
                /* Notify i2c slave that new write transfer is starting */
                i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
@@ -1501,7 +1503,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
        void __iomem *base = i2c_dev->base;
        struct device *dev = i2c_dev->dev;
        struct stm32_i2c_dma *dma = i2c_dev->dma;
-       u32 mask, status;
+       u32 status;
 
        status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
 
@@ -1526,12 +1528,15 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
                f7_msg->result = -EINVAL;
        }
 
-       /* Disable interrupts */
-       if (stm32f7_i2c_is_slave_registered(i2c_dev))
-               mask = STM32F7_I2C_XFER_IRQ_MASK;
-       else
-               mask = STM32F7_I2C_ALL_IRQ_MASK;
-       stm32f7_i2c_disable_irq(i2c_dev, mask);
+       if (!i2c_dev->slave_running) {
+               u32 mask;
+               /* Disable interrupts */
+               if (stm32f7_i2c_is_slave_registered(i2c_dev))
+                       mask = STM32F7_I2C_XFER_IRQ_MASK;
+               else
+                       mask = STM32F7_I2C_ALL_IRQ_MASK;
+               stm32f7_i2c_disable_irq(i2c_dev, mask);
+       }
 
        /* Disable dma */
        if (i2c_dev->use_dma) {
index 9cb2aa1..62a1c92 100644 (file)
@@ -39,6 +39,7 @@ struct i2c_acpi_lookup {
        int index;
        u32 speed;
        u32 min_speed;
+       u32 force_speed;
 };
 
 /**
@@ -285,6 +286,19 @@ i2c_acpi_match_device(const struct acpi_device_id *matches,
        return acpi_match_device(matches, &client->dev);
 }
 
+static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
+       /*
+        * These Silead touchscreen controllers only work at 400KHz, for
+        * some reason they do not work at 100KHz. On some devices the ACPI
+        * tables list another device at their bus as only being capable
+        * of 100KHz, testing has shown that these other devices work fine
+        * at 400KHz (as can be expected of any recent i2c hw) so we force
+        * the speed of the bus to 400 KHz if a Silead device is present.
+        */
+       { "MSSL1680", 0 },
+       {}
+};
+
 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
                                           void *data, void **return_value)
 {
@@ -303,6 +317,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
        if (lookup->speed <= lookup->min_speed)
                lookup->min_speed = lookup->speed;
 
+       if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
+               lookup->force_speed = 400000;
+
        return AE_OK;
 }
 
@@ -340,7 +357,16 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
                return 0;
        }
 
-       return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
+       if (lookup.force_speed) {
+               if (lookup.force_speed != lookup.min_speed)
+                       dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n",
+                                lookup.min_speed, lookup.force_speed);
+               return lookup.force_speed;
+       } else if (lookup.min_speed != UINT_MAX) {
+               return lookup.min_speed;
+       } else {
+               return 0;
+       }
 }
 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
 
index 6f632d5..7eb4199 100644 (file)
@@ -245,14 +245,14 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
                }
 
                client = of_i2c_register_device(adap, rd->dn);
-               put_device(&adap->dev);
-
                if (IS_ERR(client)) {
                        dev_err(&adap->dev, "failed to create client for '%pOF'\n",
                                 rd->dn);
+                       put_device(&adap->dev);
                        of_node_clear_flag(rd->dn, OF_POPULATED);
                        return notifier_from_errno(PTR_ERR(client));
                }
+               put_device(&adap->dev);
                break;
        case OF_RECONFIG_CHANGE_REMOVE:
                /* already depopulated? */
index 663f8a5..73aee59 100644 (file)
@@ -1399,7 +1399,7 @@ static int stm32_adc_dma_start(struct iio_dev *indio_dev)
        cookie = dmaengine_submit(desc);
        ret = dma_submit_error(cookie);
        if (ret) {
-               dmaengine_terminate_all(adc->dma_chan);
+               dmaengine_terminate_sync(adc->dma_chan);
                return ret;
        }
 
@@ -1477,7 +1477,7 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
                stm32_adc_conv_irq_disable(adc);
 
        if (adc->dma_chan)
-               dmaengine_terminate_all(adc->dma_chan);
+               dmaengine_terminate_sync(adc->dma_chan);
 
        if (stm32_adc_set_trig(indio_dev, NULL))
                dev_err(&indio_dev->dev, "Can't clear trigger\n");
index b99d738..8743b2f 100644 (file)
@@ -317,8 +317,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
        struct adis16480 *st = iio_priv(indio_dev);
        unsigned int t, reg;
 
+       if (val < 0 || val2 < 0)
+               return -EINVAL;
+
        t =  val * 1000 + val2 / 1000;
-       if (t <= 0)
+       if (t == 0)
                return -EINVAL;
 
        /*
index b17f060..868281b 100644 (file)
@@ -114,54 +114,63 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .name = "MPU6050",
                .reg = &reg_set_6050,
                .config = &chip_config_6050,
+               .fifo_size = 1024,
        },
        {
                .whoami = INV_MPU6500_WHOAMI_VALUE,
                .name = "MPU6500",
                .reg = &reg_set_6500,
                .config = &chip_config_6050,
+               .fifo_size = 512,
        },
        {
                .whoami = INV_MPU6515_WHOAMI_VALUE,
                .name = "MPU6515",
                .reg = &reg_set_6500,
                .config = &chip_config_6050,
+               .fifo_size = 512,
        },
        {
                .whoami = INV_MPU6000_WHOAMI_VALUE,
                .name = "MPU6000",
                .reg = &reg_set_6050,
                .config = &chip_config_6050,
+               .fifo_size = 1024,
        },
        {
                .whoami = INV_MPU9150_WHOAMI_VALUE,
                .name = "MPU9150",
                .reg = &reg_set_6050,
                .config = &chip_config_6050,
+               .fifo_size = 1024,
        },
        {
                .whoami = INV_MPU9250_WHOAMI_VALUE,
                .name = "MPU9250",
                .reg = &reg_set_6500,
                .config = &chip_config_6050,
+               .fifo_size = 512,
        },
        {
                .whoami = INV_MPU9255_WHOAMI_VALUE,
                .name = "MPU9255",
                .reg = &reg_set_6500,
                .config = &chip_config_6050,
+               .fifo_size = 512,
        },
        {
                .whoami = INV_ICM20608_WHOAMI_VALUE,
                .name = "ICM20608",
                .reg = &reg_set_6500,
                .config = &chip_config_6050,
+               .fifo_size = 512,
        },
        {
                .whoami = INV_ICM20602_WHOAMI_VALUE,
                .name = "ICM20602",
                .reg = &reg_set_icm20602,
                .config = &chip_config_6050,
+               .fifo_size = 1008,
        },
 };
 
index db1c690..5123567 100644 (file)
@@ -100,12 +100,14 @@ struct inv_mpu6050_chip_config {
  *  @name:      name of the chip.
  *  @reg:   register map of the chip.
  *  @config:    configuration of the chip.
+ *  @fifo_size:        size of the FIFO in bytes.
  */
 struct inv_mpu6050_hw {
        u8 whoami;
        u8 *name;
        const struct inv_mpu6050_reg_map *reg;
        const struct inv_mpu6050_chip_config *config;
+       size_t fifo_size;
 };
 
 /*
index 5f9a5de..72d8c57 100644 (file)
@@ -180,9 +180,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
                        "failed to ack interrupt\n");
                goto flush_fifo;
        }
-       /* handle fifo overflow by reseting fifo */
-       if (int_status & INV_MPU6050_BIT_FIFO_OVERFLOW_INT)
-               goto flush_fifo;
        if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
                dev_warn(regmap_get_device(st->map),
                        "spurious interrupt with status 0x%x\n", int_status);
@@ -211,6 +208,18 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
        if (result)
                goto end_session;
        fifo_count = get_unaligned_be16(&data[0]);
+
+       /*
+        * Handle fifo overflow by resetting fifo.
+        * Reset if there is only 3 data set free remaining to mitigate
+        * possible delay between reading fifo count and fifo data.
+        */
+       nb = 3 * bytes_per_datum;
+       if (fifo_count >= st->hw->fifo_size - nb) {
+               dev_warn(regmap_get_device(st->map), "fifo overflow reset\n");
+               goto flush_fifo;
+       }
+
        /* compute and process all complete datum */
        nb = fifo_count / bytes_per_datum;
        inv_mpu6050_update_period(st, pf->timestamp, nb);
index 8b50d56..01eb8cc 100644 (file)
@@ -110,7 +110,7 @@ static int srf04_read(struct srf04_data *data)
        udelay(data->cfg->trigger_pulse_us);
        gpiod_set_value(data->gpiod_trig, 0);
 
-       /* it cannot take more than 20 ms */
+       /* it should not take more than 20 ms until echo is rising */
        ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
        if (ret < 0) {
                mutex_unlock(&data->lock);
@@ -120,7 +120,8 @@ static int srf04_read(struct srf04_data *data)
                return -ETIMEDOUT;
        }
 
-       ret = wait_for_completion_killable_timeout(&data->falling, HZ/50);
+       /* it cannot take more than 50 ms until echo is falling */
+       ret = wait_for_completion_killable_timeout(&data->falling, HZ/20);
        if (ret < 0) {
                mutex_unlock(&data->lock);
                return ret;
@@ -135,19 +136,19 @@ static int srf04_read(struct srf04_data *data)
 
        dt_ns = ktime_to_ns(ktime_dt);
        /*
-        * measuring more than 3 meters is beyond the capabilities of
-        * the sensor
+        * measuring more than 6,45 meters is beyond the capabilities of
+        * the supported sensors
         * ==> filter out invalid results for not measuring echos of
         *     another us sensor
         *
         * formula:
-        *         distance       3 m
-        * time = ---------- = --------- = 9404389 ns
-        *          speed       319 m/s
+        *         distance     6,45 * 2 m
+        * time = ---------- = ------------ = 40438871 ns
+        *          speed         319 m/s
         *
         * using a minimum speed at -20 Â°C of 319 m/s
         */
-       if (dt_ns > 9404389)
+       if (dt_ns > 40438871)
                return -EIO;
 
        time_ns = dt_ns;
@@ -159,20 +160,20 @@ static int srf04_read(struct srf04_data *data)
         *   with Temp in Â°C
         *   and speed in m/s
         *
-        * use 343 m/s as ultrasonic speed at 20 Â°C here in absence of the
+        * use 343,5 m/s as ultrasonic speed at 20 Â°C here in absence of the
         * temperature
         *
         * therefore:
-        *             time     343
-        * distance = ------ * -----
-        *             10^6       2
+        *             time     343,5     time * 106
+        * distance = ------ * ------- = ------------
+        *             10^6         2         617176
         *   with time in ns
         *   and distance in mm (one way)
         *
-        * because we limit to 3 meters the multiplication with 343 just
+        * because we limit to 6,45 meters the multiplication with 106 just
         * fits into 32 bit
         */
-       distance_mm = time_ns * 343 / 2000000;
+       distance_mm = time_ns * 106 / 617176;
 
        return distance_mm;
 }
index 3a8b091..9d07378 100644 (file)
@@ -199,6 +199,7 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+void rdma_nl_init(void);
 void rdma_nl_exit(void);
 
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
index 2dd2cfe..50a9244 100644 (file)
@@ -2716,6 +2716,8 @@ static int __init ib_core_init(void)
                goto err_comp_unbound;
        }
 
+       rdma_nl_init();
+
        ret = addr_init();
        if (ret) {
                pr_warn("Could't init IB address resolution\n");
index 72141c5..ade7182 100644 (file)
@@ -372,6 +372,7 @@ EXPORT_SYMBOL(iw_cm_disconnect);
 static void destroy_cm_id(struct iw_cm_id *cm_id)
 {
        struct iwcm_id_private *cm_id_priv;
+       struct ib_qp *qp;
        unsigned long flags;
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -389,6 +390,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
        set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
+
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
@@ -401,7 +405,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
-               (void)iwcm_modify_qp_err(cm_id_priv->qp);
+               (void)iwcm_modify_qp_err(qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
@@ -426,11 +430,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                BUG();
                break;
        }
-       if (cm_id_priv->qp) {
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-               cm_id_priv->qp = NULL;
-       }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
 
        if (cm_id->mapped) {
                iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
@@ -671,11 +673,11 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
-               if (cm_id_priv->qp) {
-                       cm_id->device->ops.iw_rem_ref(qp);
-                       cm_id_priv->qp = NULL;
-               }
+               qp = cm_id_priv->qp;
+               cm_id_priv->qp = NULL;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+               if (qp)
+                       cm_id->device->ops.iw_rem_ref(qp);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }
@@ -696,7 +698,7 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
-       struct ib_qp *qp;
+       struct ib_qp *qp = NULL;
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -730,13 +732,13 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
                return 0;       /* success */
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
-       if (cm_id_priv->qp) {
-               cm_id->device->ops.iw_rem_ref(qp);
-               cm_id_priv->qp = NULL;
-       }
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
        cm_id_priv->state = IW_CM_STATE_IDLE;
 err:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id->device->ops.iw_rem_ref(qp);
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);
        return ret;
@@ -878,6 +880,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
 {
+       struct ib_qp *qp = NULL;
        unsigned long flags;
        int ret;
 
@@ -896,11 +899,13 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
+               qp = cm_id_priv->qp;
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 
        if (iw_event->private_data_len)
@@ -942,21 +947,18 @@ static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
 {
+       struct ib_qp *qp;
        unsigned long flags;
-       int ret = 0;
+       int ret = 0, notify_event = 0;
        spin_lock_irqsave(&cm_id_priv->lock, flags);
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
 
-       if (cm_id_priv->qp) {
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-               cm_id_priv->qp = NULL;
-       }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
-               spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-               ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
-               spin_lock_irqsave(&cm_id_priv->lock, flags);
+               notify_event = 1;
                break;
        case IW_CM_STATE_DESTROYING:
                break;
@@ -965,6 +967,10 @@ static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
+       if (notify_event)
+               ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        return ret;
 }
 
index 81dbd5f..8cd31ef 100644 (file)
 #include <linux/module.h>
 #include "core_priv.h"
 
-static DEFINE_MUTEX(rdma_nl_mutex);
 static struct {
-       const struct rdma_nl_cbs   *cb_table;
+       const struct rdma_nl_cbs *cb_table;
+       /* Synchronizes between ongoing netlink commands and netlink client
+        * unregistration.
+        */
+       struct rw_semaphore sem;
 } rdma_nl_types[RDMA_NL_NUM_CLIENTS];
 
 bool rdma_nl_chk_listeners(unsigned int group)
@@ -75,70 +78,53 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
        return (op < max_num_ops[type]) ? true : false;
 }
 
-static bool
-is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
+static const struct rdma_nl_cbs *
+get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
 {
        const struct rdma_nl_cbs *cb_table;
 
-       if (!is_nl_msg_valid(type, op))
-               return false;
-
        /*
         * Currently only NLDEV client is supporting netlink commands in
         * non init_net net namespace.
         */
        if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
-               return false;
+               return NULL;
 
-       if (!rdma_nl_types[type].cb_table) {
-               mutex_unlock(&rdma_nl_mutex);
-               request_module("rdma-netlink-subsys-%d", type);
-               mutex_lock(&rdma_nl_mutex);
-       }
+       cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+       if (!cb_table) {
+               /*
+                * Didn't get valid reference of the table, attempt module
+                * load once.
+                */
+               up_read(&rdma_nl_types[type].sem);
 
-       cb_table = rdma_nl_types[type].cb_table;
+               request_module("rdma-netlink-subsys-%d", type);
 
+               down_read(&rdma_nl_types[type].sem);
+               cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+       }
        if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
-               return false;
-       return true;
+               return NULL;
+       return cb_table;
 }
 
 void rdma_nl_register(unsigned int index,
                      const struct rdma_nl_cbs cb_table[])
 {
-       mutex_lock(&rdma_nl_mutex);
-       if (!is_nl_msg_valid(index, 0)) {
-               /*
-                * All clients are not interesting in success/failure of
-                * this call. They want to see the print to error log and
-                * continue their initialization. Print warning for them,
-                * because it is programmer's error to be here.
-                */
-               mutex_unlock(&rdma_nl_mutex);
-               WARN(true,
-                    "The not-valid %u index was supplied to RDMA netlink\n",
-                    index);
+       if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
+           WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
                return;
-       }
-
-       if (rdma_nl_types[index].cb_table) {
-               mutex_unlock(&rdma_nl_mutex);
-               WARN(true,
-                    "The %u index is already registered in RDMA netlink\n",
-                    index);
-               return;
-       }
 
-       rdma_nl_types[index].cb_table = cb_table;
-       mutex_unlock(&rdma_nl_mutex);
+       /* Pairs with the READ_ONCE in is_nl_valid() */
+       smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
 }
 EXPORT_SYMBOL(rdma_nl_register);
 
 void rdma_nl_unregister(unsigned int index)
 {
-       mutex_lock(&rdma_nl_mutex);
+       down_write(&rdma_nl_types[index].sem);
        rdma_nl_types[index].cb_table = NULL;
-       mutex_unlock(&rdma_nl_mutex);
+       up_write(&rdma_nl_types[index].sem);
 }
 EXPORT_SYMBOL(rdma_nl_unregister);
 
@@ -170,15 +156,21 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
        unsigned int index = RDMA_NL_GET_CLIENT(type);
        unsigned int op = RDMA_NL_GET_OP(type);
        const struct rdma_nl_cbs *cb_table;
+       int err = -EINVAL;
 
-       if (!is_nl_valid(skb, index, op))
+       if (!is_nl_msg_valid(index, op))
                return -EINVAL;
 
-       cb_table = rdma_nl_types[index].cb_table;
+       down_read(&rdma_nl_types[index].sem);
+       cb_table = get_cb_table(skb, index, op);
+       if (!cb_table)
+               goto done;
 
        if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
-           !netlink_capable(skb, CAP_NET_ADMIN))
-               return -EPERM;
+           !netlink_capable(skb, CAP_NET_ADMIN)) {
+               err = -EPERM;
+               goto done;
+       }
 
        /*
         * LS responses overload the 0x100 (NLM_F_ROOT) flag.  Don't
@@ -186,8 +178,8 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
         */
        if (index == RDMA_NL_LS) {
                if (cb_table[op].doit)
-                       return cb_table[op].doit(skb, nlh, extack);
-               return -EINVAL;
+                       err = cb_table[op].doit(skb, nlh, extack);
+               goto done;
        }
        /* FIXME: Convert IWCM to properly handle doit callbacks */
        if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
@@ -195,14 +187,15 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                        .dump = cb_table[op].dump,
                };
                if (c.dump)
-                       return netlink_dump_start(skb->sk, skb, nlh, &c);
-               return -EINVAL;
+                       err = netlink_dump_start(skb->sk, skb, nlh, &c);
+               goto done;
        }
 
        if (cb_table[op].doit)
-               return cb_table[op].doit(skb, nlh, extack);
-
-       return 0;
+               err = cb_table[op].doit(skb, nlh, extack);
+done:
+       up_read(&rdma_nl_types[index].sem);
+       return err;
 }
 
 /*
@@ -263,9 +256,7 @@ skip:
 
 static void rdma_nl_rcv(struct sk_buff *skb)
 {
-       mutex_lock(&rdma_nl_mutex);
        rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
-       mutex_unlock(&rdma_nl_mutex);
 }
 
 int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
@@ -297,6 +288,14 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(rdma_nl_multicast);
 
+void rdma_nl_init(void)
+{
+       int idx;
+
+       for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+               init_rwsem(&rdma_nl_types[idx].sem);
+}
+
 void rdma_nl_exit(void)
 {
        int idx;
index 65b3654..c03af08 100644 (file)
@@ -778,7 +778,7 @@ static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
                container_of(res, struct rdma_counter, res);
 
        if (port && port != counter->port)
-               return 0;
+               return -EAGAIN;
 
        /* Dump it even query failed */
        rdma_counter_query_stats(counter);
index 1e5aeb3..63f7f7d 100644 (file)
@@ -98,7 +98,7 @@ ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
 
 struct ib_uverbs_device {
        atomic_t                                refcount;
-       int                                     num_comp_vectors;
+       u32                                     num_comp_vectors;
        struct completion                       comp;
        struct device                           dev;
        /* First group for device attributes, NULL terminated array */
index f974b68..35c2841 100644 (file)
@@ -662,16 +662,17 @@ static bool find_gid_index(const union ib_gid *gid,
                           void *context)
 {
        struct find_gid_index_context *ctx = context;
+       u16 vlan_id = 0xffff;
+       int ret;
 
        if (ctx->gid_type != gid_attr->gid_type)
                return false;
 
-       if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
-           (is_vlan_dev(gid_attr->ndev) &&
-            vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
+       ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
+       if (ret)
                return false;
 
-       return true;
+       return ctx->vlan_id == vlan_id;
 }
 
 static const struct ib_gid_attr *
index e87fc04..347dc24 100644 (file)
@@ -495,7 +495,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        release_ep_resources(ep);
-       kfree_skb(skb);
        return 0;
 }
 
@@ -506,7 +505,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        c4iw_put_ep(&ep->parent_ep->com);
        release_ep_resources(ep);
-       kfree_skb(skb);
        return 0;
 }
 
@@ -2424,20 +2422,6 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
        enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
 
        pr_debug("ep %p tid %u\n", ep, ep->hwtid);
-
-       skb_get(skb);
-       rpl = cplhdr(skb);
-       if (!is_t4(adapter_type)) {
-               skb_trim(skb, roundup(sizeof(*rpl5), 16));
-               rpl5 = (void *)rpl;
-               INIT_TP_WR(rpl5, ep->hwtid);
-       } else {
-               skb_trim(skb, sizeof(*rpl));
-               INIT_TP_WR(rpl, ep->hwtid);
-       }
-       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
-                                                   ep->hwtid));
-
        cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
                      enable_tcp_timestamps && req->tcpopt.tstamp,
                      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
@@ -2483,6 +2467,20 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                if (tcph->ece && tcph->cwr)
                        opt2 |= CCTRL_ECN_V(1);
        }
+
+       skb_get(skb);
+       rpl = cplhdr(skb);
+       if (!is_t4(adapter_type)) {
+               skb_trim(skb, roundup(sizeof(*rpl5), 16));
+               rpl5 = (void *)rpl;
+               INIT_TP_WR(rpl5, ep->hwtid);
+       } else {
+               skb_trim(skb, sizeof(*rpl));
+               INIT_TP_WR(rpl, ep->hwtid);
+       }
+       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+                                                   ep->hwtid));
+
        if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
                u32 isn = (prandom_u32() & ~7UL) - 1;
                opt2 |= T5_OPT_2_VALID_F;
index 71cb952..26b792b 100644 (file)
@@ -1489,7 +1489,6 @@ static int __init hfi1_mod_init(void)
                goto bail_dev;
        }
 
-       hfi1_compute_tid_rdma_flow_wt();
        /*
         * These must be called before the driver is registered with
         * the PCI subsystem.
index 61aa550..61362bd 100644 (file)
@@ -319,7 +319,9 @@ int pcie_speeds(struct hfi1_devdata *dd)
        /*
         * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
         */
-       if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
+       if (parent &&
+           (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
+            dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
                dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
                dd->link_gen3_capable = 0;
        }
index 513a8aa..1a3c647 100644 (file)
@@ -2209,15 +2209,15 @@ int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
                if (qp->s_flags & RVT_S_WAIT_RNR)
                        goto bail_stop;
                rdi = ib_to_rvt(qp->ibqp.device);
-               if (qp->s_rnr_retry == 0 &&
-                   !((rdi->post_parms[wqe->wr.opcode].flags &
-                     RVT_OPERATION_IGN_RNR_CNT) &&
-                     qp->s_rnr_retry_cnt == 0)) {
-                       status = IB_WC_RNR_RETRY_EXC_ERR;
-                       goto class_b;
+               if (!(rdi->post_parms[wqe->wr.opcode].flags &
+                      RVT_OPERATION_IGN_RNR_CNT)) {
+                       if (qp->s_rnr_retry == 0) {
+                               status = IB_WC_RNR_RETRY_EXC_ERR;
+                               goto class_b;
+                       }
+                       if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
+                               qp->s_rnr_retry--;
                }
-               if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
-                       qp->s_rnr_retry--;
 
                /*
                 * The last valid PSN is the previous PSN. For TID RDMA WRITE
index 2ed7bfd..c61b602 100644 (file)
@@ -65,6 +65,7 @@
 #define SDMA_DESCQ_CNT 2048
 #define SDMA_DESC_INTR 64
 #define INVALID_TAIL 0xffff
+#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
 
 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
 module_param(sdma_descq_cnt, uint, S_IRUGO);
@@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
        struct sdma_engine *sde;
 
        if (dd->sdma_pad_dma) {
-               dma_free_coherent(&dd->pcidev->dev, 4,
+               dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
                                  (void *)dd->sdma_pad_dma,
                                  dd->sdma_pad_phys);
                dd->sdma_pad_dma = NULL;
@@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
        }
 
        /* Allocate memory for pad */
-       dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+       dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
                                              &dd->sdma_pad_phys, GFP_KERNEL);
        if (!dd->sdma_pad_dma) {
                dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
index b4dcc4d..e53f542 100644 (file)
@@ -107,8 +107,6 @@ static u32 mask_generation(u32 a)
  * C - Capcode
  */
 
-static u32 tid_rdma_flow_wt;
-
 static void tid_rdma_trigger_resume(struct work_struct *work);
 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
@@ -136,6 +134,26 @@ static void update_r_next_psn_fecn(struct hfi1_packet *packet,
                                   struct tid_rdma_flow *flow,
                                   bool fecn);
 
+static void validate_r_tid_ack(struct hfi1_qp_priv *priv)
+{
+       if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+               priv->r_tid_ack = priv->r_tid_tail;
+}
+
+static void tid_rdma_schedule_ack(struct rvt_qp *qp)
+{
+       struct hfi1_qp_priv *priv = qp->priv;
+
+       priv->s_flags |= RVT_S_ACK_PENDING;
+       hfi1_schedule_tid_send(qp);
+}
+
+static void tid_rdma_trigger_ack(struct rvt_qp *qp)
+{
+       validate_r_tid_ack(qp->priv);
+       tid_rdma_schedule_ack(qp);
+}
+
 static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
 {
        return
@@ -2736,11 +2754,6 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                                diff = cmp_psn(psn,
                                               flow->flow_state.r_next_psn);
                                if (diff > 0) {
-                                       if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
-                                               restart_tid_rdma_read_req(rcd,
-                                                                         qp,
-                                                                         wqe);
-
                                        /* Drop the packet.*/
                                        goto s_unlock;
                                } else if (diff < 0) {
@@ -3010,10 +3023,7 @@ nak_psn:
                qpriv->s_nak_state = IB_NAK_PSN_ERROR;
                /* We are NAK'ing the next expected PSN */
                qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
-               qpriv->s_flags |= RVT_S_ACK_PENDING;
-               if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
-                       qpriv->r_tid_ack = qpriv->r_tid_tail;
-               hfi1_schedule_tid_send(qp);
+               tid_rdma_trigger_ack(qp);
        }
        goto unlock;
 }
@@ -3376,18 +3386,17 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
        return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
 }
 
-void hfi1_compute_tid_rdma_flow_wt(void)
+static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
 {
        /*
         * Heuristic for computing the RNR timeout when waiting on the flow
         * queue. Rather than a computationaly expensive exact estimate of when
         * a flow will be available, we assume that if a QP is at position N in
         * the flow queue it has to wait approximately (N + 1) * (number of
-        * segments between two sync points), assuming PMTU of 4K. The rationale
-        * for this is that flows are released and recycled at each sync point.
+        * segments between two sync points). The rationale for this is that
+        * flows are released and recycled at each sync point.
         */
-       tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
-               TID_RDMA_MAX_SEGMENT_SIZE;
+       return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
 }
 
 static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
@@ -3510,7 +3519,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
                if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
                        ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
                        if (ret) {
-                               to_seg = tid_rdma_flow_wt *
+                               to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
                                        position_in_queue(qpriv,
                                                          &rcd->flow_queue);
                                break;
@@ -3531,7 +3540,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
                /*
                 * If overtaking req->acked_tail, send an RNR NAK. Because the
                 * QP is not queued in this case, and the issue can only be
-                * caused due a delay in scheduling the second leg which we
+                * caused by a delay in scheduling the second leg which we
                 * cannot estimate, we use a rather arbitrary RNR timeout of
                 * (MAX_FLOWS / 2) segments
                 */
@@ -3539,8 +3548,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
                                MAX_FLOWS)) {
                        ret = -EAGAIN;
                        to_seg = MAX_FLOWS >> 1;
-                       qpriv->s_flags |= RVT_S_ACK_PENDING;
-                       hfi1_schedule_tid_send(qp);
+                       tid_rdma_trigger_ack(qp);
                        break;
                }
 
@@ -4340,8 +4348,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
        trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
                                          req);
        trace_hfi1_tid_write_rsp_rcv_data(qp);
-       if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
-               priv->r_tid_ack = priv->r_tid_tail;
+       validate_r_tid_ack(priv);
 
        if (opcode == TID_OP(WRITE_DATA_LAST)) {
                release_rdma_sge_mr(e);
@@ -4380,8 +4387,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
        }
 
 done:
-       priv->s_flags |= RVT_S_ACK_PENDING;
-       hfi1_schedule_tid_send(qp);
+       tid_rdma_schedule_ack(qp);
 exit:
        priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
        if (fecn)
@@ -4393,10 +4399,7 @@ send_nak:
        if (!priv->s_nak_state) {
                priv->s_nak_state = IB_NAK_PSN_ERROR;
                priv->s_nak_psn = flow->flow_state.r_next_psn;
-               priv->s_flags |= RVT_S_ACK_PENDING;
-               if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
-                       priv->r_tid_ack = priv->r_tid_tail;
-               hfi1_schedule_tid_send(qp);
+               tid_rdma_trigger_ack(qp);
        }
        goto done;
 }
@@ -4944,8 +4947,7 @@ void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
        qpriv->resync = true;
        /* RESYNC request always gets a TID RDMA ACK. */
        qpriv->s_nak_state = 0;
-       qpriv->s_flags |= RVT_S_ACK_PENDING;
-       hfi1_schedule_tid_send(qp);
+       tid_rdma_trigger_ack(qp);
 bail:
        if (fecn)
                qp->s_flags |= RVT_S_ECN;
index 1c53618..6e82df2 100644 (file)
@@ -17,6 +17,7 @@
 #define TID_RDMA_MIN_SEGMENT_SIZE       BIT(18)   /* 256 KiB (for now) */
 #define TID_RDMA_MAX_SEGMENT_SIZE       BIT(18)   /* 256 KiB (for now) */
 #define TID_RDMA_MAX_PAGES              (BIT(18) >> PAGE_SHIFT)
+#define TID_RDMA_SEGMENT_SHIFT         18
 
 /*
  * Bit definitions for priv->s_flags.
@@ -274,8 +275,6 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
                                  struct ib_other_headers *ohdr,
                                  u32 *bth1, u32 *bth2, u32 *len);
 
-void hfi1_compute_tid_rdma_flow_wt(void);
-
 void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
 
 u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
index 7bff0a1..089e201 100644 (file)
@@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp,
 /* Length of buffer to create verbs txreq cache name */
 #define TXREQ_NAME_LEN 24
 
-/* 16B trailing buffer */
-static const u8 trail_buf[MAX_16B_PADDING];
-
 static uint wss_threshold = 80;
 module_param(wss_threshold, uint, S_IRUGO);
 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -820,8 +817,8 @@ static int build_verbs_tx_desc(
 
        /* add icrc, lt byte, and padding to flit */
        if (extra_bytes)
-               ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
-                                       (void *)trail_buf, extra_bytes);
+               ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
+                                      sde->dd->sdma_pad_phys, extra_bytes);
 
 bail_txadd:
        return ret;
@@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                }
                /* add icrc, lt byte, and padding to flit */
                if (extra_bytes)
-                       seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
+                       seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
+                                        extra_bytes);
 
                seg_pio_copy_end(pbuf);
        }
index 8678327..3bb8f78 100644 (file)
@@ -59,7 +59,7 @@ enum {
 
 #define HNS_ROCE_HEM_CHUNK_LEN \
         ((256 - sizeof(struct list_head) - 2 * sizeof(int)) /   \
-        (sizeof(struct scatterlist)))
+        (sizeof(struct scatterlist) + sizeof(void *)))
 
 #define check_whether_bt_num_3(type, hop_num) \
        (type < HEM_TYPE_MTT && hop_num == 2)
index 7a89d66..e82567f 100644 (file)
@@ -5389,9 +5389,9 @@ static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
                return;
        }
 
-       if (eq->buf_list)
-               dma_free_coherent(hr_dev->dev, buf_chk_sz,
-                                 eq->buf_list->buf, eq->buf_list->map);
+       dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
+                         eq->buf_list->map);
+       kfree(eq->buf_list);
 }
 
 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
index 9591457..43ea2c1 100644 (file)
@@ -376,7 +376,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
        srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
        srq->max_gs = srq_init_attr->attr.max_sge;
 
-       srq_desc_size = max(16, 16 * srq->max_gs);
+       srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
 
        srq->wqe_shift = ilog2(srq_desc_size);
 
index 6305993..7019c12 100644 (file)
@@ -1967,8 +1967,8 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
        int err;
 
        if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-               xa_erase(&dev->mdev->priv.mkey_table,
-                        mlx5_base_mkey(mmw->mmkey.key));
+               xa_erase_irq(&dev->mdev->priv.mkey_table,
+                            mlx5_base_mkey(mmw->mmkey.key));
                /*
                 * pagefault_single_data_segment() may be accessing mmw under
                 * SRCU if the user bound an ODP MR to this MW.
index 8937d72..5fd071c 100644 (file)
@@ -3249,10 +3249,12 @@ static int modify_raw_packet_qp_sq(
        }
 
        /* Only remove the old rate after new rate was set */
-       if ((old_rl.rate &&
-            !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
-           (new_state != MLX5_SQC_STATE_RDY))
+       if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
+           (new_state != MLX5_SQC_STATE_RDY)) {
                mlx5_rl_remove_rate(dev, &old_rl);
+               if (new_state != MLX5_SQC_STATE_RDY)
+                       memset(&new_rl, 0, sizeof(new_rl));
+       }
 
        ibqp->rl = new_rl;
        sq->state = new_state;
index 5136b83..dc71b6e 100644 (file)
@@ -76,7 +76,7 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
        struct qedr_dev *qedr = get_qedr_dev(ibdev);
        u32 fw_ver = (u32)qedr->attr.fw_ver;
 
-       snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
+       snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
                 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
                 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
 }
index 52d402f..b431748 100644 (file)
@@ -1312,6 +1312,7 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 void siw_free_qp(struct kref *ref)
 {
        struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
+       struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
        struct siw_device *sdev = qp->sdev;
        unsigned long flags;
 
@@ -1334,4 +1335,5 @@ void siw_free_qp(struct kref *ref)
        atomic_dec(&sdev->num_qp);
        siw_dbg_qp(qp, "free QP\n");
        kfree_rcu(qp, rcu);
+       kfree(siw_base_qp);
 }
index 869e02b..b18a677 100644 (file)
@@ -604,7 +604,6 @@ out:
 int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
 {
        struct siw_qp *qp = to_siw_qp(base_qp);
-       struct siw_base_qp *siw_base_qp = to_siw_base_qp(base_qp);
        struct siw_ucontext *uctx =
                rdma_udata_to_drv_context(udata, struct siw_ucontext,
                                          base_ucontext);
@@ -641,7 +640,6 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
        qp->scq = qp->rcq = NULL;
 
        siw_qp_put(qp);
-       kfree(siw_base_qp);
 
        return 0;
 }
index 1cb40c7..8229a90 100644 (file)
@@ -489,6 +489,15 @@ static void ml_ff_destroy(struct ff_device *ff)
 {
        struct ml_device *ml = ff->private;
 
+       /*
+        * Even though we stop all playing effects when tearing down
+        * an input device (via input_device_flush() that calls into
+        * input_ff_flush() that stops and erases all effects), we
+        * do not actually stop the timer, and therefore we should
+        * do it here.
+        */
+       del_timer_sync(&ml->timer);
+
        kfree(ml->private);
 }
 
index 56fae34..704558d 100644 (file)
@@ -177,6 +177,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0096", /* X280 */
        "LEN0097", /* X280 -> ALPS trackpoint */
        "LEN009b", /* T580 */
+       "LEN0402", /* X1 Extreme 2nd Generation */
        "LEN200f", /* T450s */
        "LEN2054", /* E480 */
        "LEN2055", /* E580 */
index f28a715..bbf9ae9 100644 (file)
@@ -510,7 +510,6 @@ struct f11_data {
        struct rmi_2d_sensor_platform_data sensor_pdata;
        unsigned long *abs_mask;
        unsigned long *rel_mask;
-       unsigned long *result_bits;
 };
 
 enum f11_finger_state {
@@ -1057,7 +1056,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
        /*
        ** init instance data, fill in values and create any sysfs files
        */
-       f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 3,
+       f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 2,
                        GFP_KERNEL);
        if (!f11)
                return -ENOMEM;
@@ -1076,8 +1075,6 @@ static int rmi_f11_initialize(struct rmi_function *fn)
                        + sizeof(struct f11_data));
        f11->rel_mask = (unsigned long *)((char *)f11
                        + sizeof(struct f11_data) + mask_size);
-       f11->result_bits = (unsigned long *)((char *)f11
-                       + sizeof(struct f11_data) + mask_size * 2);
 
        set_bit(fn->irq_pos, f11->abs_mask);
        set_bit(fn->irq_pos + 1, f11->rel_mask);
@@ -1284,8 +1281,8 @@ static irqreturn_t rmi_f11_attention(int irq, void *ctx)
                        valid_bytes = f11->sensor.attn_size;
                memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
                        valid_bytes);
-               drvdata->attn_data.data += f11->sensor.attn_size;
-               drvdata->attn_data.size -= f11->sensor.attn_size;
+               drvdata->attn_data.data += valid_bytes;
+               drvdata->attn_data.size -= valid_bytes;
        } else {
                error = rmi_read_block(rmi_dev,
                                data_base_addr, f11->sensor.data_pkt,
index d20a5d6..7e97944 100644 (file)
@@ -55,6 +55,9 @@ struct f12_data {
 
        const struct rmi_register_desc_item *data15;
        u16 data15_offset;
+
+       unsigned long *abs_mask;
+       unsigned long *rel_mask;
 };
 
 static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
@@ -209,8 +212,8 @@ static irqreturn_t rmi_f12_attention(int irq, void *ctx)
                        valid_bytes = sensor->attn_size;
                memcpy(sensor->data_pkt, drvdata->attn_data.data,
                        valid_bytes);
-               drvdata->attn_data.data += sensor->attn_size;
-               drvdata->attn_data.size -= sensor->attn_size;
+               drvdata->attn_data.data += valid_bytes;
+               drvdata->attn_data.size -= valid_bytes;
        } else {
                retval = rmi_read_block(rmi_dev, f12->data_addr,
                                        sensor->data_pkt, sensor->pkt_size);
@@ -291,9 +294,18 @@ static int rmi_f12_write_control_regs(struct rmi_function *fn)
 static int rmi_f12_config(struct rmi_function *fn)
 {
        struct rmi_driver *drv = fn->rmi_dev->driver;
+       struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+       struct rmi_2d_sensor *sensor;
        int ret;
 
-       drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+       sensor = &f12->sensor;
+
+       if (!sensor->report_abs)
+               drv->clear_irq_bits(fn->rmi_dev, f12->abs_mask);
+       else
+               drv->set_irq_bits(fn->rmi_dev, f12->abs_mask);
+
+       drv->clear_irq_bits(fn->rmi_dev, f12->rel_mask);
 
        ret = rmi_f12_write_control_regs(fn);
        if (ret)
@@ -315,9 +327,12 @@ static int rmi_f12_probe(struct rmi_function *fn)
        struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        u16 data_offset = 0;
+       int mask_size;
 
        rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
 
+       mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
+
        ret = rmi_read(fn->rmi_dev, query_addr, &buf);
        if (ret < 0) {
                dev_err(&fn->dev, "Failed to read general info register: %d\n",
@@ -332,10 +347,19 @@ static int rmi_f12_probe(struct rmi_function *fn)
                return -ENODEV;
        }
 
-       f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL);
+       f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data) + mask_size * 2,
+                       GFP_KERNEL);
        if (!f12)
                return -ENOMEM;
 
+       f12->abs_mask = (unsigned long *)((char *)f12
+                       + sizeof(struct f12_data));
+       f12->rel_mask = (unsigned long *)((char *)f12
+                       + sizeof(struct f12_data) + mask_size);
+
+       set_bit(fn->irq_pos, f12->abs_mask);
+       set_bit(fn->irq_pos + 1, f12->rel_mask);
+
        f12->has_dribble = !!(buf & BIT(3));
 
        if (fn->dev.of_node) {
index 710b025..897105b 100644 (file)
@@ -359,7 +359,7 @@ static const struct vb2_ops rmi_f54_queue_ops = {
 static const struct vb2_queue rmi_f54_queue = {
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
-       .buf_struct_size = sizeof(struct vb2_buffer),
+       .buf_struct_size = sizeof(struct vb2_v4l2_buffer),
        .ops = &rmi_f54_queue_ops,
        .mem_ops = &vb2_vmalloc_memops,
        .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
@@ -601,7 +601,7 @@ static int rmi_f54_config(struct rmi_function *fn)
 {
        struct rmi_driver *drv = fn->rmi_dev->driver;
 
-       drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+       drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
 
        return 0;
 }
@@ -730,6 +730,7 @@ static void rmi_f54_remove(struct rmi_function *fn)
 
        video_unregister_device(&f54->vdev);
        v4l2_device_unregister(&f54->v4l2);
+       destroy_workqueue(f54->workqueue);
 }
 
 struct rmi_function_handler rmi_f54_handler = {
index 4b22d49..6bcffc9 100644 (file)
@@ -1990,11 +1990,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
 
        /* get sysinfo */
        md->si = &cd->sysinfo;
-       if (!md->si) {
-               dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n",
-                       __func__, md->si);
-               goto error_get_sysinfo;
-       }
 
        rc = cyttsp4_setup_input_device(cd);
        if (rc)
@@ -2004,8 +1999,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
 
 error_init_input:
        input_free_device(md->input);
-error_get_sysinfo:
-       input_set_drvdata(md->input, NULL);
 error_alloc_failed:
        dev_err(dev, "%s failed.\n", __func__);
        return rc;
index 3492339..1139714 100644 (file)
@@ -81,8 +81,10 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts)
        for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
                finger[i].is_valid = buf[i + y] >> 7;
                if (finger[i].is_valid) {
-                       finger[i].x = ((buf[i + y] & 0x0070) << 4) | buf[i + 1];
-                       finger[i].y = ((buf[i + y] & 0x0007) << 8) | buf[i + 2];
+                       finger[i].x = ((buf[i + y] & 0x0070) << 4) |
+                                       buf[i + y + 1];
+                       finger[i].y = ((buf[i + y] & 0x0007) << 8) |
+                                       buf[i + y + 2];
 
                        /* st1232 includes a z-axis / touch strength */
                        if (ts->chip_info->have_z)
index 7b97122..c498796 100644 (file)
@@ -405,8 +405,12 @@ void icc_set_tag(struct icc_path *path, u32 tag)
        if (!path)
                return;
 
+       mutex_lock(&icc_lock);
+
        for (i = 0; i < path->num_nodes; i++)
                path->reqs[i].tag = tag;
+
+       mutex_unlock(&icc_lock);
 }
 EXPORT_SYMBOL_GPL(icc_set_tag);
 
index 910081d..b4966d8 100644 (file)
@@ -433,7 +433,8 @@ static int qnoc_probe(struct platform_device *pdev)
        if (!qp)
                return -ENOMEM;
 
-       data = devm_kcalloc(dev, num_nodes, sizeof(*node), GFP_KERNEL);
+       data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+                           GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
index 5795559..502a6c2 100644 (file)
@@ -790,7 +790,8 @@ static int qnoc_probe(struct platform_device *pdev)
        if (!qp)
                return -ENOMEM;
 
-       data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+       data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes),
+                           GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
index c235f79..5120ce4 100644 (file)
@@ -74,6 +74,19 @@ static const struct dmi_system_id ivrs_quirks[] __initconst = {
                .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
        },
        {
+               /*
+                * Acer Aspire A315-41 requires the very same workaround as
+                * Dell Latitude 5495
+                */
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Acer Aspire A315-41",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+       },
+       {
                .callback = ivrs_ioapic_quirk_cb,
                .ident = "Lenovo ideapad 330S-15ARR",
                .matches = {
index 3f97491..6db6d96 100644 (file)
@@ -2794,7 +2794,7 @@ static int identity_mapping(struct device *dev)
        struct device_domain_info *info;
 
        info = dev->archdata.iommu;
-       if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
+       if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO)
                return (info->domain == si_domain);
 
        return 0;
@@ -3471,7 +3471,7 @@ static bool iommu_need_mapping(struct device *dev)
                if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
                        dma_mask = dev->coherent_dma_mask;
 
-               if (dma_mask >= dma_get_required_mask(dev))
+               if (dma_mask >= dma_direct_get_required_mask(dev))
                        return false;
 
                /*
@@ -3775,6 +3775,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
        return nelems;
 }
 
+static u64 intel_get_required_mask(struct device *dev)
+{
+       if (!iommu_need_mapping(dev))
+               return dma_direct_get_required_mask(dev);
+       return DMA_BIT_MASK(32);
+}
+
 static const struct dma_map_ops intel_dma_ops = {
        .alloc = intel_alloc_coherent,
        .free = intel_free_coherent,
@@ -3787,6 +3794,7 @@ static const struct dma_map_ops intel_dma_ops = {
        .dma_supported = dma_direct_supported,
        .mmap = dma_common_mmap,
        .get_sgtable = dma_common_get_sgtable,
+       .get_required_mask = intel_get_required_mask,
 };
 
 static void
index 2371034..2639fc7 100644 (file)
@@ -1105,10 +1105,8 @@ static int ipmmu_probe(struct platform_device *pdev)
        /* Root devices have mandatory IRQs */
        if (ipmmu_is_root(mmu)) {
                irq = platform_get_irq(pdev, 0);
-               if (irq < 0) {
-                       dev_err(&pdev->dev, "no IRQ found\n");
+               if (irq < 0)
                        return irq;
-               }
 
                ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
                                       dev_name(&pdev->dev), mmu);
index 62e54f1..787e8ee 100644 (file)
@@ -175,6 +175,22 @@ static DEFINE_IDA(its_vpeid_ida);
 #define gic_data_rdist_rd_base()       (gic_data_rdist()->rd_base)
 #define gic_data_rdist_vlpi_base()     (gic_data_rdist_rd_base() + SZ_128K)
 
+static u16 get_its_list(struct its_vm *vm)
+{
+       struct its_node *its;
+       unsigned long its_list = 0;
+
+       list_for_each_entry(its, &its_nodes, entry) {
+               if (!its->is_v4)
+                       continue;
+
+               if (vm->vlpi_count[its->list_nr])
+                       __set_bit(its->list_nr, &its_list);
+       }
+
+       return (u16)its_list;
+}
+
 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
                                               u32 event)
 {
@@ -976,17 +992,15 @@ static void its_send_vmapp(struct its_node *its,
 
 static void its_send_vmovp(struct its_vpe *vpe)
 {
-       struct its_cmd_desc desc;
+       struct its_cmd_desc desc = {};
        struct its_node *its;
        unsigned long flags;
        int col_id = vpe->col_idx;
 
        desc.its_vmovp_cmd.vpe = vpe;
-       desc.its_vmovp_cmd.its_list = (u16)its_list_map;
 
        if (!its_list_map) {
                its = list_first_entry(&its_nodes, struct its_node, entry);
-               desc.its_vmovp_cmd.seq_num = 0;
                desc.its_vmovp_cmd.col = &its->collections[col_id];
                its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
                return;
@@ -1003,6 +1017,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
        raw_spin_lock_irqsave(&vmovp_lock, flags);
 
        desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+       desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
 
        /* Emit VMOVPs */
        list_for_each_entry(its, &its_nodes, entry) {
index daefc52..7d0a12f 100644 (file)
@@ -252,8 +252,8 @@ static int __init plic_init(struct device_node *node,
                        continue;
                }
 
-               /* skip context holes */
-               if (parent.args[0] == -1)
+               /* skip contexts other than supervisor external interrupt */
+               if (parent.args[0] != IRQ_S_EXT)
                        continue;
 
                hartid = plic_find_hart_id(parent.np);
index c92b405..ba86195 100644 (file)
@@ -744,7 +744,7 @@ capi_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &(cdev->recvwait), wait);
        mask = EPOLLOUT | EPOLLWRNORM;
-       if (!skb_queue_empty(&cdev->recvqueue))
+       if (!skb_queue_empty_lockless(&cdev->recvqueue))
                mask |= EPOLLIN | EPOLLRDNORM;
        return mask;
 }
index 705c620..7b726f0 100644 (file)
@@ -18,7 +18,7 @@
 
 static int clamped;
 static struct wf_control *clamp_control;
-static struct dev_pm_qos_request qos_req;
+static struct freq_qos_request qos_req;
 static unsigned int min_freq, max_freq;
 
 static int clamp_set(struct wf_control *ct, s32 value)
@@ -35,7 +35,7 @@ static int clamp_set(struct wf_control *ct, s32 value)
        }
        clamped = value;
 
-       return dev_pm_qos_update_request(&qos_req, freq);
+       return freq_qos_update_request(&qos_req, freq);
 }
 
 static int clamp_get(struct wf_control *ct, s32 *value)
@@ -77,38 +77,44 @@ static int __init wf_cpufreq_clamp_init(void)
 
        min_freq = policy->cpuinfo.min_freq;
        max_freq = policy->cpuinfo.max_freq;
+
+       ret = freq_qos_add_request(&policy->constraints, &qos_req, FREQ_QOS_MAX,
+                                  max_freq);
+
        cpufreq_cpu_put(policy);
 
+       if (ret < 0) {
+               pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
+                      ret);
+               return ret;
+       }
+
        dev = get_cpu_device(0);
        if (unlikely(!dev)) {
                pr_warn("%s: No cpu device for cpu0\n", __func__);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto fail;
        }
 
        clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
-       if (clamp == NULL)
-               return -ENOMEM;
-
-       ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY,
-                                    max_freq);
-       if (ret < 0) {
-               pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
-                      ret);
-               goto free;
+       if (clamp == NULL) {
+               ret = -ENOMEM;
+               goto fail;
        }
 
        clamp->ops = &clamp_ops;
        clamp->name = "cpufreq-clamp";
        ret = wf_register_control(clamp);
        if (ret)
-               goto fail;
+               goto free;
+
        clamp_control = clamp;
        return 0;
- fail:
-       dev_pm_qos_remove_request(&qos_req);
 
  free:
        kfree(clamp);
+ fail:
+       freq_qos_remove_request(&qos_req);
        return ret;
 }
 
@@ -116,7 +122,7 @@ static void __exit wf_cpufreq_clamp_exit(void)
 {
        if (clamp_control) {
                wf_unregister_control(clamp_control);
-               dev_pm_qos_remove_request(&qos_req);
+               freq_qos_remove_request(&qos_req);
        }
 }
 
index 310dae2..b2c325e 100644 (file)
@@ -129,11 +129,27 @@ static int mt6397_irq_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
                        mt6397_irq_resume);
 
+struct chip_data {
+       u32 cid_addr;
+       u32 cid_shift;
+};
+
+static const struct chip_data mt6323_core = {
+       .cid_addr = MT6323_CID,
+       .cid_shift = 0,
+};
+
+static const struct chip_data mt6397_core = {
+       .cid_addr = MT6397_CID,
+       .cid_shift = 0,
+};
+
 static int mt6397_probe(struct platform_device *pdev)
 {
        int ret;
        unsigned int id;
        struct mt6397_chip *pmic;
+       const struct chip_data *pmic_core;
 
        pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
        if (!pmic)
@@ -149,28 +165,30 @@ static int mt6397_probe(struct platform_device *pdev)
        if (!pmic->regmap)
                return -ENODEV;
 
-       platform_set_drvdata(pdev, pmic);
+       pmic_core = of_device_get_match_data(&pdev->dev);
+       if (!pmic_core)
+               return -ENODEV;
 
-       ret = regmap_read(pmic->regmap, MT6397_CID, &id);
+       ret = regmap_read(pmic->regmap, pmic_core->cid_addr, &id);
        if (ret) {
-               dev_err(pmic->dev, "Failed to read chip id: %d\n", ret);
+               dev_err(&pdev->dev, "Failed to read chip id: %d\n", ret);
                return ret;
        }
 
+       pmic->chip_id = (id >> pmic_core->cid_shift) & 0xff;
+
+       platform_set_drvdata(pdev, pmic);
+
        pmic->irq = platform_get_irq(pdev, 0);
        if (pmic->irq <= 0)
                return pmic->irq;
 
-       switch (id & 0xff) {
-       case MT6323_CHIP_ID:
-               pmic->int_con[0] = MT6323_INT_CON0;
-               pmic->int_con[1] = MT6323_INT_CON1;
-               pmic->int_status[0] = MT6323_INT_STATUS0;
-               pmic->int_status[1] = MT6323_INT_STATUS1;
-               ret = mt6397_irq_init(pmic);
-               if (ret)
-                       return ret;
+       ret = mt6397_irq_init(pmic);
+       if (ret)
+               return ret;
 
+       switch (pmic->chip_id) {
+       case MT6323_CHIP_ID:
                ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs,
                                           ARRAY_SIZE(mt6323_devs), NULL,
                                           0, pmic->irq_domain);
@@ -178,21 +196,13 @@ static int mt6397_probe(struct platform_device *pdev)
 
        case MT6391_CHIP_ID:
        case MT6397_CHIP_ID:
-               pmic->int_con[0] = MT6397_INT_CON0;
-               pmic->int_con[1] = MT6397_INT_CON1;
-               pmic->int_status[0] = MT6397_INT_STATUS0;
-               pmic->int_status[1] = MT6397_INT_STATUS1;
-               ret = mt6397_irq_init(pmic);
-               if (ret)
-                       return ret;
-
                ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs,
                                           ARRAY_SIZE(mt6397_devs), NULL,
                                           0, pmic->irq_domain);
                break;
 
        default:
-               dev_err(&pdev->dev, "unsupported chip: %d\n", id);
+               dev_err(&pdev->dev, "unsupported chip: %d\n", pmic->chip_id);
                return -ENODEV;
        }
 
@@ -205,9 +215,15 @@ static int mt6397_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id mt6397_of_match[] = {
-       { .compatible = "mediatek,mt6397" },
-       { .compatible = "mediatek,mt6323" },
-       { }
+       {
+               .compatible = "mediatek,mt6323",
+               .data = &mt6323_core,
+       }, {
+               .compatible = "mediatek,mt6397",
+               .data = &mt6397_core,
+       }, {
+               /* sentinel */
+       }
 };
 MODULE_DEVICE_TABLE(of, mt6397_of_match);
 
index f7bdae5..5047f73 100644 (file)
@@ -611,7 +611,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
        cq_host->slot[tag].flags = 0;
 
        cq_host->qcnt += 1;
-
+       /* Make sure descriptors are ready before ringing the doorbell */
+       wmb();
        cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
        if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
                pr_debug("%s: cqhci: doorbell not set for tag %d\n",
index 78e7e35..4031217 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
+#include <linux/dma/mxs-dma.h>
 #include <linux/highmem.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -266,7 +267,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
        ssp->ssp_pio_words[2] = cmd1;
        ssp->dma_dir = DMA_NONE;
        ssp->slave_dirn = DMA_TRANS_NONE;
-       desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+       desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
        if (!desc)
                goto out;
 
@@ -311,7 +312,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
        ssp->ssp_pio_words[2] = cmd1;
        ssp->dma_dir = DMA_NONE;
        ssp->slave_dirn = DMA_TRANS_NONE;
-       desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+       desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
        if (!desc)
                goto out;
 
@@ -441,7 +442,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        host->data = data;
        ssp->dma_dir = dma_data_dir;
        ssp->slave_dirn = slave_dirn;
-       desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
        if (!desc)
                goto out;
 
index e7d1920..0ae986c 100644 (file)
@@ -358,7 +358,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
        pm_runtime_use_autosuspend(&pdev->dev);
 
        /* HS200 is broken at this moment */
-       host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+       host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
 
        ret = sdhci_add_host(host);
        if (ret)
index 41c2677..083e7e0 100644 (file)
@@ -372,7 +372,7 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
         * on temperature
         */
        if (temperature < -20000)
-               phase_delay = min(max_window + 4 * max_len - 24,
+               phase_delay = min(max_window + 4 * (max_len - 1) - 24,
                                  max_window +
                                  DIV_ROUND_UP(13 * max_len, 16) * 4);
        else if (temperature < 20000)
index 2f0b092..c5ba13f 100644 (file)
@@ -163,7 +163,6 @@ struct tmio_mmc_host {
        unsigned long           last_req_ts;
        struct mutex            ios_lock;       /* protect set_ios() context */
        bool                    native_hotplug;
-       bool                    runtime_synced;
        bool                    sdio_irq_enabled;
 
        /* Mandatory callback */
index 9b6e100..86b5911 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
 #include <linux/pm_qos.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
@@ -1248,10 +1249,12 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
        /* See if we also get DMA */
        tmio_mmc_request_dma(_host, pdata);
 
+       dev_pm_domain_start(&pdev->dev);
+       pm_runtime_get_noresume(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
-       pm_runtime_get_sync(&pdev->dev);
 
        ret = mmc_add_host(mmc);
        if (ret)
@@ -1333,11 +1336,6 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
 {
        struct tmio_mmc_host *host = dev_get_drvdata(dev);
 
-       if (!host->runtime_synced) {
-               host->runtime_synced = true;
-               return 0;
-       }
-
        tmio_mmc_clk_enable(host);
        tmio_mmc_hw_reset(host->mmc);
 
index 8c79bad..4f2e691 100644 (file)
@@ -952,7 +952,7 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
        struct bond_vlan_tag *tags;
 
        if (is_vlan_dev(upper) &&
-           bond->nest_level == vlan_get_encap_level(upper) - 1) {
+           bond->dev->lower_level == upper->lower_level - 1) {
                if (upper->addr_assign_type == NET_ADDR_STOLEN) {
                        alb_send_lp_vid(slave, mac_addr,
                                        vlan_dev_vlan_proto(upper),
index 21d8fcc..62f6557 100644 (file)
@@ -1733,8 +1733,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                goto err_upper_unlink;
        }
 
-       bond->nest_level = dev_get_nest_level(bond_dev) + 1;
-
        /* If the mode uses primary, then the following is handled by
         * bond_change_active_slave().
         */
@@ -1816,7 +1814,8 @@ err_detach:
        slave_disable_netpoll(new_slave);
 
 err_close:
-       slave_dev->priv_flags &= ~IFF_BONDING;
+       if (!netif_is_bond_master(slave_dev))
+               slave_dev->priv_flags &= ~IFF_BONDING;
        dev_close(slave_dev);
 
 err_restore_mac:
@@ -1956,9 +1955,6 @@ static int __bond_release_one(struct net_device *bond_dev,
        if (!bond_has_slaves(bond)) {
                bond_set_carrier(bond);
                eth_hw_addr_random(bond_dev);
-               bond->nest_level = SINGLE_DEPTH_NESTING;
-       } else {
-               bond->nest_level = dev_get_nest_level(bond_dev) + 1;
        }
 
        unblock_netpoll_tx();
@@ -2017,7 +2013,8 @@ static int __bond_release_one(struct net_device *bond_dev,
        else
                dev_set_mtu(slave_dev, slave->original_mtu);
 
-       slave_dev->priv_flags &= ~IFF_BONDING;
+       if (!netif_is_bond_master(slave_dev))
+               slave_dev->priv_flags &= ~IFF_BONDING;
 
        bond_free_slave(slave);
 
@@ -2086,8 +2083,7 @@ static int bond_miimon_inspect(struct bonding *bond)
        ignore_updelay = !rcu_dereference(bond->curr_active_slave);
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               slave->new_link = BOND_LINK_NOCHANGE;
-               slave->link_new_state = slave->link;
+               bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
 
                link_state = bond_check_dev_link(bond, slave->dev, 0);
 
@@ -2121,7 +2117,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        }
 
                        if (slave->delay <= 0) {
-                               slave->new_link = BOND_LINK_DOWN;
+                               bond_propose_link_state(slave, BOND_LINK_DOWN);
                                commit++;
                                continue;
                        }
@@ -2158,7 +2154,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                                slave->delay = 0;
 
                        if (slave->delay <= 0) {
-                               slave->new_link = BOND_LINK_UP;
+                               bond_propose_link_state(slave, BOND_LINK_UP);
                                commit++;
                                ignore_updelay = false;
                                continue;
@@ -2196,7 +2192,7 @@ static void bond_miimon_commit(struct bonding *bond)
        struct slave *slave, *primary;
 
        bond_for_each_slave(bond, slave, iter) {
-               switch (slave->new_link) {
+               switch (slave->link_new_state) {
                case BOND_LINK_NOCHANGE:
                        /* For 802.3ad mode, check current slave speed and
                         * duplex again in case its port was disabled after
@@ -2268,8 +2264,8 @@ static void bond_miimon_commit(struct bonding *bond)
 
                default:
                        slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
-                                 slave->new_link);
-                       slave->new_link = BOND_LINK_NOCHANGE;
+                                 slave->link_new_state);
+                       bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
 
                        continue;
                }
@@ -2677,13 +2673,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
        bond_for_each_slave_rcu(bond, slave, iter) {
                unsigned long trans_start = dev_trans_start(slave->dev);
 
-               slave->new_link = BOND_LINK_NOCHANGE;
+               bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
 
                if (slave->link != BOND_LINK_UP) {
                        if (bond_time_in_interval(bond, trans_start, 1) &&
                            bond_time_in_interval(bond, slave->last_rx, 1)) {
 
-                               slave->new_link = BOND_LINK_UP;
+                               bond_propose_link_state(slave, BOND_LINK_UP);
                                slave_state_changed = 1;
 
                                /* primary_slave has no meaning in round-robin
@@ -2708,7 +2704,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
                        if (!bond_time_in_interval(bond, trans_start, 2) ||
                            !bond_time_in_interval(bond, slave->last_rx, 2)) {
 
-                               slave->new_link = BOND_LINK_DOWN;
+                               bond_propose_link_state(slave, BOND_LINK_DOWN);
                                slave_state_changed = 1;
 
                                if (slave->link_failure_count < UINT_MAX)
@@ -2739,8 +2735,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
                        goto re_arm;
 
                bond_for_each_slave(bond, slave, iter) {
-                       if (slave->new_link != BOND_LINK_NOCHANGE)
-                               slave->link = slave->new_link;
+                       if (slave->link_new_state != BOND_LINK_NOCHANGE)
+                               slave->link = slave->link_new_state;
                }
 
                if (slave_state_changed) {
@@ -2763,9 +2759,9 @@ re_arm:
 }
 
 /* Called to inspect slaves for active-backup mode ARP monitor link state
- * changes.  Sets new_link in slaves to specify what action should take
- * place for the slave.  Returns 0 if no changes are found, >0 if changes
- * to link states must be committed.
+ * changes.  Sets proposed link state in slaves to specify what action
+ * should take place for the slave.  Returns 0 if no changes are found, >0
+ * if changes to link states must be committed.
  *
  * Called with rcu_read_lock held.
  */
@@ -2777,12 +2773,12 @@ static int bond_ab_arp_inspect(struct bonding *bond)
        int commit = 0;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               slave->new_link = BOND_LINK_NOCHANGE;
+               bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
                last_rx = slave_last_rx(bond, slave);
 
                if (slave->link != BOND_LINK_UP) {
                        if (bond_time_in_interval(bond, last_rx, 1)) {
-                               slave->new_link = BOND_LINK_UP;
+                               bond_propose_link_state(slave, BOND_LINK_UP);
                                commit++;
                        }
                        continue;
@@ -2810,7 +2806,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
                if (!bond_is_active_slave(slave) &&
                    !rcu_access_pointer(bond->current_arp_slave) &&
                    !bond_time_in_interval(bond, last_rx, 3)) {
-                       slave->new_link = BOND_LINK_DOWN;
+                       bond_propose_link_state(slave, BOND_LINK_DOWN);
                        commit++;
                }
 
@@ -2823,7 +2819,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
                if (bond_is_active_slave(slave) &&
                    (!bond_time_in_interval(bond, trans_start, 2) ||
                     !bond_time_in_interval(bond, last_rx, 2))) {
-                       slave->new_link = BOND_LINK_DOWN;
+                       bond_propose_link_state(slave, BOND_LINK_DOWN);
                        commit++;
                }
        }
@@ -2843,7 +2839,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
        struct slave *slave;
 
        bond_for_each_slave(bond, slave, iter) {
-               switch (slave->new_link) {
+               switch (slave->link_new_state) {
                case BOND_LINK_NOCHANGE:
                        continue;
 
@@ -2893,8 +2889,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
                        continue;
 
                default:
-                       slave_err(bond->dev, slave->dev, "impossible: new_link %d on slave\n",
-                                 slave->new_link);
+                       slave_err(bond->dev, slave->dev,
+                                 "impossible: link_new_state %d on slave\n",
+                                 slave->link_new_state);
                        continue;
                }
 
@@ -3442,13 +3439,6 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
        }
 }
 
-static int bond_get_nest_level(struct net_device *bond_dev)
-{
-       struct bonding *bond = netdev_priv(bond_dev);
-
-       return bond->nest_level;
-}
-
 static void bond_get_stats(struct net_device *bond_dev,
                           struct rtnl_link_stats64 *stats)
 {
@@ -3457,7 +3447,7 @@ static void bond_get_stats(struct net_device *bond_dev,
        struct list_head *iter;
        struct slave *slave;
 
-       spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
+       spin_lock(&bond->stats_lock);
        memcpy(stats, &bond->bond_stats, sizeof(*stats));
 
        rcu_read_lock();
@@ -4268,7 +4258,6 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_neigh_setup        = bond_neigh_setup,
        .ndo_vlan_rx_add_vid    = bond_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bond_vlan_rx_kill_vid,
-       .ndo_get_lock_subclass  = bond_get_nest_level,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_netpoll_setup      = bond_netpoll_setup,
        .ndo_netpoll_cleanup    = bond_netpoll_cleanup,
@@ -4296,7 +4285,6 @@ void bond_setup(struct net_device *bond_dev)
        struct bonding *bond = netdev_priv(bond_dev);
 
        spin_lock_init(&bond->mode_lock);
-       spin_lock_init(&bond->stats_lock);
        bond->params = bonding_defaults;
 
        /* Initialize pointers */
@@ -4365,6 +4353,7 @@ static void bond_uninit(struct net_device *bond_dev)
 
        list_del(&bond->bond_list);
 
+       lockdep_unregister_key(&bond->stats_lock_key);
        bond_debug_unregister(bond);
 }
 
@@ -4768,8 +4757,9 @@ static int bond_init(struct net_device *bond_dev)
        if (!bond->wq)
                return -ENOMEM;
 
-       bond->nest_level = SINGLE_DEPTH_NESTING;
-       netdev_lockdep_set_classes(bond_dev);
+       spin_lock_init(&bond->stats_lock);
+       lockdep_register_key(&bond->stats_lock_key);
+       lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
 
        list_add_tail(&bond->bond_list, &bn->dev_list);
 
index 606b7d8..8e9f562 100644 (file)
@@ -52,6 +52,7 @@
 #define CONTROL_EX_PDR         BIT(8)
 
 /* control register */
+#define CONTROL_SWR            BIT(15)
 #define CONTROL_TEST           BIT(7)
 #define CONTROL_CCE            BIT(6)
 #define CONTROL_DISABLE_AR     BIT(5)
@@ -97,6 +98,9 @@
 #define BTR_TSEG2_SHIFT                12
 #define BTR_TSEG2_MASK         (0x7 << BTR_TSEG2_SHIFT)
 
+/* interrupt register */
+#define INT_STS_PENDING                0x8000
+
 /* brp extension register */
 #define BRP_EXT_BRPE_MASK      0x0f
 #define BRP_EXT_BRPE_SHIFT     0
@@ -569,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev)
                                   IF_MCONT_RCV_EOB);
 }
 
+static int c_can_software_reset(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+       int retry = 0;
+
+       if (priv->type != BOSCH_D_CAN)
+               return 0;
+
+       priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT);
+       while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) {
+               msleep(20);
+               if (retry++ > 100) {
+                       netdev_err(dev, "CCTRL: software reset failed\n");
+                       return -EIO;
+               }
+       }
+
+       return 0;
+}
+
 /*
  * Configure C_CAN chip:
  * - enable/disable auto-retransmission
@@ -578,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev)
 static int c_can_chip_config(struct net_device *dev)
 {
        struct c_can_priv *priv = netdev_priv(dev);
+       int err;
+
+       err = c_can_software_reset(dev);
+       if (err)
+               return err;
 
        /* enable automatic retransmission */
        priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
@@ -886,6 +915,9 @@ static int c_can_handle_state_change(struct net_device *dev,
        struct can_berr_counter bec;
 
        switch (error_type) {
+       case C_CAN_NO_ERROR:
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               break;
        case C_CAN_ERROR_WARNING:
                /* error warning state */
                priv->can.can_stats.error_warning++;
@@ -916,6 +948,13 @@ static int c_can_handle_state_change(struct net_device *dev,
                                ERR_CNT_RP_SHIFT;
 
        switch (error_type) {
+       case C_CAN_NO_ERROR:
+               /* error warning state */
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+               cf->data[6] = bec.txerr;
+               cf->data[7] = bec.rxerr;
+               break;
        case C_CAN_ERROR_WARNING:
                /* error warning state */
                cf->can_id |= CAN_ERR_CRTL;
@@ -1029,10 +1068,16 @@ static int c_can_poll(struct napi_struct *napi, int quota)
        u16 curr, last = priv->last_status;
        int work_done = 0;
 
-       priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
-       /* Ack status on C_CAN. D_CAN is self clearing */
-       if (priv->type != BOSCH_D_CAN)
-               priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
+       /* Only read the status register if a status interrupt was pending */
+       if (atomic_xchg(&priv->sie_pending, 0)) {
+               priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
+               /* Ack status on C_CAN. D_CAN is self clearing */
+               if (priv->type != BOSCH_D_CAN)
+                       priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
+       } else {
+               /* no change detected ... */
+               curr = last;
+       }
 
        /* handle state changes */
        if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
@@ -1054,11 +1099,17 @@ static int c_can_poll(struct napi_struct *napi, int quota)
        /* handle bus recovery events */
        if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
                netdev_dbg(dev, "left bus off state\n");
-               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
        }
+
        if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
                netdev_dbg(dev, "left error passive state\n");
-               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
+       }
+
+       if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) {
+               netdev_dbg(dev, "left error warning state\n");
+               work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR);
        }
 
        /* handle lec errors on the bus */
@@ -1083,10 +1134,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
 {
        struct net_device *dev = (struct net_device *)dev_id;
        struct c_can_priv *priv = netdev_priv(dev);
+       int reg_int;
 
-       if (!priv->read_reg(priv, C_CAN_INT_REG))
+       reg_int = priv->read_reg(priv, C_CAN_INT_REG);
+       if (!reg_int)
                return IRQ_NONE;
 
+       /* save for later use */
+       if (reg_int & INT_STS_PENDING)
+               atomic_set(&priv->sie_pending, 1);
+
        /* disable all interrupts and schedule the NAPI */
        c_can_irq_control(priv, false);
        napi_schedule(&priv->napi);
index 8acdc7f..d5567a7 100644 (file)
@@ -198,6 +198,7 @@ struct c_can_priv {
        struct net_device *dev;
        struct device *device;
        atomic_t tx_active;
+       atomic_t sie_pending;
        unsigned long tx_dir;
        int last_status;
        u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
index ac86be5..1c88c36 100644 (file)
@@ -848,6 +848,7 @@ void of_can_transceiver(struct net_device *dev)
                return;
 
        ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+       of_node_put(dn);
        if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
                netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
 }
index dc5695d..57f9a2f 100644 (file)
@@ -677,6 +677,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
        struct can_frame *cf;
        bool rx_errors = false, tx_errors = false;
        u32 timestamp;
+       int err;
 
        timestamp = priv->read(&regs->timer) << 16;
 
@@ -725,7 +726,9 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
        if (tx_errors)
                dev->stats.tx_errors++;
 
-       can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+       err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+       if (err)
+               dev->stats.rx_fifo_errors++;
 }
 
 static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
@@ -738,6 +741,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
        int flt;
        struct can_berr_counter bec;
        u32 timestamp;
+       int err;
 
        timestamp = priv->read(&regs->timer) << 16;
 
@@ -769,7 +773,9 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
        if (unlikely(new_state == CAN_STATE_BUS_OFF))
                can_bus_off(dev);
 
-       can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+       err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+       if (err)
+               dev->stats.rx_fifo_errors++;
 }
 
 static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -1188,6 +1194,7 @@ static int flexcan_chip_start(struct net_device *dev)
                reg_mecr = priv->read(&regs->mecr);
                reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
                priv->write(reg_mecr, &regs->mecr);
+               reg_mecr |= FLEXCAN_MECR_ECCDIS;
                reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
                              FLEXCAN_MECR_FANCEI_MSK);
                priv->write(reg_mecr, &regs->mecr);
index e6a668e..84cae16 100644 (file)
@@ -107,37 +107,95 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
        return cb_b->timestamp - cb_a->timestamp;
 }
 
-static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
+/**
+ * can_rx_offload_offload_one() - Read one CAN frame from HW
+ * @offload: pointer to rx_offload context
+ * @n: number of mailbox to read
+ *
+ * The task of this function is to read a CAN frame from mailbox @n
+ * from the device and return the mailbox's content as a struct
+ * sk_buff.
+ *
+ * If the struct can_rx_offload::skb_queue exceeds the maximal queue
+ * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
+ * allocated, the mailbox contents is discarded by reading it into an
+ * overflow buffer. This way the mailbox is marked as free by the
+ * driver.
+ *
+ * Return: A pointer to skb containing the CAN frame on success.
+ *
+ *         NULL if the mailbox @n is empty.
+ *
+ *         ERR_PTR() in case of an error
+ */
+static struct sk_buff *
+can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
 {
-       struct sk_buff *skb = NULL;
+       struct sk_buff *skb = NULL, *skb_error = NULL;
        struct can_rx_offload_cb *cb;
        struct can_frame *cf;
        int ret;
 
-       /* If queue is full or skb not available, read to discard mailbox */
-       if (likely(skb_queue_len(&offload->skb_queue) <=
-                  offload->skb_queue_len_max))
+       if (likely(skb_queue_len(&offload->skb_queue) <
+                  offload->skb_queue_len_max)) {
                skb = alloc_can_skb(offload->dev, &cf);
+               if (unlikely(!skb))
+                       skb_error = ERR_PTR(-ENOMEM);   /* skb alloc failed */
+       } else {
+               skb_error = ERR_PTR(-ENOBUFS);          /* skb_queue is full */
+       }
 
-       if (!skb) {
+       /* If queue is full or skb not available, drop by reading into
+        * overflow buffer.
+        */
+       if (unlikely(skb_error)) {
                struct can_frame cf_overflow;
                u32 timestamp;
 
                ret = offload->mailbox_read(offload, &cf_overflow,
                                            &timestamp, n);
-               if (ret)
-                       offload->dev->stats.rx_dropped++;
 
-               return NULL;
+               /* Mailbox was empty. */
+               if (unlikely(!ret))
+                       return NULL;
+
+               /* Mailbox has been read and we're dropping it or
+                * there was a problem reading the mailbox.
+                *
+                * Increment error counters in any case.
+                */
+               offload->dev->stats.rx_dropped++;
+               offload->dev->stats.rx_fifo_errors++;
+
+               /* There was a problem reading the mailbox, propagate
+                * error value.
+                */
+               if (unlikely(ret < 0))
+                       return ERR_PTR(ret);
+
+               return skb_error;
        }
 
        cb = can_rx_offload_get_cb(skb);
        ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
-       if (!ret) {
+
+       /* Mailbox was empty. */
+       if (unlikely(!ret)) {
                kfree_skb(skb);
                return NULL;
        }
 
+       /* There was a problem reading the mailbox, propagate error value. */
+       if (unlikely(ret < 0)) {
+               kfree_skb(skb);
+
+               offload->dev->stats.rx_dropped++;
+               offload->dev->stats.rx_fifo_errors++;
+
+               return ERR_PTR(ret);
+       }
+
+       /* Mailbox was read. */
        return skb;
 }
 
@@ -157,8 +215,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
                        continue;
 
                skb = can_rx_offload_offload_one(offload, i);
-               if (!skb)
-                       break;
+               if (IS_ERR_OR_NULL(skb))
+                       continue;
 
                __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
        }
@@ -188,7 +246,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
        struct sk_buff *skb;
        int received = 0;
 
-       while ((skb = can_rx_offload_offload_one(offload, 0))) {
+       while (1) {
+               skb = can_rx_offload_offload_one(offload, 0);
+               if (IS_ERR(skb))
+                       continue;
+               if (!skb)
+                       break;
+
                skb_queue_tail(&offload->skb_queue, skb);
                received++;
        }
@@ -207,8 +271,10 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
        unsigned long flags;
 
        if (skb_queue_len(&offload->skb_queue) >
-           offload->skb_queue_len_max)
-               return -ENOMEM;
+           offload->skb_queue_len_max) {
+               kfree_skb(skb);
+               return -ENOBUFS;
+       }
 
        cb = can_rx_offload_get_cb(skb);
        cb->timestamp = timestamp;
@@ -250,8 +316,10 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                              struct sk_buff *skb)
 {
        if (skb_queue_len(&offload->skb_queue) >
-           offload->skb_queue_len_max)
-               return -ENOMEM;
+           offload->skb_queue_len_max) {
+               kfree_skb(skb);
+               return -ENOBUFS;
+       }
 
        skb_queue_tail(&offload->skb_queue, skb);
        can_rx_offload_schedule(offload);
index bb60322..0a9f42e 100644 (file)
@@ -617,6 +617,7 @@ err_free_chan:
        sl->tty = NULL;
        tty->disc_data = NULL;
        clear_bit(SLF_INUSE, &sl->flags);
+       free_netdev(sl->dev);
 
 err_exit:
        rtnl_unlock();
index bee9f7b..bb20a9b 100644 (file)
@@ -717,6 +717,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
        if (priv->after_suspend) {
                mcp251x_hw_reset(spi);
                mcp251x_setup(net, spi);
+               priv->force_quit = 0;
                if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
                        mcp251x_set_normal_mode(spi);
                } else if (priv->after_suspend & AFTER_SUSPEND_UP) {
@@ -728,7 +729,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
                        mcp251x_hw_sleep(spi);
                }
                priv->after_suspend = 0;
-               priv->force_quit = 0;
        }
 
        if (priv->restart_tx) {
index f8b19ee..31ad364 100644 (file)
@@ -73,6 +73,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
  */
 #define HECC_MAX_RX_MBOX       (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
 #define HECC_RX_FIRST_MBOX     (HECC_MAX_MAILBOXES - 1)
+#define HECC_RX_LAST_MBOX      (HECC_MAX_TX_MBOX)
 
 /* TI HECC module registers */
 #define HECC_CANME             0x0     /* Mailbox enable */
@@ -82,7 +83,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
 #define HECC_CANTA             0x10    /* Transmission acknowledge */
 #define HECC_CANAA             0x14    /* Abort acknowledge */
 #define HECC_CANRMP            0x18    /* Receive message pending */
-#define HECC_CANRML            0x1C    /* Remote message lost */
+#define HECC_CANRML            0x1C    /* Receive message lost */
 #define HECC_CANRFP            0x20    /* Remote frame pending */
 #define HECC_CANGAM            0x24    /* SECC only:Global acceptance mask */
 #define HECC_CANMC             0x28    /* Master control */
@@ -149,6 +150,8 @@ MODULE_VERSION(HECC_MODULE_VERSION);
 #define HECC_BUS_ERROR         (HECC_CANES_FE | HECC_CANES_BE |\
                                HECC_CANES_CRCE | HECC_CANES_SE |\
                                HECC_CANES_ACKE)
+#define HECC_CANES_FLAGS       (HECC_BUS_ERROR | HECC_CANES_BO |\
+                               HECC_CANES_EP | HECC_CANES_EW)
 
 #define HECC_CANMCF_RTR                BIT(4)  /* Remote transmit request */
 
@@ -382,8 +385,18 @@ static void ti_hecc_start(struct net_device *ndev)
                hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
        }
 
-       /* Prevent message over-write & Enable interrupts */
-       hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
+       /* Enable tx interrupts */
+       hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1);
+
+       /* Prevent message over-write to create a rx fifo, but not for
+        * the lowest priority mailbox, since that allows detecting
+        * overflows instead of the hardware silently dropping the
+        * messages.
+        */
+       mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
+       hecc_write(priv, HECC_CANOPC, mbx_mask);
+
+       /* Enable interrupts */
        if (priv->use_hecc1int) {
                hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
                hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
@@ -400,6 +413,9 @@ static void ti_hecc_stop(struct net_device *ndev)
 {
        struct ti_hecc_priv *priv = netdev_priv(ndev);
 
+       /* Disable the CPK; stop sending, erroring and acking */
+       hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
+
        /* Disable interrupts and disable mailboxes */
        hecc_write(priv, HECC_CANGIM, 0);
        hecc_write(priv, HECC_CANMIM, 0);
@@ -508,8 +524,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
        hecc_set_bit(priv, HECC_CANME, mbx_mask);
        spin_unlock_irqrestore(&priv->mbx_lock, flags);
 
-       hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
-       hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
        hecc_write(priv, HECC_CANTRS, mbx_mask);
 
        return NETDEV_TX_OK;
@@ -526,8 +540,10 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
                                         u32 *timestamp, unsigned int mbxno)
 {
        struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
-       u32 data;
+       u32 data, mbx_mask;
+       int ret = 1;
 
+       mbx_mask = BIT(mbxno);
        data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
        if (data & HECC_CANMID_IDE)
                cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -548,7 +564,25 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
 
        *timestamp = hecc_read_stamp(priv, mbxno);
 
-       return 1;
+       /* Check for FIFO overrun.
+        *
+        * All but the last RX mailbox have activated overwrite
+        * protection. So skip check for overrun, if we're not
+        * handling the last RX mailbox.
+        *
+        * As the overwrite protection for the last RX mailbox is
+        * disabled, the CAN core might update while we're reading
+        * it. This means the skb might be inconsistent.
+        *
+        * Return an error to let rx-offload discard this CAN frame.
+        */
+       if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
+                    hecc_read(priv, HECC_CANRML) & mbx_mask))
+               ret = -ENOBUFS;
+
+       hecc_write(priv, HECC_CANRMP, mbx_mask);
+
+       return ret;
 }
 
 static int ti_hecc_error(struct net_device *ndev, int int_status,
@@ -558,92 +592,73 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
        struct can_frame *cf;
        struct sk_buff *skb;
        u32 timestamp;
+       int err;
 
-       /* propagate the error condition to the can stack */
-       skb = alloc_can_err_skb(ndev, &cf);
-       if (!skb) {
-               if (printk_ratelimit())
-                       netdev_err(priv->ndev,
-                                  "%s: alloc_can_err_skb() failed\n",
-                                  __func__);
-               return -ENOMEM;
-       }
-
-       if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
-               if ((int_status & HECC_CANGIF_BOIF) == 0) {
-                       priv->can.state = CAN_STATE_ERROR_WARNING;
-                       ++priv->can.can_stats.error_warning;
-                       cf->can_id |= CAN_ERR_CRTL;
-                       if (hecc_read(priv, HECC_CANTEC) > 96)
-                               cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
-                       if (hecc_read(priv, HECC_CANREC) > 96)
-                               cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
-               }
-               hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
-               netdev_dbg(priv->ndev, "Error Warning interrupt\n");
-               hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
-       }
-
-       if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
-               if ((int_status & HECC_CANGIF_BOIF) == 0) {
-                       priv->can.state = CAN_STATE_ERROR_PASSIVE;
-                       ++priv->can.can_stats.error_passive;
-                       cf->can_id |= CAN_ERR_CRTL;
-                       if (hecc_read(priv, HECC_CANTEC) > 127)
-                               cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
-                       if (hecc_read(priv, HECC_CANREC) > 127)
-                               cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+       if (err_status & HECC_BUS_ERROR) {
+               /* propagate the error condition to the can stack */
+               skb = alloc_can_err_skb(ndev, &cf);
+               if (!skb) {
+                       if (net_ratelimit())
+                               netdev_err(priv->ndev,
+                                          "%s: alloc_can_err_skb() failed\n",
+                                          __func__);
+                       return -ENOMEM;
                }
-               hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
-               netdev_dbg(priv->ndev, "Error passive interrupt\n");
-               hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
-       }
-
-       /* Need to check busoff condition in error status register too to
-        * ensure warning interrupts don't hog the system
-        */
-       if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
-               priv->can.state = CAN_STATE_BUS_OFF;
-               cf->can_id |= CAN_ERR_BUSOFF;
-               hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
-               hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
-               /* Disable all interrupts in bus-off to avoid int hog */
-               hecc_write(priv, HECC_CANGIM, 0);
-               ++priv->can.can_stats.bus_off;
-               can_bus_off(ndev);
-       }
 
-       if (err_status & HECC_BUS_ERROR) {
                ++priv->can.can_stats.bus_error;
                cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-               if (err_status & HECC_CANES_FE) {
-                       hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
+               if (err_status & HECC_CANES_FE)
                        cf->data[2] |= CAN_ERR_PROT_FORM;
-               }
-               if (err_status & HECC_CANES_BE) {
-                       hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
+               if (err_status & HECC_CANES_BE)
                        cf->data[2] |= CAN_ERR_PROT_BIT;
-               }
-               if (err_status & HECC_CANES_SE) {
-                       hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
+               if (err_status & HECC_CANES_SE)
                        cf->data[2] |= CAN_ERR_PROT_STUFF;
-               }
-               if (err_status & HECC_CANES_CRCE) {
-                       hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
+               if (err_status & HECC_CANES_CRCE)
                        cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
-               }
-               if (err_status & HECC_CANES_ACKE) {
-                       hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
+               if (err_status & HECC_CANES_ACKE)
                        cf->data[3] = CAN_ERR_PROT_LOC_ACK;
-               }
+
+               timestamp = hecc_read(priv, HECC_CANLNT);
+               err = can_rx_offload_queue_sorted(&priv->offload, skb,
+                                                 timestamp);
+               if (err)
+                       ndev->stats.rx_fifo_errors++;
        }
 
-       timestamp = hecc_read(priv, HECC_CANLNT);
-       can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+       hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS);
 
        return 0;
 }
 
+static void ti_hecc_change_state(struct net_device *ndev,
+                                enum can_state rx_state,
+                                enum can_state tx_state)
+{
+       struct ti_hecc_priv *priv = netdev_priv(ndev);
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       u32 timestamp;
+       int err;
+
+       skb = alloc_can_err_skb(priv->ndev, &cf);
+       if (unlikely(!skb)) {
+               priv->can.state = max(tx_state, rx_state);
+               return;
+       }
+
+       can_change_state(priv->ndev, cf, tx_state, rx_state);
+
+       if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
+               cf->data[6] = hecc_read(priv, HECC_CANTEC);
+               cf->data[7] = hecc_read(priv, HECC_CANREC);
+       }
+
+       timestamp = hecc_read(priv, HECC_CANLNT);
+       err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+       if (err)
+               ndev->stats.rx_fifo_errors++;
+}
+
 static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
 {
        struct net_device *ndev = (struct net_device *)dev_id;
@@ -651,6 +666,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
        struct net_device_stats *stats = &ndev->stats;
        u32 mbxno, mbx_mask, int_status, err_status, stamp;
        unsigned long flags, rx_pending;
+       u32 handled = 0;
 
        int_status = hecc_read(priv,
                               priv->use_hecc1int ?
@@ -660,17 +676,66 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
                return IRQ_NONE;
 
        err_status = hecc_read(priv, HECC_CANES);
-       if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
-                         HECC_CANES_EP | HECC_CANES_EW))
+       if (unlikely(err_status & HECC_CANES_FLAGS))
                ti_hecc_error(ndev, int_status, err_status);
 
+       if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) {
+               enum can_state rx_state, tx_state;
+               u32 rec = hecc_read(priv, HECC_CANREC);
+               u32 tec = hecc_read(priv, HECC_CANTEC);
+
+               if (int_status & HECC_CANGIF_WLIF) {
+                       handled |= HECC_CANGIF_WLIF;
+                       rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0;
+                       tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0;
+                       netdev_dbg(priv->ndev, "Error Warning interrupt\n");
+                       ti_hecc_change_state(ndev, rx_state, tx_state);
+               }
+
+               if (int_status & HECC_CANGIF_EPIF) {
+                       handled |= HECC_CANGIF_EPIF;
+                       rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0;
+                       tx_state = rec <= tec ? CAN_STATE_ERROR_PASSIVE : 0;
+                       netdev_dbg(priv->ndev, "Error passive interrupt\n");
+                       ti_hecc_change_state(ndev, rx_state, tx_state);
+               }
+
+               if (int_status & HECC_CANGIF_BOIF) {
+                       handled |= HECC_CANGIF_BOIF;
+                       rx_state = CAN_STATE_BUS_OFF;
+                       tx_state = CAN_STATE_BUS_OFF;
+                       netdev_dbg(priv->ndev, "Bus off interrupt\n");
+
+                       /* Disable all interrupts */
+                       hecc_write(priv, HECC_CANGIM, 0);
+                       can_bus_off(ndev);
+                       ti_hecc_change_state(ndev, rx_state, tx_state);
+               }
+       } else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
+               enum can_state new_state, tx_state, rx_state;
+               u32 rec = hecc_read(priv, HECC_CANREC);
+               u32 tec = hecc_read(priv, HECC_CANTEC);
+
+               if (rec >= 128 || tec >= 128)
+                       new_state = CAN_STATE_ERROR_PASSIVE;
+               else if (rec >= 96 || tec >= 96)
+                       new_state = CAN_STATE_ERROR_WARNING;
+               else
+                       new_state = CAN_STATE_ERROR_ACTIVE;
+
+               if (new_state < priv->can.state) {
+                       rx_state = rec >= tec ? new_state : 0;
+                       tx_state = rec <= tec ? new_state : 0;
+                       ti_hecc_change_state(ndev, rx_state, tx_state);
+               }
+       }
+
        if (int_status & HECC_CANGIF_GMIF) {
                while (priv->tx_tail - priv->tx_head > 0) {
                        mbxno = get_tx_tail_mb(priv);
                        mbx_mask = BIT(mbxno);
                        if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
                                break;
-                       hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
                        hecc_write(priv, HECC_CANTA, mbx_mask);
                        spin_lock_irqsave(&priv->mbx_lock, flags);
                        hecc_clear_bit(priv, HECC_CANME, mbx_mask);
@@ -695,16 +760,15 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
                while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
                        can_rx_offload_irq_offload_timestamp(&priv->offload,
                                                             rx_pending);
-                       hecc_write(priv, HECC_CANRMP, rx_pending);
                }
        }
 
        /* clear all interrupt conditions - read back to avoid spurious ints */
        if (priv->use_hecc1int) {
-               hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
+               hecc_write(priv, HECC_CANGIF1, handled);
                int_status = hecc_read(priv, HECC_CANGIF1);
        } else {
-               hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
+               hecc_write(priv, HECC_CANGIF0, handled);
                int_status = hecc_read(priv, HECC_CANGIF0);
        }
 
@@ -877,7 +941,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
 
        priv->offload.mailbox_read = ti_hecc_mailbox_read;
        priv->offload.mb_first = HECC_RX_FIRST_MBOX;
-       priv->offload.mb_last = HECC_MAX_TX_MBOX;
+       priv->offload.mb_last = HECC_RX_LAST_MBOX;
        err = can_rx_offload_add_timestamp(ndev, &priv->offload);
        if (err) {
                dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
index bd6eb99..2f74f67 100644 (file)
@@ -623,6 +623,7 @@ static int gs_can_open(struct net_device *netdev)
                                           rc);
 
                                usb_unanchor_urb(urb);
+                               usb_free_urb(urb);
                                break;
                        }
 
index 19a702a..21faa2e 100644 (file)
@@ -876,9 +876,8 @@ static void mcba_usb_disconnect(struct usb_interface *intf)
        netdev_info(priv->netdev, "device disconnected\n");
 
        unregister_candev(priv->netdev);
-       free_candev(priv->netdev);
-
        mcba_urb_unlink(priv);
+       free_candev(priv->netdev);
 }
 
 static struct usb_driver mcba_usb_driver = {
index 617da29..d2539c9 100644 (file)
@@ -100,7 +100,7 @@ struct pcan_usb_msg_context {
        u8 *end;
        u8 rec_cnt;
        u8 rec_idx;
-       u8 rec_data_idx;
+       u8 rec_ts_idx;
        struct net_device *netdev;
        struct pcan_usb *pdev;
 };
@@ -436,8 +436,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
                }
                if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
                        /* no error (back to active state) */
-                       mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
-                       return 0;
+                       new_state = CAN_STATE_ERROR_ACTIVE;
+                       break;
                }
                break;
 
@@ -460,9 +460,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
                }
 
                if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
-                       /* no error (back to active state) */
-                       mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
-                       return 0;
+                       /* no error (back to warning state) */
+                       new_state = CAN_STATE_ERROR_WARNING;
+                       break;
                }
                break;
 
@@ -501,6 +501,11 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
                mc->pdev->dev.can.can_stats.error_warning++;
                break;
 
+       case CAN_STATE_ERROR_ACTIVE:
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+               break;
+
        default:
                /* CAN_STATE_MAX (trick to handle other errors) */
                cf->can_id |= CAN_ERR_CRTL;
@@ -547,10 +552,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
        mc->ptr += PCAN_USB_CMD_ARGS;
 
        if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
-               int err = pcan_usb_decode_ts(mc, !mc->rec_idx);
+               int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx);
 
                if (err)
                        return err;
+
+               /* Next packet in the buffer will have a timestamp on a single
+                * byte
+                */
+               mc->rec_ts_idx++;
        }
 
        switch (f) {
@@ -632,10 +642,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 
        cf->can_dlc = get_can_dlc(rec_len);
 
-       /* first data packet timestamp is a word */
-       if (pcan_usb_decode_ts(mc, !mc->rec_data_idx))
+       /* Only first packet timestamp is a word */
+       if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx))
                goto decode_failed;
 
+       /* Next packet in the buffer will have a timestamp on a single byte */
+       mc->rec_ts_idx++;
+
        /* read data */
        memset(cf->data, 0x0, sizeof(cf->data));
        if (status_len & PCAN_USB_STATUSLEN_RTR) {
@@ -688,7 +701,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf)
                /* handle normal can frames here */
                } else {
                        err = pcan_usb_decode_data(&mc, sl);
-                       mc.rec_data_idx++;
                }
        }
 
index 65dce64..0b7766b 100644 (file)
@@ -750,7 +750,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
        dev = netdev_priv(netdev);
 
        /* allocate a buffer large enough to send commands */
-       dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
+       dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
        if (!dev->cmd_buf) {
                err = -ENOMEM;
                goto lbl_free_candev;
index d596a2a..8fa224b 100644 (file)
@@ -996,9 +996,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
                netdev_info(priv->netdev, "device disconnected\n");
 
                unregister_netdev(priv->netdev);
-               free_candev(priv->netdev);
-
                unlink_all_urbs(priv);
+               free_candev(priv->netdev);
        }
 
 }
index 911b343..7c482b2 100644 (file)
@@ -1599,7 +1599,6 @@ static const struct xcan_devtype_data xcan_zynq_data = {
 
 static const struct xcan_devtype_data xcan_axi_data = {
        .cantype = XAXI_CAN,
-       .flags = XCAN_FLAG_TXFEMP,
        .bittiming_const = &xcan_bittiming_const,
        .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
        .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
index 26509fa..69fc130 100644 (file)
@@ -37,22 +37,11 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
        unsigned int i;
        u32 reg, offset;
 
-       if (priv->type == BCM7445_DEVICE_ID)
-               offset = CORE_STS_OVERRIDE_IMP;
-       else
-               offset = CORE_STS_OVERRIDE_IMP2;
-
        /* Enable the port memories */
        reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
        reg &= ~P_TXQ_PSM_VDD(port);
        core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 
-       /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
-       reg = core_readl(priv, CORE_IMP_CTL);
-       reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
-       reg &= ~(RX_DIS | TX_DIS);
-       core_writel(priv, reg, CORE_IMP_CTL);
-
        /* Enable forwarding */
        core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
 
@@ -71,10 +60,27 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 
        b53_brcm_hdr_setup(ds, port);
 
-       /* Force link status for IMP port */
-       reg = core_readl(priv, offset);
-       reg |= (MII_SW_OR | LINK_STS);
-       core_writel(priv, reg, offset);
+       if (port == 8) {
+               if (priv->type == BCM7445_DEVICE_ID)
+                       offset = CORE_STS_OVERRIDE_IMP;
+               else
+                       offset = CORE_STS_OVERRIDE_IMP2;
+
+               /* Force link status for IMP port */
+               reg = core_readl(priv, offset);
+               reg |= (MII_SW_OR | LINK_STS);
+               core_writel(priv, reg, offset);
+
+               /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+               reg = core_readl(priv, CORE_IMP_CTL);
+               reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+               reg &= ~(RX_DIS | TX_DIS);
+               core_writel(priv, reg, CORE_IMP_CTL);
+       } else {
+               reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+               reg &= ~(RX_DIS | TX_DIS);
+               core_writel(priv, reg, CORE_G_PCTL_PORT(port));
+       }
 }
 
 static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
@@ -1209,10 +1215,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
        struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
 
        priv->wol_ports_mask = 0;
+       /* Disable interrupts */
+       bcm_sf2_intr_disable(priv);
        dsa_unregister_switch(priv->dev->ds);
        bcm_sf2_cfp_exit(priv->dev->ds);
-       /* Disable all ports and interrupts */
-       bcm_sf2_sw_suspend(priv->dev->ds);
        bcm_sf2_mdio_unregister(priv);
 
        return 0;
index 073cbd0..d838c17 100644 (file)
@@ -273,6 +273,19 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
        int pin;
        int err;
 
+       /* Reject requests with unsupported flags */
+       if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+                               PTP_RISING_EDGE |
+                               PTP_FALLING_EDGE |
+                               PTP_STRICT_FLAGS))
+               return -EOPNOTSUPP;
+
+       /* Reject requests to enable time stamping on both edges. */
+       if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+           (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+           (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+               return -EOPNOTSUPP;
+
        pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
 
        if (pin < 0)
index f40b248..ffac0ea 100644 (file)
@@ -26,8 +26,8 @@ config NET_DSA_SJA1105_PTP
 
 config NET_DSA_SJA1105_TAS
        bool "Support for the Time-Aware Scheduler on NXP SJA1105"
-       depends on NET_DSA_SJA1105
-       depends on NET_SCH_TAPRIO
+       depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
+       depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
        help
          This enables support for the TTEthernet-based egress scheduling
          engine in the SJA1105 DSA driver, which is controlled using a
index 42d2e1b..664d664 100644 (file)
@@ -256,6 +256,9 @@ static int emac_rockchip_remove(struct platform_device *pdev)
        if (priv->regulator)
                regulator_disable(priv->regulator);
 
+       if (priv->soc_data->need_div_macclk)
+               clk_disable_unprepare(priv->macclk);
+
        free_netdev(ndev);
        return err;
 }
index b4a8cf6..04ec909 100644 (file)
@@ -10382,7 +10382,8 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
 {
        bnxt_unmap_bars(bp, bp->pdev);
        pci_release_regions(bp->pdev);
-       pci_disable_device(bp->pdev);
+       if (pci_is_enabled(bp->pdev))
+               pci_disable_device(bp->pdev);
 }
 
 static void bnxt_init_dflt_coal(struct bnxt *bp)
@@ -10669,14 +10670,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
        }
        /* fall through */
-       case BNXT_FW_RESET_STATE_RESET_FW: {
-               u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
-
+       case BNXT_FW_RESET_STATE_RESET_FW:
                bnxt_reset_all(bp);
                bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
-               bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+               bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
                return;
-       }
        case BNXT_FW_RESET_STATE_ENABLE_DEV:
                if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
                    bp->fw_health) {
index e664392..7151244 100644 (file)
@@ -29,25 +29,20 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
        val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
        health_status = val & 0xffff;
 
-       if (health_status == BNXT_FW_STATUS_HEALTHY) {
-               rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-                                                 "Healthy;");
-               if (rc)
-                       return rc;
-       } else if (health_status < BNXT_FW_STATUS_HEALTHY) {
-               rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-                                                 "Not yet completed initialization;");
+       if (health_status < BNXT_FW_STATUS_HEALTHY) {
+               rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+                                                 "Not yet completed initialization");
                if (rc)
                        return rc;
        } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
-               rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-                                                 "Encountered fatal error and cannot recover;");
+               rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+                                                 "Encountered fatal error and cannot recover");
                if (rc)
                        return rc;
        }
 
        if (val >> 16) {
-               rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
+               rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
                if (rc)
                        return rc;
        }
@@ -215,25 +210,68 @@ enum bnxt_dl_param_id {
 
 static const struct bnxt_dl_nvm_param nvm_params[] = {
        {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
-        BNXT_NVM_SHARED_CFG, 1},
+        BNXT_NVM_SHARED_CFG, 1, 1},
        {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
-        BNXT_NVM_SHARED_CFG, 1},
+        BNXT_NVM_SHARED_CFG, 1, 1},
        {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
-        NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
+        NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
        {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
-        NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
+        NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
        {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
-        BNXT_NVM_SHARED_CFG, 1},
+        BNXT_NVM_SHARED_CFG, 1, 1},
 };
 
+union bnxt_nvm_data {
+       u8      val8;
+       __le32  val32;
+};
+
+static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
+                                 union devlink_param_value *src,
+                                 int nvm_num_bits, int dl_num_bytes)
+{
+       u32 val32 = 0;
+
+       if (nvm_num_bits == 1) {
+               dst->val8 = src->vbool;
+               return;
+       }
+       if (dl_num_bytes == 4)
+               val32 = src->vu32;
+       else if (dl_num_bytes == 2)
+               val32 = (u32)src->vu16;
+       else if (dl_num_bytes == 1)
+               val32 = (u32)src->vu8;
+       dst->val32 = cpu_to_le32(val32);
+}
+
+static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
+                                   union bnxt_nvm_data *src,
+                                   int nvm_num_bits, int dl_num_bytes)
+{
+       u32 val32;
+
+       if (nvm_num_bits == 1) {
+               dst->vbool = src->val8;
+               return;
+       }
+       val32 = le32_to_cpu(src->val32);
+       if (dl_num_bytes == 4)
+               dst->vu32 = val32;
+       else if (dl_num_bytes == 2)
+               dst->vu16 = (u16)val32;
+       else if (dl_num_bytes == 1)
+               dst->vu8 = (u8)val32;
+}
+
 static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
                             int msg_len, union devlink_param_value *val)
 {
        struct hwrm_nvm_get_variable_input *req = msg;
-       void *data_addr = NULL, *buf = NULL;
        struct bnxt_dl_nvm_param nvm_param;
-       int bytesize, idx = 0, rc, i;
+       union bnxt_nvm_data *data;
        dma_addr_t data_dma_addr;
+       int idx = 0, rc, i;
 
        /* Get/Set NVM CFG parameter is supported only on PFs */
        if (BNXT_VF(bp))
@@ -254,47 +292,31 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
        else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
                idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
 
-       bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
-       switch (bytesize) {
-       case 1:
-               if (nvm_param.num_bits == 1)
-                       buf = &val->vbool;
-               else
-                       buf = &val->vu8;
-               break;
-       case 2:
-               buf = &val->vu16;
-               break;
-       case 4:
-               buf = &val->vu32;
-               break;
-       default:
-               return -EFAULT;
-       }
-
-       data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
-                                      &data_dma_addr, GFP_KERNEL);
-       if (!data_addr)
+       data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
+                                 &data_dma_addr, GFP_KERNEL);
+       if (!data)
                return -ENOMEM;
 
        req->dest_data_addr = cpu_to_le64(data_dma_addr);
-       req->data_len = cpu_to_le16(nvm_param.num_bits);
+       req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
        req->option_num = cpu_to_le16(nvm_param.offset);
        req->index_0 = cpu_to_le16(idx);
        if (idx)
                req->dimensions = cpu_to_le16(1);
 
        if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
-               memcpy(data_addr, buf, bytesize);
+               bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
+                                     nvm_param.dl_num_bytes);
                rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
        } else {
                rc = hwrm_send_message_silent(bp, msg, msg_len,
                                              HWRM_CMD_TIMEOUT);
+               if (!rc)
+                       bnxt_copy_from_nvm_data(val, data,
+                                               nvm_param.nvm_num_bits,
+                                               nvm_param.dl_num_bytes);
        }
-       if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
-               memcpy(buf, data_addr, bytesize);
-
-       dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+       dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
        if (rc == -EACCES)
                netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
        return rc;
index b97e0ba..2f4fd0a 100644 (file)
@@ -52,7 +52,8 @@ struct bnxt_dl_nvm_param {
        u16 id;
        u16 offset;
        u16 dir_type;
-       u16 num_bits;
+       u16 nvm_num_bits;
+       u8 dl_num_bytes;
 };
 
 void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
index 0f13828..1de5181 100644 (file)
@@ -1996,8 +1996,6 @@ static void reset_umac(struct bcmgenet_priv *priv)
 
        /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
        bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
-       udelay(2);
-       bcmgenet_umac_writel(priv, 0, UMAC_CMD);
 }
 
 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -2614,8 +2612,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
        spin_unlock_irq(&priv->lock);
 
        if (status & UMAC_IRQ_PHY_DET_R &&
-           priv->dev->phydev->autoneg != AUTONEG_ENABLE)
+           priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
                phy_init_hw(priv->dev->phydev);
+               genphy_config_aneg(priv->dev->phydev);
+       }
 
        /* Link UP/DOWN event */
        if (status & UMAC_IRQ_LINK_EVENT)
@@ -2879,12 +2879,6 @@ static int bcmgenet_open(struct net_device *dev)
        if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
-       ret = bcmgenet_mii_connect(dev);
-       if (ret) {
-               netdev_err(dev, "failed to connect to PHY\n");
-               goto err_clk_disable;
-       }
-
        /* take MAC out of reset */
        bcmgenet_umac_reset(priv);
 
@@ -2894,12 +2888,6 @@ static int bcmgenet_open(struct net_device *dev)
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
 
-       ret = bcmgenet_mii_config(dev, true);
-       if (ret) {
-               netdev_err(dev, "unsupported PHY\n");
-               goto err_disconnect_phy;
-       }
-
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
        if (priv->internal_phy) {
@@ -2915,7 +2903,7 @@ static int bcmgenet_open(struct net_device *dev)
        ret = bcmgenet_init_dma(priv);
        if (ret) {
                netdev_err(dev, "failed to initialize DMA\n");
-               goto err_disconnect_phy;
+               goto err_clk_disable;
        }
 
        /* Always enable ring 16 - descriptor ring */
@@ -2938,19 +2926,25 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_irq0;
        }
 
+       ret = bcmgenet_mii_probe(dev);
+       if (ret) {
+               netdev_err(dev, "failed to connect to PHY\n");
+               goto err_irq1;
+       }
+
        bcmgenet_netif_start(dev);
 
        netif_tx_start_all_queues(dev);
 
        return 0;
 
+err_irq1:
+       free_irq(priv->irq1, priv);
 err_irq0:
        free_irq(priv->irq0, priv);
 err_fini_dma:
        bcmgenet_dma_teardown(priv);
        bcmgenet_fini_dma(priv);
-err_disconnect_phy:
-       phy_disconnect(dev->phydev);
 err_clk_disable:
        if (priv->internal_phy)
                bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
@@ -3631,8 +3625,6 @@ static int bcmgenet_resume(struct device *d)
        if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
-       phy_init_hw(dev->phydev);
-
        bcmgenet_umac_reset(priv);
 
        init_umac(priv);
@@ -3641,7 +3633,10 @@ static int bcmgenet_resume(struct device *d)
        if (priv->wolopts)
                clk_disable_unprepare(priv->clk_wol);
 
+       phy_init_hw(dev->phydev);
+
        /* Speed settings must be restored */
+       genphy_config_aneg(dev->phydev);
        bcmgenet_mii_config(priv->dev, false);
 
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
index 7fbf573..dbc69d8 100644 (file)
@@ -720,8 +720,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_connect(struct net_device *dev);
 int bcmgenet_mii_config(struct net_device *dev, bool init);
+int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
 void bcmgenet_mii_setup(struct net_device *dev);
index 17bb8d6..dbe18cd 100644 (file)
@@ -173,46 +173,6 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
                                          bcmgenet_fixed_phy_link_update);
 }
 
-int bcmgenet_mii_connect(struct net_device *dev)
-{
-       struct bcmgenet_priv *priv = netdev_priv(dev);
-       struct device_node *dn = priv->pdev->dev.of_node;
-       struct phy_device *phydev;
-       u32 phy_flags = 0;
-       int ret;
-
-       /* Communicate the integrated PHY revision */
-       if (priv->internal_phy)
-               phy_flags = priv->gphy_rev;
-
-       /* Initialize link state variables that bcmgenet_mii_setup() uses */
-       priv->old_link = -1;
-       priv->old_speed = -1;
-       priv->old_duplex = -1;
-       priv->old_pause = -1;
-
-       if (dn) {
-               phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
-                                       phy_flags, priv->phy_interface);
-               if (!phydev) {
-                       pr_err("could not attach to PHY\n");
-                       return -ENODEV;
-               }
-       } else {
-               phydev = dev->phydev;
-               phydev->dev_flags = phy_flags;
-
-               ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
-                                        priv->phy_interface);
-               if (ret) {
-                       pr_err("could not attach to PHY\n");
-                       return -ENODEV;
-               }
-       }
-
-       return 0;
-}
-
 int bcmgenet_mii_config(struct net_device *dev, bool init)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -221,8 +181,38 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
        const char *phy_name = NULL;
        u32 id_mode_dis = 0;
        u32 port_ctrl;
+       int bmcr = -1;
+       int ret;
        u32 reg;
 
+       /* MAC clocking workaround during reset of umac state machines */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       if (reg & CMD_SW_RESET) {
+               /* An MII PHY must be isolated to prevent TXC contention */
+               if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+                       ret = phy_read(phydev, MII_BMCR);
+                       if (ret >= 0) {
+                               bmcr = ret;
+                               ret = phy_write(phydev, MII_BMCR,
+                                               bmcr | BMCR_ISOLATE);
+                       }
+                       if (ret) {
+                               netdev_err(dev, "failed to isolate PHY\n");
+                               return ret;
+                       }
+               }
+               /* Switch MAC clocking to RGMII generated clock */
+               bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+               /* Ensure 5 clks with Rx disabled
+                * followed by 5 clks with Reset asserted
+                */
+               udelay(4);
+               reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN);
+               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+               /* Ensure 5 more clocks before Rx is enabled */
+               udelay(2);
+       }
+
        priv->ext_phy = !priv->internal_phy &&
                        (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
 
@@ -254,6 +244,9 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
                phy_set_max_speed(phydev, SPEED_100);
                bcmgenet_sys_writel(priv,
                                    PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+               /* Restore the MII PHY after isolation */
+               if (bmcr >= 0)
+                       phy_write(phydev, MII_BMCR, bmcr);
                break;
 
        case PHY_INTERFACE_MODE_REVMII:
@@ -306,21 +299,71 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
                bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
        }
 
-       if (init) {
-               linkmode_copy(phydev->advertising, phydev->supported);
+       if (init)
+               dev_info(kdev, "configuring instance for %s\n", phy_name);
 
-               /* The internal PHY has its link interrupts routed to the
-                * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
-                * that prevents the signaling of link UP interrupts when
-                * the link operates at 10Mbps, so fallback to polling for
-                * those versions of GENET.
-                */
-               if (priv->internal_phy && !GENET_IS_V5(priv))
-                       phydev->irq = PHY_IGNORE_INTERRUPT;
+       return 0;
+}
 
-               dev_info(kdev, "configuring instance for %s\n", phy_name);
+int bcmgenet_mii_probe(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device_node *dn = priv->pdev->dev.of_node;
+       struct phy_device *phydev;
+       u32 phy_flags = 0;
+       int ret;
+
+       /* Communicate the integrated PHY revision */
+       if (priv->internal_phy)
+               phy_flags = priv->gphy_rev;
+
+       /* Initialize link state variables that bcmgenet_mii_setup() uses */
+       priv->old_link = -1;
+       priv->old_speed = -1;
+       priv->old_duplex = -1;
+       priv->old_pause = -1;
+
+       if (dn) {
+               phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+                                       phy_flags, priv->phy_interface);
+               if (!phydev) {
+                       pr_err("could not attach to PHY\n");
+                       return -ENODEV;
+               }
+       } else {
+               phydev = dev->phydev;
+               phydev->dev_flags = phy_flags;
+
+               ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
+                                        priv->phy_interface);
+               if (ret) {
+                       pr_err("could not attach to PHY\n");
+                       return -ENODEV;
+               }
        }
 
+       /* Configure port multiplexer based on what the probed PHY device since
+        * reading the 'max-speed' property determines the maximum supported
+        * PHY speed which is needed for bcmgenet_mii_config() to configure
+        * things appropriately.
+        */
+       ret = bcmgenet_mii_config(dev, true);
+       if (ret) {
+               phy_disconnect(dev->phydev);
+               return ret;
+       }
+
+       linkmode_copy(phydev->advertising, phydev->supported);
+
+       /* The internal PHY has its link interrupts routed to the
+        * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+        * that prevents the signaling of link UP interrupts when
+        * the link operates at 10Mbps, so fallback to polling for
+        * those versions of GENET.
+        */
+       if (priv->internal_phy && !GENET_IS_V5(priv))
+               dev->phydev->irq = PHY_IGNORE_INTERRUPT;
+
        return 0;
 }
 
index 77f3511..ca3aa12 100644 (file)
@@ -6280,6 +6280,10 @@ static int tg3_ptp_enable(struct ptp_clock_info *ptp,
 
        switch (rq->type) {
        case PTP_CLK_REQ_PEROUT:
+               /* Reject requests with unsupported flags */
+               if (rq->perout.flags)
+                       return -EOPNOTSUPP;
+
                if (rq->perout.index != 0)
                        return -EINVAL;
 
index 0e5de88..cdd7e5d 100644 (file)
@@ -1499,7 +1499,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
        netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
 
        netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
-       netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM;
+       netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
 
        mac = of_get_mac_address(pdev->dev.of_node);
 
index a4dead4..86b528d 100644 (file)
@@ -695,10 +695,10 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
        lld->write_cmpl_support = adap->params.write_cmpl_support;
 }
 
-static void uld_attach(struct adapter *adap, unsigned int uld)
+static int uld_attach(struct adapter *adap, unsigned int uld)
 {
-       void *handle;
        struct cxgb4_lld_info lli;
+       void *handle;
 
        uld_init(adap, &lli);
        uld_queue_init(adap, uld, &lli);
@@ -708,7 +708,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
                dev_warn(adap->pdev_dev,
                         "could not attach to the %s driver, error %ld\n",
                         adap->uld[uld].name, PTR_ERR(handle));
-               return;
+               return PTR_ERR(handle);
        }
 
        adap->uld[uld].handle = handle;
@@ -716,22 +716,22 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 
        if (adap->flags & CXGB4_FULL_INIT_DONE)
                adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
+
+       return 0;
 }
 
-/**
- *     cxgb4_register_uld - register an upper-layer driver
- *     @type: the ULD type
- *     @p: the ULD methods
+/* cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
  *
- *     Registers an upper-layer driver with this driver and notifies the ULD
- *     about any presently available devices that support its type.  Returns
- *     %-EBUSY if a ULD of the same type is already registered.
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type.
  */
 void cxgb4_register_uld(enum cxgb4_uld type,
                        const struct cxgb4_uld_info *p)
 {
-       int ret = 0;
        struct adapter *adap;
+       int ret = 0;
 
        if (type >= CXGB4_ULD_MAX)
                return;
@@ -763,8 +763,12 @@ void cxgb4_register_uld(enum cxgb4_uld type,
                if (ret)
                        goto free_irq;
                adap->uld[type] = *p;
-               uld_attach(adap, type);
+               ret = uld_attach(adap, type);
+               if (ret)
+                       goto free_txq;
                continue;
+free_txq:
+               release_sge_txq_uld(adap, type);
 free_irq:
                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);
index b3da81e..928bfea 100644 (file)
@@ -3791,15 +3791,11 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
         * write the CIDX Updates into the Status Page at the end of the
         * TX Queue.
         */
-       c.autoequiqe_to_viid = htonl((dbqt
-                                     ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
-                                     : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
+       c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
                                     FW_EQ_ETH_CMD_VIID_V(pi->viid));
 
        c.fetchszm_to_iqid =
-               htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
-                                                ? HOSTFCMODE_INGRESS_QUEUE_X
-                                                : HOSTFCMODE_STATUS_PAGE_X) |
+               htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
                      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
                      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
 
index f1a0c4d..f37c9a0 100644 (file)
@@ -763,6 +763,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
 {
        struct net_device *dev;
        struct ep93xx_priv *ep;
+       struct resource *mem;
 
        dev = platform_get_drvdata(pdev);
        if (dev == NULL)
@@ -778,8 +779,8 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
                iounmap(ep->base_addr);
 
        if (ep->res != NULL) {
-               release_resource(ep->res);
-               kfree(ep->res);
+               mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+               release_mem_region(mem->start, resource_size(mem));
        }
 
        free_netdev(dev);
index e736ce2..a8f4c69 100644 (file)
@@ -2524,6 +2524,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
        struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
 
        gemini_port_remove(port);
+       free_netdev(port->netdev);
        return 0;
 }
 
index 0b12f89..9fdf77d 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Register definitions for Gemini GMAC Ethernet device driver
  *
  * Copyright (C) 2006 Storlink, Corp.
index 9b7af94..96e9565 100644 (file)
@@ -727,6 +727,18 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
         */
        nfrags = skb_shinfo(skb)->nr_frags;
 
+       /* Setup HW checksumming */
+       csum_vlan = 0;
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           !ftgmac100_prep_tx_csum(skb, &csum_vlan))
+               goto drop;
+
+       /* Add VLAN tag */
+       if (skb_vlan_tag_present(skb)) {
+               csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
+               csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
+       }
+
        /* Get header len */
        len = skb_headlen(skb);
 
@@ -753,19 +765,6 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
        if (nfrags == 0)
                f_ctl_stat |= FTGMAC100_TXDES0_LTS;
        txdes->txdes3 = cpu_to_le32(map);
-
-       /* Setup HW checksumming */
-       csum_vlan = 0;
-       if (skb->ip_summed == CHECKSUM_PARTIAL &&
-           !ftgmac100_prep_tx_csum(skb, &csum_vlan))
-               goto drop;
-
-       /* Add VLAN tag */
-       if (skb_vlan_tag_present(skb)) {
-               csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
-               csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
-       }
-
        txdes->txdes1 = cpu_to_le32(csum_vlan);
 
        /* Next descriptor */
index 19379ba..bf5add9 100644 (file)
@@ -2232,8 +2232,16 @@ err_set_cdan:
 err_service_reg:
        free_channel(priv, channel);
 err_alloc_ch:
-       if (err == -EPROBE_DEFER)
+       if (err == -EPROBE_DEFER) {
+               for (i = 0; i < priv->num_channels; i++) {
+                       channel = priv->channel[i];
+                       nctx = &channel->nctx;
+                       dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+                       free_channel(priv, channel);
+               }
+               priv->num_channels = 0;
                return err;
+       }
 
        if (cpumask_empty(&priv->dpio_cpumask)) {
                dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
index ff2e177..df2458a 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2018 NXP
  */
index 720cd50..4ac05bf 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2013-2016 Freescale Semiconductor Inc.
  * Copyright 2016-2018 NXP
index be7914c..311c184 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2013-2016 Freescale Semiconductor Inc.
  * Copyright 2016-2018 NXP
index d4d4c72..a9c386b 100644 (file)
@@ -3558,7 +3558,7 @@ fec_probe(struct platform_device *pdev)
 
        for (i = 0; i < irq_cnt; i++) {
                snprintf(irq_name, sizeof(irq_name), "int%d", i);
-               irq = platform_get_irq_byname(pdev, irq_name);
+               irq = platform_get_irq_byname_optional(pdev, irq_name);
                if (irq < 0)
                        irq = platform_get_irq(pdev, i);
                if (irq < 0) {
@@ -3645,6 +3645,8 @@ fec_drv_remove(struct platform_device *pdev)
                regulator_disable(fep->reg_phy);
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
+       clk_disable_unprepare(fep->clk_ahb);
+       clk_disable_unprepare(fep->clk_ipg);
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(fep->phy_node);
index 19e2365..945643c 100644 (file)
@@ -600,9 +600,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
 
        INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
 
-       irq = platform_get_irq_byname(pdev, "pps");
+       irq = platform_get_irq_byname_optional(pdev, "pps");
        if (irq < 0)
-               irq = platform_get_irq(pdev, irq_idx);
+               irq = platform_get_irq_optional(pdev, irq_idx);
        /* Failure to get an irq is not fatal,
         * only the PTP_CLOCK_PPS clock events should stop
         */
index 59564ac..edec61d 100644 (file)
@@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 
        len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
        page_info = &rx->data.page_info[idx];
+       dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
+                               PAGE_SIZE, DMA_FROM_DEVICE);
 
        /* gvnic can only receive into registered segments. If the buffer
         * can't be recycled, our only choice is to copy the data out of
index 778b87b..0a9a7ee 100644 (file)
@@ -390,7 +390,21 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
        seg_desc->seg.seg_addr = cpu_to_be64(addr);
 }
 
-static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
+static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
+                                   u64 iov_offset, u64 iov_len)
+{
+       dma_addr_t dma;
+       u64 addr;
+
+       for (addr = iov_offset; addr < iov_offset + iov_len;
+            addr += PAGE_SIZE) {
+               dma = page_buses[addr / PAGE_SIZE];
+               dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
+       }
+}
+
+static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
+                         struct device *dev)
 {
        int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
        union gve_tx_desc *pkt_desc, *seg_desc;
@@ -432,6 +446,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
        skb_copy_bits(skb, 0,
                      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
                      hlen);
+       gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+                               info->iov[hdr_nfrags - 1].iov_offset,
+                               info->iov[hdr_nfrags - 1].iov_len);
        copy_offset = hlen;
 
        for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
@@ -445,6 +462,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
                skb_copy_bits(skb, copy_offset,
                              tx->tx_fifo.base + info->iov[i].iov_offset,
                              info->iov[i].iov_len);
+               gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+                                       info->iov[i].iov_offset,
+                                       info->iov[i].iov_len);
                copy_offset += info->iov[i].iov_len;
        }
 
@@ -473,7 +493,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
                gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
                return NETDEV_TX_BUSY;
        }
-       nsegs = gve_tx_add_skb(tx, skb);
+       nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
 
        netdev_tx_sent_queue(tx->netdev_txq, skb->len);
        skb_tx_timestamp(skb);
index c841674..4606a7e 100644 (file)
@@ -237,6 +237,7 @@ struct hip04_priv {
        dma_addr_t rx_phys[RX_DESC_NUM];
        unsigned int rx_head;
        unsigned int rx_buf_size;
+       unsigned int rx_cnt_remaining;
 
        struct device_node *phy_node;
        struct phy_device *phy;
@@ -575,7 +576,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
        struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
        struct net_device *ndev = priv->ndev;
        struct net_device_stats *stats = &ndev->stats;
-       unsigned int cnt = hip04_recv_cnt(priv);
        struct rx_desc *desc;
        struct sk_buff *skb;
        unsigned char *buf;
@@ -588,8 +588,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 
        /* clean up tx descriptors */
        tx_remaining = hip04_tx_reclaim(ndev, false);
-
-       while (cnt && !last) {
+       priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+       while (priv->rx_cnt_remaining && !last) {
                buf = priv->rx_buf[priv->rx_head];
                skb = build_skb(buf, priv->rx_buf_size);
                if (unlikely(!skb)) {
@@ -635,11 +635,13 @@ refill:
                hip04_set_recv_desc(priv, phys);
 
                priv->rx_head = RX_NEXT(priv->rx_head);
-               if (rx >= budget)
+               if (rx >= budget) {
+                       --priv->rx_cnt_remaining;
                        goto done;
+               }
 
-               if (--cnt == 0)
-                       cnt = hip04_recv_cnt(priv);
+               if (--priv->rx_cnt_remaining == 0)
+                       priv->rx_cnt_remaining += hip04_recv_cnt(priv);
        }
 
        if (!(priv->reg_inten & RCV_INT)) {
@@ -724,6 +726,7 @@ static int hip04_mac_open(struct net_device *ndev)
        int i;
 
        priv->rx_head = 0;
+       priv->rx_cnt_remaining = 0;
        priv->tx_head = 0;
        priv->tx_tail = 0;
        hip04_reset_ppe(priv);
@@ -1038,7 +1041,6 @@ static int hip04_remove(struct platform_device *pdev)
 
        hip04_free_ring(ndev, d);
        unregister_netdev(ndev);
-       free_irq(ndev->irq, ndev);
        of_node_put(priv->phy_node);
        cancel_work_sync(&priv->tx_timeout_task);
        free_netdev(ndev);
index 6d0457e..0833927 100644 (file)
@@ -199,7 +199,6 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
 
        ring->q = q;
        ring->flags = flags;
-       spin_lock_init(&ring->lock);
        ring->coal_param = q->handle->coal_param;
        assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
 
index e9c67c0..6ab9458 100644 (file)
@@ -274,9 +274,6 @@ struct hnae_ring {
        /* statistic */
        struct ring_stats stats;
 
-       /* ring lock for poll one */
-       spinlock_t lock;
-
        dma_addr_t desc_dma_addr;
        u32 buf_size;       /* size for hnae_desc->addr, preset by AE */
        u16 desc_num;       /* total number of desc */
index a48396d..14ab204 100644 (file)
@@ -943,15 +943,6 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
        return u > c ? (h > c && h <= u) : (h > c || h <= u);
 }
 
-/* netif_tx_lock will turn down the performance, set only when necessary */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
-#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
-#else
-#define NETIF_TX_LOCK(ring)
-#define NETIF_TX_UNLOCK(ring)
-#endif
-
 /* reclaim all desc in one budget
  * return error or number of desc left
  */
@@ -965,21 +956,16 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
        int head;
        int bytes, pkts;
 
-       NETIF_TX_LOCK(ring);
-
        head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
        rmb(); /* make sure head is ready before touch any data */
 
-       if (is_ring_empty(ring) || head == ring->next_to_clean) {
-               NETIF_TX_UNLOCK(ring);
+       if (is_ring_empty(ring) || head == ring->next_to_clean)
                return 0; /* no data to poll */
-       }
 
        if (!is_valid_clean_head(ring, head)) {
                netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
                           ring->next_to_use, ring->next_to_clean);
                ring->stats.io_err_cnt++;
-               NETIF_TX_UNLOCK(ring);
                return -EIO;
        }
 
@@ -994,8 +980,6 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
        ring->stats.tx_pkts += pkts;
        ring->stats.tx_bytes += bytes;
 
-       NETIF_TX_UNLOCK(ring);
-
        dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
        netdev_tx_completed_queue(dev_queue, pkts, bytes);
 
@@ -1055,16 +1039,12 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
        int head;
        int bytes, pkts;
 
-       NETIF_TX_LOCK(ring);
-
        head = ring->next_to_use; /* ntu :soft setted ring position*/
        bytes = 0;
        pkts = 0;
        while (head != ring->next_to_clean)
                hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
 
-       NETIF_TX_UNLOCK(ring);
-
        dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
        netdev_tx_reset_queue(dev_queue);
 }
index 75ccc1e..a099893 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
 // Copyright (c) 2016-2017 Hisilicon Limited.
 
 #ifndef __HNAE3_H
index 2110fa3..5d468ed 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
 // Copyright (c) 2016-2017 Hisilicon Limited.
 
 #ifndef __HNS3_ENET_H
index 680c350..52c9d20 100644 (file)
@@ -70,11 +70,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
 #define HNS3_NIC_LB_TEST_TX_CNT_ERR    2
 #define HNS3_NIC_LB_TEST_RX_CNT_ERR    3
 
-struct hns3_link_mode_mapping {
-       u32 hns3_link_mode;
-       u32 ethtool_link_mode;
-};
-
 static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
 {
        struct hnae3_handle *h = hns3_get_handle(ndev);
index 4821fe0..1426eb5 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
 // Copyright (c) 2016-2017 Hisilicon Limited.
 
 #ifndef __HCLGE_CMD_H
index c063301..a1790af 100644 (file)
@@ -124,7 +124,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
        if (ret)
                return ret;
 
-       for (i = 0; i < HNAE3_MAX_TC; i++) {
+       for (i = 0; i < hdev->tc_max; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -318,6 +318,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
        struct net_device *netdev = h->kinfo.netdev;
        struct hclge_dev *hdev = vport->back;
        u8 i, j, pfc_map, *prio_tc;
+       int ret;
 
        if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
@@ -347,7 +348,21 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
 
        hclge_tm_pfc_info_update(hdev);
 
-       return hclge_pause_setup_hw(hdev, false);
+       ret = hclge_pause_setup_hw(hdev, false);
+       if (ret)
+               return ret;
+
+       ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+       if (ret)
+               return ret;
+
+       ret = hclge_buffer_alloc(hdev);
+       if (ret) {
+               hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+               return ret;
+       }
+
+       return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
 }
 
 /* DCBX configuration */
index 278f21e..b04702e 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
 // Copyright (c) 2016-2017 Hisilicon Limited.
 
 #ifndef __HCLGE_DCB_H__
index e02e01b..c052bb3 100644 (file)
@@ -3587,12 +3587,28 @@ static int hclge_set_rst_done(struct hclge_dev *hdev)
 {
        struct hclge_pf_rst_done_cmd *req;
        struct hclge_desc desc;
+       int ret;
 
        req = (struct hclge_pf_rst_done_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
        req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
 
-       return hclge_cmd_send(&hdev->hw, &desc, 1);
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       /* To be compatible with the old firmware, which does not support
+        * command HCLGE_OPC_PF_RST_DONE, just print a warning and
+        * return success
+        */
+       if (ret == -EOPNOTSUPP) {
+               dev_warn(&hdev->pdev->dev,
+                        "current firmware does not support command(0x%x)!\n",
+                        HCLGE_OPC_PF_RST_DONE);
+               return 0;
+       } else if (ret) {
+               dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
+                       ret);
+       }
+
+       return ret;
 }
 
 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
@@ -6247,11 +6263,23 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
 
        func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
        req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
+
+       /* read current config parameter */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
-                                  false);
+                                  true);
        req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
        req->func_id = cpu_to_le32(func_id);
-       req->switch_param = switch_param;
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "read mac vlan switch parameter fail, ret = %d\n", ret);
+               return ret;
+       }
+
+       /* modify and write new config parameter */
+       hclge_cmd_reuse_desc(&desc, false);
+       req->switch_param = (req->switch_param & param_mask) | switch_param;
        req->param_mask = param_mask;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
index c3d56b8..59b8243 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
 // Copyright (c) 2016-2017 Hisilicon Limited.
 
 #ifndef __HCLGE_MAIN_H
index ef095d9..dd9a121 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
 // Copyright (c) 2016-2017 Hisilicon Limited.
 
 #ifndef __HCLGE_MDIO_H
index 8186109..260f22d 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
 // Copyright (c) 2016-2017 Hisilicon Limited.
 
 #ifndef __HCLGE_TM_H
index 71d3d88..be56e63 100644 (file)
@@ -607,6 +607,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
        for (i = 0; i < adapter->num_rx_queues; i++)
                rxdr[i].count = rxdr->count;
 
+       err = 0;
        if (netif_running(adapter->netdev)) {
                /* Try to get new resources before deleting old */
                err = e1000_setup_all_rx_resources(adapter);
@@ -627,14 +628,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
                adapter->rx_ring = rxdr;
                adapter->tx_ring = txdr;
                err = e1000_up(adapter);
-               if (err)
-                       goto err_setup;
        }
        kfree(tx_old);
        kfree(rx_old);
 
        clear_bit(__E1000_RESETTING, &adapter->flags);
-       return 0;
+       return err;
+
 err_setup_tx:
        e1000_free_all_rx_resources(adapter);
 err_setup_rx:
@@ -646,7 +646,6 @@ err_alloc_rx:
 err_alloc_tx:
        if (netif_running(adapter->netdev))
                e1000_up(adapter);
-err_setup:
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return err;
 }
index 530613f..69a2daa 100644 (file)
@@ -20,6 +20,8 @@
 
 /* API version 1.7 implements additional link and PHY-specific APIs  */
 #define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
 /* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
 #define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
 
index d37c6e0..7560f06 100644 (file)
@@ -1876,7 +1876,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
             hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
                hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
 
-       if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+       if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+           hw->mac.type != I40E_MAC_X722) {
                __le32 tmp;
 
                memcpy(&tmp, resp->link_type, sizeof(tmp));
index b1c3227..d07e1a8 100644 (file)
@@ -157,11 +157,6 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
                err = i40e_queue_pair_enable(vsi, qid);
                if (err)
                        return err;
-
-               /* Kick start the NAPI context so that receiving will start */
-               err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
-               if (err)
-                       return err;
        }
 
        return 0;
@@ -694,8 +689,6 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
                i40e_xdp_ring_update_tail(xdp_ring);
 
                xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
-               if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
-                       xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
        }
 
        return !!budget && work_done;
@@ -774,12 +767,8 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
        i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
 
 out_xmit:
-       if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
-               if (tx_ring->next_to_clean == tx_ring->next_to_use)
-                       xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
-               else
-                       xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
-       }
+       if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
+               xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
 
        xmit_done = i40e_xmit_zc(tx_ring, budget);
 
index 8f310e5..821987d 100644 (file)
@@ -314,7 +314,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
        q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
        q_vector->ring_mask |= BIT(r_idx);
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
-            q_vector->rx.current_itr);
+            q_vector->rx.current_itr >> 1);
        q_vector->rx.current_itr = q_vector->rx.target_itr;
 }
 
@@ -340,7 +340,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
        q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
        q_vector->num_ringpairs++;
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
-            q_vector->tx.target_itr);
+            q_vector->tx.target_itr >> 1);
        q_vector->tx.current_itr = q_vector->tx.target_itr;
 }
 
index fc624b7..2fde965 100644 (file)
@@ -1036,7 +1036,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
        struct ice_aqc_query_txsched_res_resp *buf;
        enum ice_status status = 0;
        __le16 max_sibl;
-       u8 i;
+       u16 i;
 
        if (hw->layer_info)
                return status;
index 3ec2ce0..8a6ef35 100644 (file)
@@ -466,7 +466,7 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
                        ? igb_setup_copper_link_82575
                        : igb_setup_serdes_link_82575;
 
-       if (mac->type == e1000_82580) {
+       if (mac->type == e1000_82580 || mac->type == e1000_i350) {
                switch (hw->device_id) {
                /* feature not supported on these id's */
                case E1000_DEV_ID_DH89XXCC_SGMII:
index 105b062..ed7e667 100644 (file)
@@ -753,7 +753,8 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg)
                struct net_device *netdev = igb->netdev;
                hw->hw_addr = NULL;
                netdev_err(netdev, "PCIe link lost\n");
-               WARN(1, "igb: Failed to read reg 0x%x!\n", reg);
+               WARN(pci_device_is_present(igb->pdev),
+                    "igb: Failed to read reg 0x%x!\n", reg);
        }
 
        return value;
@@ -2064,7 +2065,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
        if ((hw->phy.media_type == e1000_media_type_copper) &&
            (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
                swap_now = true;
-       } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+       } else if ((hw->phy.media_type != e1000_media_type_copper) &&
+                  !(connsw & E1000_CONNSW_SERDESD)) {
                /* copper signal takes time to appear */
                if (adapter->copper_tries < 4) {
                        adapter->copper_tries++;
@@ -2370,7 +2372,7 @@ void igb_reset(struct igb_adapter *adapter)
                adapter->ei.get_invariants(hw);
                adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
        }
-       if ((mac->type == e1000_82575) &&
+       if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
            (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
                igb_enable_mas(adapter);
        }
@@ -5673,8 +5675,8 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
         * should have been handled by the upper layers.
         */
        if (tx_ring->launchtime_enable) {
-               ts = ns_to_timespec64(first->skb->tstamp);
-               first->skb->tstamp = 0;
+               ts = ktime_to_timespec64(first->skb->tstamp);
+               first->skb->tstamp = ktime_set(0, 0);
                context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
        } else {
                context_desc->seqnum_seed = 0;
index fd3071f..c39e921 100644 (file)
@@ -521,6 +521,19 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
 
        switch (rq->type) {
        case PTP_CLK_REQ_EXTTS:
+               /* Reject requests with unsupported flags */
+               if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+                                       PTP_RISING_EDGE |
+                                       PTP_FALLING_EDGE |
+                                       PTP_STRICT_FLAGS))
+                       return -EOPNOTSUPP;
+
+               /* Reject requests failing to enable both edges. */
+               if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+                   (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+                   (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+                       return -EOPNOTSUPP;
+
                if (on) {
                        pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
                                           rq->extts.index);
@@ -551,6 +564,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
                return 0;
 
        case PTP_CLK_REQ_PEROUT:
+               /* Reject requests with unsupported flags */
+               if (rq->perout.flags)
+                       return -EOPNOTSUPP;
+
                if (on) {
                        pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
                                           rq->perout.index);
index 63b62d7..2488867 100644 (file)
@@ -824,8 +824,8 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
         * should have been handled by the upper layers.
         */
        if (tx_ring->launchtime_enable) {
-               ts = ns_to_timespec64(first->skb->tstamp);
-               first->skb->tstamp = 0;
+               ts = ktime_to_timespec64(first->skb->tstamp);
+               first->skb->tstamp = ktime_set(0, 0);
                context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
        } else {
                context_desc->launch_time = 0;
@@ -4047,7 +4047,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
                hw->hw_addr = NULL;
                netif_device_detach(netdev);
                netdev_err(netdev, "PCIe link lost, device now detached\n");
-               WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
+               WARN(pci_device_is_present(igc->pdev),
+                    "igc: Failed to read reg 0x%x!\n", reg);
        }
 
        return value;
index 1ce2397..91b3780 100644 (file)
@@ -4310,7 +4310,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
                if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
                        set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
 
-               clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
                if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
                        continue;
 
index 100ac89..d6feaac 100644 (file)
@@ -622,8 +622,6 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
        if (tx_desc) {
                ixgbe_xdp_ring_update_tail(xdp_ring);
                xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
-               if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
-                       xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
        }
 
        return !!budget && work_done;
@@ -691,12 +689,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
        if (xsk_frames)
                xsk_umem_complete_tx(umem, xsk_frames);
 
-       if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
-               if (tx_ring->next_to_clean == tx_ring->next_to_use)
-                       xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
-               else
-                       xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
-       }
+       if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
+               xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
 
        return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
 }
index c8425d3..e47783c 100644 (file)
@@ -160,16 +160,23 @@ static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
                             (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
 }
 #else
-void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
-                           struct mvneta_bm_pool *bm_pool, u8 port_map) {}
-void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-                        u8 port_map) {}
-int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
-                         struct mvneta_bm_pool *bm_pool) {return 0; }
-struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
-                                         enum mvneta_bm_type type, u8 port_id,
-                                         int pkt_size) { return NULL; }
+static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+                                         struct mvneta_bm_pool *bm_pool,
+                                         u8 port_map) {}
+static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv,
+                                      struct mvneta_bm_pool *bm_pool,
+                                      u8 port_map) {}
+static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{ return 0; }
+static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+                                       struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv,
+                                                       u8 pool_id,
+                                                       enum mvneta_bm_type type,
+                                                       u8 port_id,
+                                                       int pkt_size)
+{ return NULL; }
 
 static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
                                         struct mvneta_bm_pool *bm_pool,
@@ -178,7 +185,8 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
 static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
                                        struct mvneta_bm_pool *bm_pool)
 { return 0; }
-struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; }
-void mvneta_bm_put(struct mvneta_bm *priv) {}
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
 #endif /* CONFIG_MVNETA_BM */
 #endif
index 206dc5d..5c1f389 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 CGX driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 CGX driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *
index fb3ba49..473d975 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 CGX driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 CGX driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *
index e332e82..413c3f2 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *
index 76a4575..75439fc 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *
index 8d6d90f..5d4df31 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *
index b2ce957..da649f6 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *
index c9d60b0..5222e42 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *
index 09a8d61..1ea92a2 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *
index f920dac..84a3906 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *
index fce9b3a..d44ac66 100644 (file)
@@ -514,8 +514,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
        /*
         * Subtract 1 from the limit because we need to allocate a
-        * spare CQE so the HCA HW can tell the difference between an
-        * empty CQ and a full CQ.
+        * spare CQE to enable resizing the CQ.
         */
        dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
        dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
@@ -4011,6 +4010,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err_params_unregister;
 
        devlink_params_publish(devlink);
+       devlink_reload_enable(devlink);
        pci_save_state(pdev);
        return 0;
 
@@ -4122,6 +4122,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        struct devlink *devlink = priv_to_devlink(priv);
        int active_vfs = 0;
 
+       devlink_reload_disable(devlink);
+
        if (mlx4_is_slave(dev))
                persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
 
index 4356f3a..1187ef1 100644 (file)
@@ -471,12 +471,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
 }
 
-static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
+static int
+mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
+                                struct resource_allocator *res_alloc,
+                                int vf)
 {
-       /* reduce the sink counter */
-       return (dev->caps.max_counters - 1 -
-               (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
-               / MLX4_MAX_PORTS;
+       struct mlx4_active_ports actv_ports;
+       int ports, counters_guaranteed;
+
+       /* For master, only allocate according to the number of phys ports */
+       if (vf == mlx4_master_func_num(dev))
+               return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
+
+       /* calculate real number of ports for the VF */
+       actv_ports = mlx4_get_active_ports(dev, vf);
+       ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+       counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
+
+       /* If we do not have enough counters for this VF, do not
+        * allocate any for it. '-1' to reduce the sink counter.
+        */
+       if ((res_alloc->res_reserved + counters_guaranteed) >
+           (dev->caps.max_counters - 1))
+               return 0;
+
+       return counters_guaranteed;
 }
 
 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
@@ -484,7 +503,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;
-       int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
 
        priv->mfunc.master.res_tracker.slave_list =
                kcalloc(dev->num_slaves, sizeof(struct slave_list),
@@ -603,16 +621,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
-                               if (t == mlx4_master_func_num(dev))
-                                       res_alloc->guaranteed[t] =
-                                               MLX4_PF_COUNTERS_PER_PORT *
-                                               MLX4_MAX_PORTS;
-                               else if (t <= max_vfs_guarantee_counter)
-                                       res_alloc->guaranteed[t] =
-                                               MLX4_VF_COUNTERS_PER_PORT *
-                                               MLX4_MAX_PORTS;
-                               else
-                                       res_alloc->guaranteed[t] = 0;
+                               res_alloc->guaranteed[t] =
+                                       mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
                                break;
                        default:
                                break;
index 8d76452..f1a7bc4 100644 (file)
@@ -345,7 +345,7 @@ struct mlx5e_tx_wqe_info {
        u8  num_wqebbs;
        u8  num_dma;
 #ifdef CONFIG_MLX5_EN_TLS
-       skb_frag_t *resync_dump_frag;
+       struct page *resync_dump_frag_page;
 #endif
 };
 
@@ -410,6 +410,7 @@ struct mlx5e_txqsq {
        struct device             *pdev;
        __be32                     mkey_be;
        unsigned long              state;
+       unsigned int               hw_mtu;
        struct hwtstamp_config    *tstamp;
        struct mlx5_clock         *clock;
 
index b3a249b..ac44bbe 100644 (file)
@@ -141,7 +141,7 @@ int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
                                    "Failed to create hv vhca stats agent, err = %ld\n",
                                    PTR_ERR(agent));
 
-               kfree(priv->stats_agent.buf);
+               kvfree(priv->stats_agent.buf);
                return IS_ERR_OR_NULL(agent);
        }
 
@@ -157,5 +157,5 @@ void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
                return;
 
        mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent);
-       kfree(priv->stats_agent.buf);
+       kvfree(priv->stats_agent.buf);
 }
index f8ee18b..13af725 100644 (file)
@@ -97,15 +97,19 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
        if (ret)
                return ret;
 
-       if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET)
+       if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
+               ip_rt_put(rt);
                return -ENETUNREACH;
+       }
 #else
        return -EOPNOTSUPP;
 #endif
 
        ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
-       if (ret < 0)
+       if (ret < 0) {
+               ip_rt_put(rt);
                return ret;
+       }
 
        if (!(*out_ttl))
                *out_ttl = ip4_dst_hoplimit(&rt->dst);
@@ -149,8 +153,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                *out_ttl = ip6_dst_hoplimit(dst);
 
        ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
-       if (ret < 0)
+       if (ret < 0) {
+               dst_release(dst);
                return ret;
+       }
 #else
        return -EOPNOTSUPP;
 #endif
index 87be967..7c8796d 100644 (file)
 #else
 /* TLS offload requires additional stop_room for:
  *  - a resync SKB.
- * kTLS offload requires additional stop_room for:
- * - static params WQE,
- * - progress params WQE, and
- * - resync DUMP per frag.
+ * kTLS offload requires fixed additional stop_room for:
+ * - a static params WQE, and a progress params WQE.
+ * The additional MTU-depending room for the resync DUMP WQEs
+ * will be calculated and added in runtime.
  */
 #define MLX5E_SQ_TLS_ROOM  \
        (MLX5_SEND_WQE_MAX_WQEBBS + \
-        MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
-        MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS)
+        MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
 #endif
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
@@ -92,7 +91,7 @@ mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
 
        /* fill sq frag edge with nops to avoid wqe wrapping two pages */
        for (; wi < edge_wi; wi++) {
-               wi->skb        = NULL;
+               memset(wi, 0, sizeof(*wi));
                wi->num_wqebbs = 1;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
index d2ff74d..46725cd 100644 (file)
@@ -38,7 +38,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
                return -ENOMEM;
 
        tx_priv->expected_seq = start_offload_tcp_sn;
-       tx_priv->crypto_info  = crypto_info;
+       tx_priv->crypto_info  = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv);
 
        /* tc and underlay_qpn values are not in use for tls tis */
index b7298f9..a3efa29 100644 (file)
         MLX5_ST_SZ_BYTES(tls_progress_params))
 #define MLX5E_KTLS_PROGRESS_WQEBBS \
        (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
-#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
+
+struct mlx5e_dump_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_data_seg data;
+};
+
+#define MLX5E_KTLS_DUMP_WQEBBS \
+       (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
 
 enum {
        MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD     = 0,
@@ -37,7 +44,7 @@ enum {
 
 struct mlx5e_ktls_offload_context_tx {
        struct tls_offload_context_tx *tx_ctx;
-       struct tls_crypto_info *crypto_info;
+       struct tls12_crypto_info_aes_gcm_128 crypto_info;
        u32 expected_seq;
        u32 tisn;
        u32 key_id;
@@ -86,14 +93,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
                                         struct mlx5e_tx_wqe **wqe, u16 *pi);
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
-                                          struct mlx5e_sq_dma *dma);
-
+                                          u32 *dma_fifo_cc);
+static inline u8
+mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
+                           unsigned int sync_len)
+{
+       /* Given the MTU and sync_len, calculates an upper bound for the
+        * number of WQEBBs needed for the TX resync DUMP WQEs of a record.
+        */
+       return MLX5E_KTLS_DUMP_WQEBBS *
+               (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
+}
 #else
 
 static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
 {
 }
 
+static inline void
+mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+                                     struct mlx5e_tx_wqe_info *wi,
+                                     u32 *dma_fifo_cc) {}
+
 #endif
 
 #endif /* __MLX5E_TLS_H__ */
index d195366..778dab1 100644 (file)
@@ -24,17 +24,12 @@ enum {
 static void
 fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
 {
-       struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
-       struct tls12_crypto_info_aes_gcm_128 *info;
+       struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
        char *initial_rn, *gcm_iv;
        u16 salt_sz, rec_seq_sz;
        char *salt, *rec_seq;
        u8 tls_version;
 
-       if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
-               return;
-
-       info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        EXTRACT_INFO_FIELDS;
 
        gcm_iv      = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
@@ -108,16 +103,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
 }
 
 static void tx_fill_wi(struct mlx5e_txqsq *sq,
-                      u16 pi, u8 num_wqebbs,
-                      skb_frag_t *resync_dump_frag,
-                      u32 num_bytes)
+                      u16 pi, u8 num_wqebbs, u32 num_bytes,
+                      struct page *page)
 {
        struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
 
-       wi->skb              = NULL;
-       wi->num_wqebbs       = num_wqebbs;
-       wi->resync_dump_frag = resync_dump_frag;
-       wi->num_bytes        = num_bytes;
+       memset(wi, 0, sizeof(*wi));
+       wi->num_wqebbs = num_wqebbs;
+       wi->num_bytes  = num_bytes;
+       wi->resync_dump_frag_page = page;
 }
 
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -145,7 +139,7 @@ post_static_params(struct mlx5e_txqsq *sq,
 
        umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
        build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
-       tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
        sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
 }
 
@@ -159,7 +153,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
 
        wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
        build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
-       tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
        sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
 }
 
@@ -169,6 +163,14 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
                              bool skip_static_post, bool fence_first_post)
 {
        bool progress_fence = skip_static_post || !fence_first_post;
+       struct mlx5_wq_cyc *wq = &sq->wq;
+       u16 contig_wqebbs_room, pi;
+
+       pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+       contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+       if (unlikely(contig_wqebbs_room <
+                    MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
+               mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
        if (!skip_static_post)
                post_static_params(sq, priv_tx, fence_first_post);
@@ -180,29 +182,36 @@ struct tx_sync_info {
        u64 rcd_sn;
        s32 sync_len;
        int nr_frags;
-       skb_frag_t *frags[MAX_SKB_FRAGS];
+       skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+enum mlx5e_ktls_sync_retval {
+       MLX5E_KTLS_SYNC_DONE,
+       MLX5E_KTLS_SYNC_FAIL,
+       MLX5E_KTLS_SYNC_SKIP_NO_DATA,
 };
 
-static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-                            u32 tcp_seq, struct tx_sync_info *info)
+static enum mlx5e_ktls_sync_retval
+tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+                u32 tcp_seq, struct tx_sync_info *info)
 {
        struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
+       enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
        struct tls_record_info *record;
        int remaining, i = 0;
        unsigned long flags;
-       bool ret = true;
 
        spin_lock_irqsave(&tx_ctx->lock, flags);
        record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
 
        if (unlikely(!record)) {
-               ret = false;
+               ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }
 
        if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-               if (!tls_record_is_start_marker(record))
-                       ret = false;
+               ret = tls_record_is_start_marker(record) ?
+                       MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }
 
@@ -211,13 +220,13 @@ static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
        while (remaining > 0) {
                skb_frag_t *frag = &record->frags[i];
 
-               __skb_frag_ref(frag);
+               get_page(skb_frag_page(frag));
                remaining -= skb_frag_size(frag);
-               info->frags[i++] = frag;
+               info->frags[i++] = *frag;
        }
        /* reduce the part which will be sent with the original SKB */
        if (remaining < 0)
-               skb_frag_size_add(info->frags[i - 1], remaining);
+               skb_frag_size_add(&info->frags[i - 1], remaining);
        info->nr_frags = i;
 out:
        spin_unlock_irqrestore(&tx_ctx->lock, flags);
@@ -229,17 +238,12 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
                      struct mlx5e_ktls_offload_context_tx *priv_tx,
                      u64 rcd_sn)
 {
-       struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
-       struct tls12_crypto_info_aes_gcm_128 *info;
+       struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
        __be64 rn_be = cpu_to_be64(rcd_sn);
        bool skip_static_post;
        u16 rec_seq_sz;
        char *rec_seq;
 
-       if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
-               return;
-
-       info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        rec_seq = info->rec_seq;
        rec_seq_sz = sizeof(info->rec_seq);
 
@@ -250,11 +254,6 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
        mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
 }
 
-struct mlx5e_dump_wqe {
-       struct mlx5_wqe_ctrl_seg ctrl;
-       struct mlx5_wqe_data_seg data;
-};
-
 static int
 tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
 {
@@ -262,7 +261,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_dump_wqe *wqe;
        dma_addr_t dma_addr = 0;
-       u8  num_wqebbs;
        u16 ds_cnt;
        int fsz;
        u16 pi;
@@ -270,7 +268,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
        wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
 
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-       num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 
        cseg = &wqe->ctrl;
        dseg = &wqe->data;
@@ -291,24 +288,27 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
        dseg->byte_count = cpu_to_be32(fsz);
        mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
 
-       tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
-       sq->pc += num_wqebbs;
-
-       WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
-            "unexpected DUMP num_wqebbs, %d > %d",
-            num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
+       sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
 
        return 0;
 }
 
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
-                                          struct mlx5e_sq_dma *dma)
+                                          u32 *dma_fifo_cc)
 {
-       struct mlx5e_sq_stats *stats = sq->stats;
+       struct mlx5e_sq_stats *stats;
+       struct mlx5e_sq_dma *dma;
+
+       if (!wi->resync_dump_frag_page)
+               return;
+
+       dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
+       stats = sq->stats;
 
        mlx5e_tx_dma_unmap(sq->pdev, dma);
-       __skb_frag_unref(wi->resync_dump_frag);
+       put_page(wi->resync_dump_frag_page);
        stats->tls_dump_packets++;
        stats->tls_dump_bytes += wi->num_bytes;
 }
@@ -318,25 +318,31 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 
-       tx_fill_wi(sq, pi, 1, NULL, 0);
+       tx_fill_wi(sq, pi, 1, 0, NULL);
 
        mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
 }
 
-static struct sk_buff *
+static enum mlx5e_ktls_sync_retval
 mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                         struct mlx5e_txqsq *sq,
-                        struct sk_buff *skb,
+                        int datalen,
                         u32 seq)
 {
        struct mlx5e_sq_stats *stats = sq->stats;
        struct mlx5_wq_cyc *wq = &sq->wq;
+       enum mlx5e_ktls_sync_retval ret;
        struct tx_sync_info info = {};
        u16 contig_wqebbs_room, pi;
        u8 num_wqebbs;
-       int i;
-
-       if (!tx_sync_info_get(priv_tx, seq, &info)) {
+       int i = 0;
+
+       ret = tx_sync_info_get(priv_tx, seq, &info);
+       if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
+               if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
+                       stats->tls_skip_no_sync_data++;
+                       return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
+               }
                /* We might get here if a retransmission reaches the driver
                 * after the relevant record is acked.
                 * It should be safe to drop the packet in this case
@@ -346,13 +352,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
        }
 
        if (unlikely(info.sync_len < 0)) {
-               u32 payload;
-               int headln;
-
-               headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
-               payload = skb->len - headln;
-               if (likely(payload <= -info.sync_len))
-                       return skb;
+               if (likely(datalen <= -info.sync_len))
+                       return MLX5E_KTLS_SYNC_DONE;
 
                stats->tls_drop_bypass_req++;
                goto err_out;
@@ -360,30 +361,62 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 
        stats->tls_ooo++;
 
-       num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
-               (info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
+       tx_post_resync_params(sq, priv_tx, info.rcd_sn);
+
+       /* If no dump WQE was sent, we need to have a fence NOP WQE before the
+        * actual data xmit.
+        */
+       if (!info.nr_frags) {
+               tx_post_fence_nop(sq);
+               return MLX5E_KTLS_SYNC_DONE;
+       }
+
+       num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+
        if (unlikely(contig_wqebbs_room < num_wqebbs))
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
        tx_post_resync_params(sq, priv_tx, info.rcd_sn);
 
-       for (i = 0; i < info.nr_frags; i++)
-               if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i))
-                       goto err_out;
+       for (; i < info.nr_frags; i++) {
+               unsigned int orig_fsz, frag_offset = 0, n = 0;
+               skb_frag_t *f = &info.frags[i];
 
-       /* If no dump WQE was sent, we need to have a fence NOP WQE before the
-        * actual data xmit.
-        */
-       if (!info.nr_frags)
-               tx_post_fence_nop(sq);
+               orig_fsz = skb_frag_size(f);
 
-       return skb;
+               do {
+                       bool fence = !(i || frag_offset);
+                       unsigned int fsz;
+
+                       n++;
+                       fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
+                       skb_frag_size_set(f, fsz);
+                       if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+                               page_ref_add(skb_frag_page(f), n - 1);
+                               goto err_out;
+                       }
+
+                       skb_frag_off_add(f, fsz);
+                       frag_offset += fsz;
+               } while (frag_offset < orig_fsz);
+
+               page_ref_add(skb_frag_page(f), n - 1);
+       }
+
+       return MLX5E_KTLS_SYNC_DONE;
 
 err_out:
-       dev_kfree_skb_any(skb);
-       return NULL;
+       for (; i < info.nr_frags; i++)
+               /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
+                * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
+                * released only upon their completions (or in mlx5e_free_txqsq_descs,
+                * if channel closes).
+                */
+               put_page(skb_frag_page(&info.frags[i]));
+
+       return MLX5E_KTLS_SYNC_FAIL;
 }
 
 struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
@@ -419,10 +452,15 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 
        seq = ntohl(tcp_hdr(skb)->seq);
        if (unlikely(priv_tx->expected_seq != seq)) {
-               skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
-               if (unlikely(!skb))
+               enum mlx5e_ktls_sync_retval ret =
+                       mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+
+               if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+                       *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+               else if (ret == MLX5E_KTLS_SYNC_FAIL)
+                       goto err_out;
+               else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
                        goto out;
-               *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
        }
 
        priv_tx->expected_seq = seq + datalen;
index c5a9c20..327c93a 100644 (file)
@@ -1021,7 +1021,7 @@ static bool ext_link_mode_requested(const unsigned long *adver)
 {
 #define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
        int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
-       __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = {0,};
 
        bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
        return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
index 7569287..772bfdb 100644 (file)
@@ -1128,6 +1128,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        sq->txq_ix    = txq_ix;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
+       sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
        sq->stop_room = MLX5E_SQ_STOP_ROOM;
        INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
@@ -1135,10 +1136,14 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
+#ifdef CONFIG_MLX5_EN_TLS
        if (mlx5_accel_is_tls_device(c->priv->mdev)) {
                set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
-               sq->stop_room += MLX5E_SQ_TLS_ROOM;
+               sq->stop_room += MLX5E_SQ_TLS_ROOM +
+                       mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
+                                                   TLS_MAX_PAYLOAD_SIZE);
        }
+#endif
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1349,9 +1354,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
        /* last doorbell out, godspeed .. */
        if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
                u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+               struct mlx5e_tx_wqe_info *wi;
                struct mlx5e_tx_wqe *nop;
 
-               sq->db.wqe_info[pi].skb = NULL;
+               wi = &sq->db.wqe_info[pi];
+
+               memset(wi, 0, sizeof(*wi));
+               wi->num_wqebbs = 1;
                nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
        }
index 95892a3..cd9bb7c 100644 (file)
@@ -611,8 +611,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
 
        mutex_lock(&esw->offloads.encap_tbl_lock);
        encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
-       if (e->compl_result || (encap_connected == neigh_connected &&
-                               ether_addr_equal(e->h_dest, ha)))
+       if (e->compl_result < 0 || (encap_connected == neigh_connected &&
+                                   ether_addr_equal(e->h_dest, ha)))
                goto unlock;
 
        mlx5e_take_all_encap_flows(e, &flow_list);
index d6a5472..82cffb3 100644 (file)
@@ -1386,8 +1386,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return 0;
 
-       if (rq->cqd.left)
+       if (rq->cqd.left) {
                work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
+               if (rq->cqd.left || work_done >= budget)
+                       goto out;
+       }
 
        cqe = mlx5_cqwq_get_cqe(cqwq);
        if (!cqe) {
index 840ec94..bbff8d8 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/udp.h>
 #include <net/udp.h>
 #include "en.h"
+#include "en/port.h"
 
 enum {
        MLX5E_ST_LINK_STATE,
@@ -80,22 +81,12 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv)
 
 static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
 {
-       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-       u32 eth_proto_oper;
-       int i;
+       u32 speed;
 
        if (!netif_carrier_ok(priv->netdev))
                return 1;
 
-       if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1))
-               return 1;
-
-       eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
-       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) {
-               if (eth_proto_oper & MLX5E_PROT_MASK(i))
-                       return 0;
-       }
-       return 1;
+       return mlx5e_port_linkspeed(priv->mdev, &speed);
 }
 
 struct mlx5ehdr {
index ac6fdcd..7e6ebd0 100644 (file)
@@ -52,11 +52,12 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
 #endif
 
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
@@ -288,11 +289,12 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                        s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
                        s->tx_tls_ctx               += sq_stats->tls_ctx;
                        s->tx_tls_ooo               += sq_stats->tls_ooo;
+                       s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
+                       s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
                        s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
+                       s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
                        s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
                        s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
-                       s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
-                       s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
 #endif
                        s->tx_cqes              += sq_stats->cqes;
                }
@@ -1472,10 +1474,12 @@ static const struct counter_desc sq_stats_desc[] = {
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
-       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
-       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
 #endif
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
index 79f261b..869f350 100644 (file)
@@ -129,11 +129,12 @@ struct mlx5e_sw_stats {
        u64 tx_tls_encrypted_bytes;
        u64 tx_tls_ctx;
        u64 tx_tls_ooo;
+       u64 tx_tls_dump_packets;
+       u64 tx_tls_dump_bytes;
        u64 tx_tls_resync_bytes;
+       u64 tx_tls_skip_no_sync_data;
        u64 tx_tls_drop_no_sync_data;
        u64 tx_tls_drop_bypass_req;
-       u64 tx_tls_dump_packets;
-       u64 tx_tls_dump_bytes;
 #endif
 
        u64 rx_xsk_packets;
@@ -273,11 +274,12 @@ struct mlx5e_sq_stats {
        u64 tls_encrypted_bytes;
        u64 tls_ctx;
        u64 tls_ooo;
+       u64 tls_dump_packets;
+       u64 tls_dump_bytes;
        u64 tls_resync_bytes;
+       u64 tls_skip_no_sync_data;
        u64 tls_drop_no_sync_data;
        u64 tls_drop_bypass_req;
-       u64 tls_dump_packets;
-       u64 tls_dump_bytes;
 #endif
        /* less likely accessed in data path */
        u64 csum_none;
index 3e78a72..fda0b37 100644 (file)
@@ -1278,8 +1278,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
        mlx5_eswitch_del_vlan_action(esw, attr);
 
        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
-               if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+               if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
                        mlx5e_detach_encap(priv, flow, out_index);
+                       kfree(attr->parse_attr->tun_info[out_index]);
+               }
        kvfree(attr->parse_attr);
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -1559,6 +1561,7 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr
                        mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
        }
 
+       kfree(e->tun_info);
        kfree(e->encap_header);
        kfree_rcu(e, rcu);
 }
@@ -2972,6 +2975,13 @@ mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
        return NULL;
 }
 
+static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
+{
+       size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
+
+       return kmemdup(tun_info, tun_size, GFP_KERNEL);
+}
+
 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow,
                              struct net_device *mirred_dev,
@@ -3028,13 +3038,15 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        refcount_set(&e->refcnt, 1);
        init_completion(&e->res_ready);
 
+       tun_info = dup_tun_info(tun_info);
+       if (!tun_info) {
+               err = -ENOMEM;
+               goto out_err_init;
+       }
        e->tun_info = tun_info;
        err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
-       if (err) {
-               kfree(e);
-               e = NULL;
-               goto out_err;
-       }
+       if (err)
+               goto out_err_init;
 
        INIT_LIST_HEAD(&e->flows);
        hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
@@ -3075,6 +3087,12 @@ out_err:
        if (e)
                mlx5e_encap_put(priv, e);
        return err;
+
+out_err_init:
+       mutex_unlock(&esw->offloads.encap_tbl_lock);
+       kfree(tun_info);
+       kfree(e);
+       return err;
 }
 
 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
@@ -3160,7 +3178,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
                               struct mlx5_esw_flow_attr *attr,
                               u32 *action)
 {
-       int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
+       int nest_level = attr->parse_attr->filter_dev->lower_level;
        struct flow_action_entry vlan_act = {
                .id = FLOW_ACTION_VLAN_POP,
        };
@@ -3295,7 +3313,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                        } else if (encap) {
                                parse_attr->mirred_ifindex[attr->out_count] =
                                        out_dev->ifindex;
-                               parse_attr->tun_info[attr->out_count] = info;
+                               parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+                               if (!parse_attr->tun_info[attr->out_count])
+                                       return -ENOMEM;
                                encap = false;
                                attr->dests[attr->out_count].flags |=
                                        MLX5_ESW_DEST_ENCAP;
index d3a67a9..67dc4f0 100644 (file)
@@ -403,7 +403,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
                                 struct mlx5_err_cqe *err_cqe)
 {
-       u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);
+       struct mlx5_cqwq *wq = &sq->cq.wq;
+       u32 ci;
+
+       ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
 
        netdev_err(sq->channel->netdev,
                   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
@@ -479,14 +482,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
                        skb = wi->skb;
 
                        if (unlikely(!skb)) {
-#ifdef CONFIG_MLX5_EN_TLS
-                               if (wi->resync_dump_frag) {
-                                       struct mlx5e_sq_dma *dma =
-                                               mlx5e_dma_get(sq, dma_fifo_cc++);
-
-                                       mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma);
-                               }
-#endif
+                               mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
                                sqcc += wi->num_wqebbs;
                                continue;
                        }
@@ -542,29 +538,38 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
 {
        struct mlx5e_tx_wqe_info *wi;
        struct sk_buff *skb;
+       u32 dma_fifo_cc;
+       u16 sqcc;
        u16 ci;
        int i;
 
-       while (sq->cc != sq->pc) {
-               ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+       sqcc = sq->cc;
+       dma_fifo_cc = sq->dma_fifo_cc;
+
+       while (sqcc != sq->pc) {
+               ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                wi = &sq->db.wqe_info[ci];
                skb = wi->skb;
 
-               if (!skb) { /* nop */
-                       sq->cc++;
+               if (!skb) {
+                       mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
+                       sqcc += wi->num_wqebbs;
                        continue;
                }
 
                for (i = 0; i < wi->num_dma; i++) {
                        struct mlx5e_sq_dma *dma =
-                               mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+                               mlx5e_dma_get(sq, dma_fifo_cc++);
 
                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                }
 
                dev_kfree_skb_any(skb);
-               sq->cc += wi->num_wqebbs;
+               sqcc += wi->num_wqebbs;
        }
+
+       sq->dma_fifo_cc = dma_fifo_cc;
+       sq->cc = sqcc;
 }
 
 #ifdef CONFIG_MLX5_CORE_IPOIB
index 00d71db..9004a07 100644 (file)
@@ -285,7 +285,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 
        mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
-       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
        if (attr->outer_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 
@@ -1080,7 +1079,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
                            MLX5_CAP_GEN(dev, max_flow_counter_15_0);
        fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
 
-       esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
+       esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
                  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
                  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
                  fdb_max);
index 1d55a32..366bda1 100644 (file)
@@ -177,22 +177,33 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
        memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
 }
 
+static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
+                                               const struct mlx5_flow_spec *spec)
+{
+       u32 port_mask, port_value;
+
+       if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
+               return spec->flow_context.flow_source ==
+                                       MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+       port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
+                            misc_parameters.source_port);
+       port_value = MLX5_GET(fte_match_param, spec->match_value,
+                             misc_parameters.source_port);
+       return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
+}
+
 bool
 mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
                              struct mlx5_flow_act *flow_act,
                              struct mlx5_flow_spec *spec)
 {
-       u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
-                                misc_parameters.source_port);
-       u32 port_value = MLX5_GET(fte_match_param, spec->match_value,
-                                 misc_parameters.source_port);
-
        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
                return false;
 
        /* push vlan on RX */
        return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
-               ((port_mask & port_value) == MLX5_VPORT_UPLINK);
+               mlx5_eswitch_offload_is_uplink_port(esw, spec);
 }
 
 struct mlx5_flow_handle *
index 4c50efe..6102113 100644 (file)
@@ -464,8 +464,10 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
        }
 
        err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
-       if (err)
+       if (err) {
+               kvfree(in);
                goto err_cqwq;
+       }
 
        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
index 579c306..3c816e8 100644 (file)
@@ -507,7 +507,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
                                MLX5_SET(dest_format_struct, in_dests,
                                         destination_eswitch_owner_vhca_id,
                                         dst->dest_attr.vport.vhca_id);
-                               if (extended_dest) {
+                               if (extended_dest &&
+                                   dst->dest_attr.vport.pkt_reformat) {
                                        MLX5_SET(dest_format_struct, in_dests,
                                                 packet_reformat,
                                                 !!(dst->dest_attr.vport.flags &
index d685122..c07f315 100644 (file)
@@ -572,7 +572,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
                return -ENOMEM;
        err = mlx5_crdump_collect(dev, cr_data);
        if (err)
-               return err;
+               goto free_data;
 
        if (priv_ctx) {
                struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
index 0059b29..43f9760 100644 (file)
@@ -236,6 +236,19 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
        if (!MLX5_PPS_CAP(mdev))
                return -EOPNOTSUPP;
 
+       /* Reject requests with unsupported flags */
+       if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+                               PTP_RISING_EDGE |
+                               PTP_FALLING_EDGE |
+                               PTP_STRICT_FLAGS))
+               return -EOPNOTSUPP;
+
+       /* Reject requests to enable time stamping on both edges. */
+       if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+           (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+           (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+               return -EOPNOTSUPP;
+
        if (rq->extts.index >= clock->ptp_info.n_pins)
                return -EINVAL;
 
@@ -290,6 +303,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
        if (!MLX5_PPS_CAP(mdev))
                return -EOPNOTSUPP;
 
+       /* Reject requests with unsupported flags */
+       if (rq->perout.flags)
+               return -EOPNOTSUPP;
+
        if (rq->perout.index >= clock->ptp_info.n_pins)
                return -EINVAL;
 
index b74b7d0..004c56c 100644 (file)
@@ -1577,6 +1577,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
                break;
        case DR_ACTION_TYP_MODIFY_HDR:
                mlx5dr_icm_free_chunk(action->rewrite.chunk);
+               kfree(action->rewrite.data);
                refcount_dec(&action->rewrite.dmn->refcount);
                break;
        default:
index e8b6560..5dcb8ba 100644 (file)
@@ -1096,6 +1096,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
        if (htbl)
                mlx5dr_htbl_put(htbl);
 
+       kfree(hw_ste_arr);
+
        return 0;
 
 free_ste:
index 14dcc78..0a0884d 100644 (file)
@@ -1186,9 +1186,12 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
        if (err)
                goto err_thermal_init;
 
-       if (mlxsw_driver->params_register && !reload)
+       if (mlxsw_driver->params_register)
                devlink_params_publish(devlink);
 
+       if (!reload)
+               devlink_reload_enable(devlink);
+
        return 0;
 
 err_thermal_init:
@@ -1249,6 +1252,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
 {
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
 
+       if (!reload)
+               devlink_reload_disable(devlink);
        if (devlink_is_reload_failed(devlink)) {
                if (!reload)
                        /* Only the parts that were not de-initialized in the
@@ -1259,7 +1264,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
                        return;
        }
 
-       if (mlxsw_core->driver->params_unregister && !reload)
+       if (mlxsw_core->driver->params_unregister)
                devlink_params_unpublish(devlink);
        mlxsw_thermal_fini(mlxsw_core->thermal);
        mlxsw_hwmon_fini(mlxsw_core->hwmon);
index 57b26c2..e8fe9a9 100644 (file)
@@ -429,6 +429,10 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
        int pulse_width = 0;
        int perout_bit = 0;
 
+       /* Reject requests with unsupported flags */
+       if (perout->flags)
+               return -EOPNOTSUPP;
+
        if (!on) {
                lan743x_ptp_perout_off(adapter);
                return 0;
index 4d1bce4..672ea13 100644 (file)
@@ -261,8 +261,15 @@ static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
                port->pvid = vid;
 
        /* Untagged egress vlan clasification */
-       if (untagged)
+       if (untagged && port->vid != vid) {
+               if (port->vid) {
+                       dev_err(ocelot->dev,
+                               "Port already has a native VLAN: %d\n",
+                               port->vid);
+                       return -EBUSY;
+               }
                port->vid = vid;
+       }
 
        ocelot_vlan_port_apply(ocelot, port);
 
@@ -934,7 +941,7 @@ end:
 static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
                                  u16 vid)
 {
-       return ocelot_vlan_vid_add(dev, vid, false, true);
+       return ocelot_vlan_vid_add(dev, vid, false, false);
 }
 
 static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
@@ -1673,9 +1680,6 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
        struct ocelot_port *ocelot_port = netdev_priv(dev);
        int err = 0;
 
-       if (!ocelot_netdevice_dev_check(dev))
-               return 0;
-
        switch (event) {
        case NETDEV_CHANGEUPPER:
                if (netif_is_bridge_master(info->upper_dev)) {
@@ -1712,12 +1716,16 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        int ret = 0;
 
+       if (!ocelot_netdevice_dev_check(dev))
+               return 0;
+
        if (event == NETDEV_PRECHANGEUPPER &&
            netif_is_lag_master(info->upper_dev)) {
                struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
                struct netlink_ext_ack *extack;
 
-               if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+               if (lag_upper_info &&
+                   lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
                        extack = netdev_notifier_info_to_extack(&info->info);
                        NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
 
index e40773c..06ac806 100644 (file)
@@ -523,7 +523,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
 #define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
 #define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
 
-void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask,
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
                     u32 offset);
 #define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
 #define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
index 1eef446..79d72c8 100644 (file)
@@ -299,22 +299,6 @@ static void nfp_repr_clean(struct nfp_repr *repr)
        nfp_port_free(repr->port);
 }
 
-static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
-static struct lock_class_key nfp_repr_netdev_addr_lock_key;
-
-static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
-                                          struct netdev_queue *txq,
-                                          void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
-}
-
-static void nfp_repr_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
-}
-
 int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
                  u32 cmsg_port_id, struct nfp_port *port,
                  struct net_device *pf_netdev)
@@ -324,8 +308,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
        u32 repr_cap = nn->tlv_caps.repr_cap;
        int err;
 
-       nfp_repr_set_lockdep_class(netdev);
-
        repr->port = port;
        repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
        if (!repr->dst)
index 72107a0..20faa8d 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
 
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/rtnetlink.h>
index 15e4323..aab3114 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
 
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/utsname.h>
index 2ce7009..38f7f40 100644 (file)
 #define QED_ROCE_QPS                   (8192)
 #define QED_ROCE_DPIS                  (8)
 #define QED_RDMA_SRQS                   QED_ROCE_QPS
-#define QED_NVM_CFG_SET_FLAGS          0xE
-#define QED_NVM_CFG_SET_PF_FLAGS       0x1E
 #define QED_NVM_CFG_GET_FLAGS          0xA
 #define QED_NVM_CFG_GET_PF_FLAGS       0x1A
+#define QED_NVM_CFG_MAX_ATTRS          50
 
 static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -2255,6 +2254,7 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        u8 entity_id, len, buf[32];
+       bool need_nvm_init = true;
        struct qed_ptt *ptt;
        u16 cfg_id, count;
        int rc = 0, i;
@@ -2271,8 +2271,10 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
 
        DP_VERBOSE(cdev, NETIF_MSG_DRV,
                   "Read config ids: num_attrs = %0d\n", count);
-       /* NVM CFG ID attributes */
-       for (i = 0; i < count; i++) {
+       /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
+        * arithmetic operations in the implementation.
+        */
+       for (i = 1; i <= count; i++) {
                cfg_id = *((u16 *)*data);
                *data += 2;
                entity_id = **data;
@@ -2282,8 +2284,21 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
                memcpy(buf, *data, len);
                *data += len;
 
-               flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS :
-                       QED_NVM_CFG_SET_FLAGS;
+               flags = 0;
+               if (need_nvm_init) {
+                       flags |= QED_NVM_CFG_OPTION_INIT;
+                       need_nvm_init = false;
+               }
+
+               /* Commit to flash and free the resources */
+               if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
+                       flags |= QED_NVM_CFG_OPTION_COMMIT |
+                                QED_NVM_CFG_OPTION_FREE;
+                       need_nvm_init = true;
+               }
+
+               if (entity_id)
+                       flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
 
                DP_VERBOSE(cdev, NETIF_MSG_DRV,
                           "cfg_id = %d entity = %d len = %d\n", cfg_id,
index 78f77b7..dcb5c91 100644 (file)
@@ -2005,7 +2005,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
            (qed_iov_validate_active_txq(p_hwfn, vf))) {
                vf->b_malicious = true;
                DP_NOTICE(p_hwfn,
-                         "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
+                         "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
                          vf->abs_vf_id);
                status = PFVF_STATUS_MALICIOUS;
                goto out;
index 8d1c208..a220cc7 100644 (file)
@@ -1208,8 +1208,16 @@ enum qede_remove_mode {
 static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 {
        struct net_device *ndev = pci_get_drvdata(pdev);
-       struct qede_dev *edev = netdev_priv(ndev);
-       struct qed_dev *cdev = edev->cdev;
+       struct qede_dev *edev;
+       struct qed_dev *cdev;
+
+       if (!ndev) {
+               dev_info(&pdev->dev, "Device has already been removed\n");
+               return;
+       }
+
+       edev = netdev_priv(ndev);
+       cdev = edev->cdev;
 
        DP_INFO(edev, "Starting qede_remove\n");
 
index 9c54b71..06de595 100644 (file)
@@ -57,10 +57,10 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
        if (port->nr_rmnet_devs)
                return -EINVAL;
 
-       kfree(port);
-
        netdev_rx_handler_unregister(real_dev);
 
+       kfree(port);
+
        /* release reference on real_dev */
        dev_put(real_dev);
 
index 350b0d9..c4e961e 100644 (file)
@@ -916,6 +916,9 @@ static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
 
 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
 {
+       if (reg == 0x1f)
+               return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4;
+
        if (tp->ocp_base != OCP_STD_PHY_BASE)
                reg -= 0x10;
 
@@ -1029,6 +1032,10 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
 {
        int value;
 
+       /* Work around issue with chip reporting wrong PHY ID */
+       if (reg == MII_PHYSID2)
+               return 0xc912;
+
        r8168dp_2_mdio_start(tp);
 
        value = r8169_mdio_read(tp, reg);
index a9c89d5..9f88b5d 100644 (file)
@@ -955,6 +955,8 @@ enum RAVB_QUEUE {
 #define NUM_RX_QUEUE   2
 #define NUM_TX_QUEUE   2
 
+#define RX_BUF_SZ      (2048 - ETH_FCS_LEN + sizeof(__sum16))
+
 /* TX descriptors per packet */
 #define NUM_TX_DESC_GEN2       2
 #define NUM_TX_DESC_GEN3       1
@@ -1018,7 +1020,6 @@ struct ravb_private {
        u32 dirty_rx[NUM_RX_QUEUE];     /* Producer ring indices */
        u32 cur_tx[NUM_TX_QUEUE];
        u32 dirty_tx[NUM_TX_QUEUE];
-       u32 rx_buf_sz;                  /* Based on MTU+slack. */
        struct napi_struct napi[NUM_RX_QUEUE];
        struct work_struct work;
        /* MII transceiver section. */
index de9aa8c..3f165c1 100644 (file)
@@ -230,7 +230,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
                                               le32_to_cpu(desc->dptr)))
                                dma_unmap_single(ndev->dev.parent,
                                                 le32_to_cpu(desc->dptr),
-                                                priv->rx_buf_sz,
+                                                RX_BUF_SZ,
                                                 DMA_FROM_DEVICE);
                }
                ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -293,9 +293,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                /* RX descriptor */
                rx_desc = &priv->rx_ring[q][i];
-               rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+               rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
                dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-                                         priv->rx_buf_sz,
+                                         RX_BUF_SZ,
                                          DMA_FROM_DEVICE);
                /* We just set the data size to 0 for a failed mapping which
                 * should prevent DMA from happening...
@@ -342,9 +342,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
        int ring_size;
        int i;
 
-       priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
-               ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
-
        /* Allocate RX and TX skb rings */
        priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
                                  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@@ -354,7 +351,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
                goto error;
 
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
-               skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
+               skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
                if (!skb)
                        goto error;
                ravb_set_buffer_align(skb);
@@ -584,7 +581,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
                        skb = priv->rx_skb[q][entry];
                        priv->rx_skb[q][entry] = NULL;
                        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-                                        priv->rx_buf_sz,
+                                        RX_BUF_SZ,
                                         DMA_FROM_DEVICE);
                        get_ts &= (q == RAVB_NC) ?
                                        RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -617,11 +614,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
        for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
                desc = &priv->rx_ring[q][entry];
-               desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+               desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 
                if (!priv->rx_skb[q][entry]) {
                        skb = netdev_alloc_skb(ndev,
-                                              priv->rx_buf_sz +
+                                              RX_BUF_SZ +
                                               RAVB_ALIGN - 1);
                        if (!skb)
                                break;  /* Better luck next round. */
@@ -1801,10 +1798,15 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 
 static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
 {
-       if (netif_running(ndev))
-               return -EBUSY;
+       struct ravb_private *priv = netdev_priv(ndev);
 
        ndev->mtu = new_mtu;
+
+       if (netif_running(ndev)) {
+               synchronize_irq(priv->emac_irq);
+               ravb_emac_init(ndev);
+       }
+
        netdev_update_features(ndev);
 
        return 0;
index 9a42580..6984bd5 100644 (file)
@@ -182,6 +182,13 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
        struct net_device *ndev = priv->ndev;
        unsigned long flags;
 
+       /* Reject requests with unsupported flags */
+       if (req->flags & ~(PTP_ENABLE_FEATURE |
+                          PTP_RISING_EDGE |
+                          PTP_FALLING_EDGE |
+                          PTP_STRICT_FLAGS))
+               return -EOPNOTSUPP;
+
        if (req->index)
                return -EINVAL;
 
@@ -211,6 +218,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
        unsigned long flags;
        int error = 0;
 
+       /* Reject requests with unsupported flags */
+       if (req->flags)
+               return -EOPNOTSUPP;
+
        if (req->index)
                return -EINVAL;
 
index ddcc191..6e47be6 100644 (file)
@@ -1226,7 +1226,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
 dwmac_mux:
        sun8i_dwmac_unset_syscon(gmac);
 dwmac_exit:
-       sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
+       stmmac_pltfr_remove(pdev);
 return ret;
 }
 
index 5a7b0ac..66e60c7 100644 (file)
@@ -432,7 +432,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
                         * bits used depends on the hardware configuration
                         * selected at core configuration time.
                         */
-                       int bit_nr = bitrev32(~crc32_le(~0, ha->addr,
+                       u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
                                        ETH_ALEN)) >> (32 - mcbitslog2);
                        /* The most significant bit determines the register to
                         * use (H/L) while the other 5 bits determine the bit
index 775db77..23fecf6 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
 // Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
 // stmmac Support for 5.xx Ethernet QoS cores
 
index 9903738..9d08a93 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
 /*
  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
  * stmmac XGMAC definitions.
index 5031398..070bd7d 100644 (file)
@@ -224,6 +224,7 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw,
        writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
 
        value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
+       value &= ~XGMAC_TSA;
        value |= XGMAC_CC | XGMAC_CBS;
        writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
 }
@@ -463,7 +464,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
                value |= XGMAC_FILTER_HMC;
 
                netdev_for_each_mc_addr(ha, dev) {
-                       int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
+                       u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
                                        (32 - mcbitslog2));
                        mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
                }
index ae48154..bd5838c 100644 (file)
@@ -288,7 +288,8 @@ static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
 
 static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
 {
-       *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
+       if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T)
+               *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
        return 0;
 }
 
index 965cbe3..f70ca53 100644 (file)
@@ -369,7 +369,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
        dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
        dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
        dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
-       dma_cap->av &= !(hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
+       dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
        dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
        dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
        dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
@@ -470,6 +470,7 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
 static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
 {
        u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+       u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
 
        value &= ~XGMAC_TXQEN;
        if (qmode != MTL_QUEUE_AVB) {
@@ -477,6 +478,7 @@ static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
                writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
        } else {
                value |= 0x1 << XGMAC_TXQEN_SHIFT;
+               writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
        }
 
        writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
index ddb851d..9010d88 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
 // Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 // stmmac HW Interface Callbacks
 
index a223584..252cf48 100644 (file)
 #define MMC_XGMAC_RX_PKT_SMD_ERR       0x22c
 #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK   0x230
 #define MMC_XGMAC_RX_FPE_FRAG          0x234
+#define MMC_XGMAC_RX_IPC_INTR_MASK     0x25c
 
 static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
 {
@@ -333,8 +334,9 @@ static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
 
 static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
 {
-       writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
-       writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+       writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
+       writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
+       writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
 }
 
 static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
index 3dfd04e..f826365 100644 (file)
@@ -2995,6 +2995,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        } else {
                stmmac_set_desc_addr(priv, first, des);
                tmp_pay_len = pay_len;
+               des += proto_hdr_len;
+               pay_len = 0;
        }
 
        stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
@@ -3022,6 +3024,19 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Only the last descriptor gets to point to the skb. */
        tx_q->tx_skbuff[tx_q->cur_tx] = skb;
 
+       /* Manage tx mitigation */
+       tx_q->tx_count_frames += nfrags + 1;
+       if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
+           !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+             priv->hwts_tx_en)) {
+               stmmac_tx_timer_arm(priv, queue);
+       } else {
+               desc = &tx_q->dma_tx[tx_q->cur_tx];
+               tx_q->tx_count_frames = 0;
+               stmmac_set_tx_ic(priv, desc);
+               priv->xstats.tx_set_ic_bit++;
+       }
+
        /* We've used all descriptors we need for this skb, however,
         * advance cur_tx so that it references a fresh descriptor.
         * ndo_start_xmit will fill this descriptor the next time it's
@@ -3039,19 +3054,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        priv->xstats.tx_tso_frames++;
        priv->xstats.tx_tso_nfrags += nfrags;
 
-       /* Manage tx mitigation */
-       tx_q->tx_count_frames += nfrags + 1;
-       if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
-           !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
-           (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-           priv->hwts_tx_en)) {
-               stmmac_tx_timer_arm(priv, queue);
-       } else {
-               tx_q->tx_count_frames = 0;
-               stmmac_set_tx_ic(priv, desc);
-               priv->xstats.tx_set_ic_bit++;
-       }
-
        if (priv->sarc_type)
                stmmac_set_desc_sarc(priv, first, priv->sarc_type);
 
@@ -3223,6 +3225,27 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Only the last descriptor gets to point to the skb. */
        tx_q->tx_skbuff[entry] = skb;
 
+       /* According to the coalesce parameter the IC bit for the latest
+        * segment is reset and the timer re-started to clean the tx status.
+        * This approach takes care about the fragments: desc is the first
+        * element in case of no SG.
+        */
+       tx_q->tx_count_frames += nfrags + 1;
+       if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
+           !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+             priv->hwts_tx_en)) {
+               stmmac_tx_timer_arm(priv, queue);
+       } else {
+               if (likely(priv->extend_desc))
+                       desc = &tx_q->dma_etx[entry].basic;
+               else
+                       desc = &tx_q->dma_tx[entry];
+
+               tx_q->tx_count_frames = 0;
+               stmmac_set_tx_ic(priv, desc);
+               priv->xstats.tx_set_ic_bit++;
+       }
+
        /* We've used all descriptors we need for this skb, however,
         * advance cur_tx so that it references a fresh descriptor.
         * ndo_start_xmit will fill this descriptor the next time it's
@@ -3258,23 +3281,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        dev->stats.tx_bytes += skb->len;
 
-       /* According to the coalesce parameter the IC bit for the latest
-        * segment is reset and the timer re-started to clean the tx status.
-        * This approach takes care about the fragments: desc is the first
-        * element in case of no SG.
-        */
-       tx_q->tx_count_frames += nfrags + 1;
-       if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
-           !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
-           (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-           priv->hwts_tx_en)) {
-               stmmac_tx_timer_arm(priv, queue);
-       } else {
-               tx_q->tx_count_frames = 0;
-               stmmac_set_tx_ic(priv, desc);
-               priv->xstats.tx_set_ic_bit++;
-       }
-
        if (priv->sarc_type)
                stmmac_set_desc_sarc(priv, first, priv->sarc_type);
 
@@ -3505,8 +3511,6 @@ read_again:
                if (unlikely(status & dma_own))
                        break;
 
-               count++;
-
                rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
                next_entry = rx_q->cur_rx;
 
@@ -3533,6 +3537,7 @@ read_again:
                        goto read_again;
                if (unlikely(error)) {
                        dev_kfree_skb(skb);
+                       count++;
                        continue;
                }
 
@@ -3572,6 +3577,7 @@ read_again:
                        skb = napi_alloc_skb(&ch->rx_napi, len);
                        if (!skb) {
                                priv->dev->stats.rx_dropped++;
+                               count++;
                                continue;
                        }
 
@@ -3637,6 +3643,7 @@ read_again:
 
                priv->dev->stats.rx_packets++;
                priv->dev->stats.rx_bytes += len;
+               count++;
        }
 
        if (status & rx_not_ls) {
index df638b1..0989e2b 100644 (file)
@@ -140,6 +140,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
 
        switch (rq->type) {
        case PTP_CLK_REQ_PEROUT:
+               /* Reject requests with unsupported flags */
+               if (rq->perout.flags)
+                       return -EOPNOTSUPP;
+
                cfg = &priv->pps[rq->perout.index];
 
                cfg->start.tv_sec = rq->perout.start.sec;
index e4ac3c4..ac3f658 100644 (file)
@@ -6,7 +6,9 @@
  * Author: Jose Abreu <joabreu@synopsys.com>
  */
 
+#include <linux/bitrev.h>
 #include <linux/completion.h>
+#include <linux/crc32.h>
 #include <linux/ethtool.h>
 #include <linux/ip.h>
 #include <linux/phy.h>
@@ -485,12 +487,48 @@ static int stmmac_filter_check(struct stmmac_priv *priv)
        return -EOPNOTSUPP;
 }
 
+static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
+{
+       int mc_offset = 32 - priv->hw->mcast_bits_log2;
+       struct netdev_hw_addr *ha;
+       u32 hash, hash_nr;
+
+       /* First compute the hash for desired addr */
+       hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
+       hash_nr = hash >> 5;
+       hash = 1 << (hash & 0x1f);
+
+       /* Now, check if it collides with any existing one */
+       netdev_for_each_mc_addr(ha, priv->dev) {
+               u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
+               if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
+                       return false;
+       }
+
+       /* No collisions, address is good to go */
+       return true;
+}
+
+static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
+{
+       struct netdev_hw_addr *ha;
+
+       /* Check if it collides with any existing one */
+       netdev_for_each_uc_addr(ha, priv->dev) {
+               if (!memcmp(ha->addr, addr, ETH_ALEN))
+                       return false;
+       }
+
+       /* No collisions, address is good to go */
+       return true;
+}
+
 static int stmmac_test_hfilt(struct stmmac_priv *priv)
 {
-       unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
-       unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05};
+       unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
+       unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct stmmac_packet_attrs attr = { };
-       int ret;
+       int ret, tries = 256;
 
        ret = stmmac_filter_check(priv);
        if (ret)
@@ -499,6 +537,16 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv)
        if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
                return -EOPNOTSUPP;
 
+       while (--tries) {
+               /* We only need to check the bd_addr for collisions */
+               bd_addr[ETH_ALEN - 1] = tries;
+               if (stmmac_hash_check(priv, bd_addr))
+                       break;
+       }
+
+       if (!tries)
+               return -EOPNOTSUPP;
+
        ret = dev_mc_add(priv->dev, gd_addr);
        if (ret)
                return ret;
@@ -523,13 +571,25 @@ cleanup:
 
 static int stmmac_test_pfilt(struct stmmac_priv *priv)
 {
-       unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
-       unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55};
+       unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
+       unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct stmmac_packet_attrs attr = { };
-       int ret;
+       int ret, tries = 256;
 
        if (stmmac_filter_check(priv))
                return -EOPNOTSUPP;
+       if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
+               return -EOPNOTSUPP;
+
+       while (--tries) {
+               /* We only need to check the bd_addr for collisions */
+               bd_addr[ETH_ALEN - 1] = tries;
+               if (stmmac_perfect_check(priv, bd_addr))
+                       break;
+       }
+
+       if (!tries)
+               return -EOPNOTSUPP;
 
        ret = dev_uc_add(priv->dev, gd_addr);
        if (ret)
@@ -553,39 +613,31 @@ cleanup:
        return ret;
 }
 
-static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
-{
-       return 0;
-}
-
-static void stmmac_test_set_rx_mode(struct net_device *netdev)
-{
-       /* As we are in test mode of ethtool we already own the rtnl lock
-        * so no address will change from user. We can just call the
-        * ndo_set_rx_mode() callback directly */
-       if (netdev->netdev_ops->ndo_set_rx_mode)
-               netdev->netdev_ops->ndo_set_rx_mode(netdev);
-}
-
 static int stmmac_test_mcfilt(struct stmmac_priv *priv)
 {
-       unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
-       unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
+       unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
+       unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct stmmac_packet_attrs attr = { };
-       int ret;
+       int ret, tries = 256;
 
        if (stmmac_filter_check(priv))
                return -EOPNOTSUPP;
-       if (!priv->hw->multicast_filter_bins)
+       if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
                return -EOPNOTSUPP;
 
-       /* Remove all MC addresses */
-       __dev_mc_unsync(priv->dev, NULL);
-       stmmac_test_set_rx_mode(priv->dev);
+       while (--tries) {
+               /* We only need to check the mc_addr for collisions */
+               mc_addr[ETH_ALEN - 1] = tries;
+               if (stmmac_hash_check(priv, mc_addr))
+                       break;
+       }
+
+       if (!tries)
+               return -EOPNOTSUPP;
 
        ret = dev_uc_add(priv->dev, uc_addr);
        if (ret)
-               goto cleanup;
+               return ret;
 
        attr.dst = uc_addr;
 
@@ -602,30 +654,34 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
 
 cleanup:
        dev_uc_del(priv->dev, uc_addr);
-       __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
-       stmmac_test_set_rx_mode(priv->dev);
        return ret;
 }
 
 static int stmmac_test_ucfilt(struct stmmac_priv *priv)
 {
-       unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
-       unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
+       unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
+       unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct stmmac_packet_attrs attr = { };
-       int ret;
+       int ret, tries = 256;
 
        if (stmmac_filter_check(priv))
                return -EOPNOTSUPP;
-       if (!priv->hw->multicast_filter_bins)
+       if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
                return -EOPNOTSUPP;
 
-       /* Remove all UC addresses */
-       __dev_uc_unsync(priv->dev, NULL);
-       stmmac_test_set_rx_mode(priv->dev);
+       while (--tries) {
+               /* We only need to check the uc_addr for collisions */
+               uc_addr[ETH_ALEN - 1] = tries;
+               if (stmmac_perfect_check(priv, uc_addr))
+                       break;
+       }
+
+       if (!tries)
+               return -EOPNOTSUPP;
 
        ret = dev_mc_add(priv->dev, mc_addr);
        if (ret)
-               goto cleanup;
+               return ret;
 
        attr.dst = mc_addr;
 
@@ -642,8 +698,6 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
 
 cleanup:
        dev_mc_del(priv->dev, mc_addr);
-       __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
-       stmmac_test_set_rx_mode(priv->dev);
        return ret;
 }
 
index bbbc1dc..b517c1a 100644 (file)
@@ -1237,8 +1237,17 @@ static int fjes_probe(struct platform_device *plat_dev)
        adapter->open_guard = false;
 
        adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+       if (unlikely(!adapter->txrx_wq)) {
+               err = -ENOMEM;
+               goto err_free_netdev;
+       }
+
        adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
                                              WQ_MEM_RECLAIM, 0);
+       if (unlikely(!adapter->control_wq)) {
+               err = -ENOMEM;
+               goto err_free_txrx_wq;
+       }
 
        INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
        INIT_WORK(&adapter->raise_intr_rxdata_task,
@@ -1255,7 +1264,7 @@ static int fjes_probe(struct platform_device *plat_dev)
        hw->hw_res.irq = platform_get_irq(plat_dev, 0);
        err = fjes_hw_init(&adapter->hw);
        if (err)
-               goto err_free_netdev;
+               goto err_free_control_wq;
 
        /* setup MAC address (02:00:00:00:00:[epid])*/
        netdev->dev_addr[0] = 2;
@@ -1277,6 +1286,10 @@ static int fjes_probe(struct platform_device *plat_dev)
 
 err_hw_exit:
        fjes_hw_exit(&adapter->hw);
+err_free_control_wq:
+       destroy_workqueue(adapter->control_wq);
+err_free_txrx_wq:
+       destroy_workqueue(adapter->txrx_wq);
 err_free_netdev:
        free_netdev(netdev);
 err_out:
index fbec711..fbea6f2 100644 (file)
@@ -107,27 +107,6 @@ struct bpqdev {
 
 static LIST_HEAD(bpq_devices);
 
-/*
- * bpqether network devices are paired with ethernet devices below them, so
- * form a special "super class" of normal ethernet devices; split their locks
- * off into a separate class since they always nest.
- */
-static struct lock_class_key bpq_netdev_xmit_lock_key;
-static struct lock_class_key bpq_netdev_addr_lock_key;
-
-static void bpq_set_lockdep_class_one(struct net_device *dev,
-                                     struct netdev_queue *txq,
-                                     void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
-}
-
-static void bpq_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
-}
-
 /* ------------------------------------------------------------------------ */
 
 
@@ -498,7 +477,6 @@ static int bpq_new_device(struct net_device *edev)
        err = register_netdevice(ndev);
        if (err)
                goto error;
-       bpq_set_lockdep_class(ndev);
 
        /* List protected by RTNL */
        list_add_rcu(&bpq->bpq_list, &bpq_devices);
index 39dddcd..963509a 100644 (file)
@@ -982,7 +982,7 @@ static int netvsc_attach(struct net_device *ndev,
        if (netif_running(ndev)) {
                ret = rndis_filter_open(nvdev);
                if (ret)
-                       return ret;
+                       goto err;
 
                rdev = nvdev->extension;
                if (!rdev->link_state)
@@ -990,6 +990,13 @@ static int netvsc_attach(struct net_device *ndev,
        }
 
        return 0;
+
+err:
+       netif_device_detach(ndev);
+
+       rndis_filter_device_remove(hdev, nvdev);
+
+       return ret;
 }
 
 static int netvsc_set_channels(struct net_device *net,
@@ -1807,8 +1814,10 @@ static int netvsc_set_features(struct net_device *ndev,
 
        ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
 
-       if (ret)
+       if (ret) {
                features ^= NETIF_F_LRO;
+               ndev->features = features;
+       }
 
 syncvf:
        if (!vf_netdev)
@@ -2335,8 +2344,6 @@ static int netvsc_probe(struct hv_device *dev,
                NETIF_F_HW_VLAN_CTAG_RX;
        net->vlan_features = net->features;
 
-       netdev_lockdep_set_classes(net);
-
        /* MTU range: 68 - 1500 or 65521 */
        net->min_mtu = NETVSC_MTU_MIN;
        if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
index 887bbba..ba3dfac 100644 (file)
@@ -131,8 +131,6 @@ static int ipvlan_init(struct net_device *dev)
        dev->gso_max_segs = phy_dev->gso_max_segs;
        dev->hard_header_len = phy_dev->hard_header_len;
 
-       netdev_lockdep_set_classes(dev);
-
        ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
        if (!ipvlan->pcpu_stats)
                return -ENOMEM;
index cb76373..afd8b2a 100644 (file)
@@ -267,7 +267,6 @@ struct macsec_dev {
        struct pcpu_secy_stats __percpu *stats;
        struct list_head secys;
        struct gro_cells gro_cells;
-       unsigned int nest_level;
 };
 
 /**
@@ -2750,7 +2749,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
 
 #define MACSEC_FEATURES \
        (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
-static struct lock_class_key macsec_netdev_addr_lock_key;
 
 static int macsec_dev_init(struct net_device *dev)
 {
@@ -2958,11 +2956,6 @@ static int macsec_get_iflink(const struct net_device *dev)
        return macsec_priv(dev)->real_dev->ifindex;
 }
 
-static int macsec_get_nest_level(struct net_device *dev)
-{
-       return macsec_priv(dev)->nest_level;
-}
-
 static const struct net_device_ops macsec_netdev_ops = {
        .ndo_init               = macsec_dev_init,
        .ndo_uninit             = macsec_dev_uninit,
@@ -2976,7 +2969,6 @@ static const struct net_device_ops macsec_netdev_ops = {
        .ndo_start_xmit         = macsec_start_xmit,
        .ndo_get_stats64        = macsec_get_stats64,
        .ndo_get_iflink         = macsec_get_iflink,
-       .ndo_get_lock_subclass  = macsec_get_nest_level,
 };
 
 static const struct device_type macsec_type = {
@@ -3001,12 +2993,10 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
 static void macsec_free_netdev(struct net_device *dev)
 {
        struct macsec_dev *macsec = macsec_priv(dev);
-       struct net_device *real_dev = macsec->real_dev;
 
        free_percpu(macsec->stats);
        free_percpu(macsec->secy.tx_sc.stats);
 
-       dev_put(real_dev);
 }
 
 static void macsec_setup(struct net_device *dev)
@@ -3261,14 +3251,6 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
        if (err < 0)
                return err;
 
-       dev_hold(real_dev);
-
-       macsec->nest_level = dev_get_nest_level(real_dev) + 1;
-       netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macsec_netdev_addr_lock_key,
-                                      macsec_get_nest_level(dev));
-
        err = netdev_upper_dev_link(real_dev, dev, extack);
        if (err < 0)
                goto unregister;
index 940192c..34fc59b 100644 (file)
@@ -852,8 +852,6 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  * "super class" of normal network devices; split their locks off into a
  * separate class since they always nest.
  */
-static struct lock_class_key macvlan_netdev_addr_lock_key;
-
 #define ALWAYS_ON_OFFLOADS \
        (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
         NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
@@ -869,19 +867,6 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
-static int macvlan_get_nest_level(struct net_device *dev)
-{
-       return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
-}
-
-static void macvlan_set_lockdep_class(struct net_device *dev)
-{
-       netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macvlan_netdev_addr_lock_key,
-                                      macvlan_get_nest_level(dev));
-}
-
 static int macvlan_init(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
@@ -900,8 +885,6 @@ static int macvlan_init(struct net_device *dev)
        dev->gso_max_segs       = lowerdev->gso_max_segs;
        dev->hard_header_len    = lowerdev->hard_header_len;
 
-       macvlan_set_lockdep_class(dev);
-
        vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan->pcpu_stats)
                return -ENOMEM;
@@ -1161,7 +1144,6 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_fdb_add            = macvlan_fdb_add,
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
-       .ndo_get_lock_subclass  = macvlan_get_nest_level,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = macvlan_dev_poll_controller,
        .ndo_netpoll_setup      = macvlan_dev_netpoll_setup,
@@ -1445,7 +1427,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        vlan->dev      = dev;
        vlan->port     = port;
        vlan->set_features = MACVLAN_FEATURES;
-       vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
 
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
index 56576d4..44c2d85 100644 (file)
@@ -708,6 +708,7 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
                goto err_debugfs_exit;
 
        devlink_params_publish(devlink);
+       devlink_reload_enable(devlink);
        return nsim_dev;
 
 err_debugfs_exit:
@@ -732,6 +733,7 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
 {
        struct devlink *devlink = priv_to_devlink(nsim_dev);
 
+       devlink_reload_disable(devlink);
        nsim_bpf_dev_exit(nsim_dev);
        nsim_dev_debugfs_exit(nsim_dev);
        nsim_dev_traps_exit(devlink);
@@ -806,9 +808,11 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
 {
        struct nsim_dev_port *nsim_dev_port, *tmp;
 
+       mutex_lock(&nsim_dev->port_list_lock);
        list_for_each_entry_safe(nsim_dev_port, tmp,
                                 &nsim_dev->port_list, list)
                __nsim_dev_port_del(nsim_dev_port);
+       mutex_unlock(&nsim_dev->port_list_lock);
 }
 
 int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
@@ -822,14 +826,17 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
                return PTR_ERR(nsim_dev);
        dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
 
+       mutex_lock(&nsim_dev->port_list_lock);
        for (i = 0; i < nsim_bus_dev->port_count; i++) {
                err = __nsim_dev_port_add(nsim_dev, i);
                if (err)
                        goto err_port_del_all;
        }
+       mutex_unlock(&nsim_dev->port_list_lock);
        return 0;
 
 err_port_del_all:
+       mutex_unlock(&nsim_dev->port_list_lock);
        nsim_dev_port_del_all(nsim_dev);
        nsim_dev_destroy(nsim_dev);
        return err;
index 6580094..8f241b5 100644 (file)
@@ -469,6 +469,19 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
 
        switch (rq->type) {
        case PTP_CLK_REQ_EXTTS:
+               /* Reject requests with unsupported flags */
+               if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+                                       PTP_RISING_EDGE |
+                                       PTP_FALLING_EDGE |
+                                       PTP_STRICT_FLAGS))
+                       return -EOPNOTSUPP;
+
+               /* Reject requests to enable time stamping on both edges. */
+               if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+                   (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+                   (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+                       return -EOPNOTSUPP;
+
                index = rq->extts.index;
                if (index >= N_EXT_TS)
                        return -EINVAL;
@@ -491,6 +504,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
                return 0;
 
        case PTP_CLK_REQ_PEROUT:
+               /* Reject requests with unsupported flags */
+               if (rq->perout.flags)
+                       return -EOPNOTSUPP;
                if (rq->perout.index >= N_PER_OUT)
                        return -EINVAL;
                return periodic_output(clock, rq, on, rq->perout.index);
index 2e29ab8..3587656 100644 (file)
@@ -64,11 +64,12 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
        if (mdiodev->dev.of_node)
                reset = devm_reset_control_get_exclusive(&mdiodev->dev,
                                                         "phy");
-       if (PTR_ERR(reset) == -ENOENT ||
-           PTR_ERR(reset) == -ENOTSUPP)
-               reset = NULL;
-       else if (IS_ERR(reset))
-               return PTR_ERR(reset);
+       if (IS_ERR(reset)) {
+               if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOSYS)
+                       reset = NULL;
+               else
+                       return PTR_ERR(reset);
+       }
 
        mdiodev->reset_ctrl = reset;
 
index 20e2ebe..a578f7e 100644 (file)
@@ -87,8 +87,24 @@ struct phylink {
        phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__)
 #define phylink_info(pl, fmt, ...) \
        phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__)
+#if defined(CONFIG_DYNAMIC_DEBUG)
 #define phylink_dbg(pl, fmt, ...) \
+do {                                                                   \
+       if ((pl)->config->type == PHYLINK_NETDEV)                       \
+               netdev_dbg((pl)->netdev, fmt, ##__VA_ARGS__);           \
+       else if ((pl)->config->type == PHYLINK_DEV)                     \
+               dev_dbg((pl)->dev, fmt, ##__VA_ARGS__);                 \
+} while (0)
+#elif defined(DEBUG)
+#define phylink_dbg(pl, fmt, ...)                                      \
        phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__)
+#else
+#define phylink_dbg(pl, fmt, ...)                                      \
+({                                                                     \
+       if (0)                                                          \
+               phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__);     \
+})
+#endif
 
 /**
  * phylink_set_port_modes() - set the port type modes in the ethtool mask
index dc3d92d..b732982 100644 (file)
@@ -327,6 +327,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .name           = "SMSC LAN8740",
 
        /* PHY_BASIC_FEATURES */
+       .flags          = PHY_RST_AFTER_CLK_EN,
 
        .probe          = smsc_phy_probe,
 
index 9a1b006..61824bb 100644 (file)
@@ -1324,8 +1324,6 @@ static int ppp_dev_init(struct net_device *dev)
 {
        struct ppp *ppp;
 
-       netdev_lockdep_set_classes(dev);
-
        ppp = netdev_priv(dev);
        /* Let the netdevice take a reference on the ppp file. This ensures
         * that ppp_destroy_interface() won't run before the device gets
index cac64b9..4d479e3 100644 (file)
@@ -855,6 +855,7 @@ err_free_chan:
        sl->tty = NULL;
        tty->disc_data = NULL;
        clear_bit(SLF_INUSE, &sl->flags);
+       free_netdev(sl->dev);
 
 err_exit:
        rtnl_unlock();
index e8089de..8156b33 100644 (file)
@@ -1615,7 +1615,6 @@ static int team_init(struct net_device *dev)
        int err;
 
        team->dev = dev;
-       mutex_init(&team->lock);
        team_set_no_mode(team);
 
        team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
@@ -1642,7 +1641,8 @@ static int team_init(struct net_device *dev)
                goto err_options_register;
        netif_carrier_off(dev);
 
-       netdev_lockdep_set_classes(dev);
+       lockdep_register_key(&team->team_lock_key);
+       __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
 
        return 0;
 
@@ -1673,6 +1673,7 @@ static void team_uninit(struct net_device *dev)
        team_queue_override_fini(team);
        mutex_unlock(&team->lock);
        netdev_change_features(dev);
+       lockdep_unregister_key(&team->team_lock_key);
 }
 
 static void team_destructor(struct net_device *dev)
@@ -1976,8 +1977,15 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
        err = team_port_del(team, port_dev);
        mutex_unlock(&team->lock);
 
-       if (!err)
-               netdev_change_features(dev);
+       if (err)
+               return err;
+
+       if (netif_is_team_master(port_dev)) {
+               lockdep_unregister_key(&team->team_lock_key);
+               lockdep_register_key(&team->team_lock_key);
+               lockdep_set_class(&team->lock, &team->team_lock_key);
+       }
+       netdev_change_features(dev);
 
        return err;
 }
index 011bd4c..af3994e 100644 (file)
@@ -196,7 +196,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
 
        /* Get the MAC address */
        ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
-       if (ret < 0) {
+       if (ret < ETH_ALEN) {
                netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
                goto free;
        }
index 32f53de..fe63043 100644 (file)
@@ -787,6 +787,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
 {
        USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
index 00cab3f..c2c82e6 100644 (file)
@@ -578,8 +578,8 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
        /* read current mtu value from device */
        err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
                              USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
-                             0, iface_no, &max_datagram_size, 2);
-       if (err < 0) {
+                             0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
+       if (err != sizeof(max_datagram_size)) {
                dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
                goto out;
        }
@@ -590,7 +590,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
        max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
        err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
                               USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
-                              0, iface_no, &max_datagram_size, 2);
+                              0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
        if (err < 0)
                dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
 
index 6294809..f24a1b0 100644 (file)
@@ -1264,8 +1264,11 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
                netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
                lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
 
-               if (dev->domain_data.phyirq > 0)
+               if (dev->domain_data.phyirq > 0) {
+                       local_irq_disable();
                        generic_handle_irq(dev->domain_data.phyirq);
+                       local_irq_enable();
+               }
        } else
                netdev_warn(dev->net,
                            "unexpected interrupt: 0x%08x\n", intdata);
index 596428e..4196c0e 100644 (file)
@@ -1362,6 +1362,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x413c, 0x81d7, 0)},    /* Dell Wireless 5821e */
+       {QMI_FIXED_INTF(0x413c, 0x81e0, 0)},    /* Dell Wireless 5821e with eSIM support*/
        {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
        {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
@@ -1370,6 +1371,8 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
        {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+       {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
+       {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},    /* Foxconn T77W968 LTE with eSIM support*/
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index cee9fef..d4a95b5 100644 (file)
@@ -5755,6 +5755,7 @@ static const struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x720c)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7214)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0xa387)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
        {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
        {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK,  0x0601)},
index ee52bde..b8228f5 100644 (file)
@@ -865,7 +865,6 @@ static int vrf_dev_init(struct net_device *dev)
 
        /* similarly, oper state is irrelevant; set to up to avoid confusion */
        dev->operstate = IF_OPER_UP;
-       netdev_lockdep_set_classes(dev);
        return 0;
 
 out_rth:
index 3d9bcc9..8869154 100644 (file)
@@ -2487,9 +2487,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                vni = tunnel_id_to_key32(info->key.tun_id);
                ifindex = 0;
                dst_cache = &info->dst_cache;
-               if (info->options_len &&
-                   info->key.tun_flags & TUNNEL_VXLAN_OPT)
+               if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+                       if (info->options_len < sizeof(*md))
+                               goto drop;
                        md = ip_tunnel_info_opts(info);
+               }
                ttl = info->key.ttl;
                tos = info->key.tos;
                label = info->key.label;
@@ -3566,10 +3568,13 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct net_device *remote_dev = NULL;
        struct vxlan_fdb *f = NULL;
        bool unregister = false;
+       struct vxlan_rdst *dst;
        int err;
 
+       dst = &vxlan->default_dst;
        err = vxlan_dev_configure(net, dev, conf, false, extack);
        if (err)
                return err;
@@ -3577,14 +3582,14 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
        dev->ethtool_ops = &vxlan_ethtool_ops;
 
        /* create an fdb entry for a valid default destination */
-       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+       if (!vxlan_addr_any(&dst->remote_ip)) {
                err = vxlan_fdb_create(vxlan, all_zeros_mac,
-                                      &vxlan->default_dst.remote_ip,
+                                      &dst->remote_ip,
                                       NUD_REACHABLE | NUD_PERMANENT,
                                       vxlan->cfg.dst_port,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_ifindex,
+                                      dst->remote_vni,
+                                      dst->remote_vni,
+                                      dst->remote_ifindex,
                                       NTF_SELF, &f);
                if (err)
                        return err;
@@ -3595,26 +3600,41 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
                goto errout;
        unregister = true;
 
+       if (dst->remote_ifindex) {
+               remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
+               if (!remote_dev)
+                       goto errout;
+
+               err = netdev_upper_dev_link(remote_dev, dev, extack);
+               if (err)
+                       goto errout;
+       }
+
        err = rtnl_configure_link(dev, NULL);
        if (err)
-               goto errout;
+               goto unlink;
 
        if (f) {
-               vxlan_fdb_insert(vxlan, all_zeros_mac,
-                                vxlan->default_dst.remote_vni, f);
+               vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
 
                /* notify default fdb entry */
                err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
                                       RTM_NEWNEIGH, true, extack);
                if (err) {
                        vxlan_fdb_destroy(vxlan, f, false, false);
+                       if (remote_dev)
+                               netdev_upper_dev_unlink(remote_dev, dev);
                        goto unregister;
                }
        }
 
        list_add(&vxlan->next, &vn->vxlan_list);
+       if (remote_dev)
+               dst->remote_dev = remote_dev;
        return 0;
-
+unlink:
+       if (remote_dev)
+               netdev_upper_dev_unlink(remote_dev, dev);
 errout:
        /* unregister_netdevice() destroys the default FDB entry with deletion
         * notification. But the addition notification was not sent yet, so
@@ -3932,11 +3952,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct netlink_ext_ack *extack)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_rdst *dst = &vxlan->default_dst;
        struct net_device *lowerdev;
        struct vxlan_config conf;
+       struct vxlan_rdst *dst;
        int err;
 
+       dst = &vxlan->default_dst;
        err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
        if (err)
                return err;
@@ -3946,6 +3967,14 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        if (err)
                return err;
 
+       if (dst->remote_dev == lowerdev)
+               lowerdev = NULL;
+
+       err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev,
+                                            extack);
+       if (err)
+               return err;
+
        /* handle default dst entry */
        if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
                u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
@@ -3962,6 +3991,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                                               NTF_SELF, true, extack);
                        if (err) {
                                spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+                               netdev_adjacent_change_abort(dst->remote_dev,
+                                                            lowerdev, dev);
                                return err;
                        }
                }
@@ -3979,6 +4010,11 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        if (conf.age_interval != vxlan->cfg.age_interval)
                mod_timer(&vxlan->age_timer, jiffies);
 
+       netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
+       if (lowerdev && lowerdev != dst->remote_dev) {
+               dst->remote_dev = lowerdev;
+               netdev_update_lockdep_key(lowerdev);
+       }
        vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
        return 0;
 }
@@ -3991,6 +4027,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 
        list_del(&vxlan->next);
        unregister_netdevice_queue(dev, head);
+       if (vxlan->default_dst.remote_dev)
+               netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev);
 }
 
 static size_t vxlan_get_size(const struct net_device *dev)
index 8efb493..5c79f05 100644 (file)
@@ -127,12 +127,12 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
                        "%d\n", result);
        result = 0;
 error_cmd:
-       kfree(cmd);
        kfree_skb(ack_skb);
 error_msg_to_dev:
 error_alloc:
        d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
                wimax_dev, state, result);
+       kfree(cmd);
        return result;
 }
 
index 39c6485..c0750ce 100644 (file)
@@ -520,7 +520,7 @@ struct iwl_scan_dwell {
 } __packed;
 
 /**
- * struct iwl_scan_config
+ * struct iwl_scan_config_v1
  * @flags:                     enum scan_config_flags
  * @tx_chains:                 valid_tx antenna - ANT_* definitions
  * @rx_chains:                 valid_rx antenna - ANT_* definitions
@@ -552,7 +552,7 @@ struct iwl_scan_config_v1 {
 #define SCAN_LB_LMAC_IDX 0
 #define SCAN_HB_LMAC_IDX 1
 
-struct iwl_scan_config {
+struct iwl_scan_config_v2 {
        __le32 flags;
        __le32 tx_chains;
        __le32 rx_chains;
@@ -564,6 +564,24 @@ struct iwl_scan_config {
        u8 bcast_sta_id;
        u8 channel_flags;
        u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
+
+/**
+ * struct iwl_scan_config
+ * @enable_cam_mode: whether to enable CAM mode.
+ * @enable_promiscouos_mode: whether to enable promiscouos mode
+ * @bcast_sta_id: the index of the station in the fw
+ * @reserved: reserved
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ */
+struct iwl_scan_config {
+       u8 enable_cam_mode;
+       u8 enable_promiscouos_mode;
+       u8 bcast_sta_id;
+       u8 reserved;
+       __le32 tx_chains;
+       __le32 rx_chains;
 } __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
 
 /**
index 423cc0c..0d5bc4c 100644 (file)
@@ -288,6 +288,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
  *     STA_CONTEXT_DOT11AX_API_S
  * @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar
  *     version tables.
+ * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
+ *  SCAN_CONFIG_DB_CMD_API_S.
  *
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
@@ -321,6 +323,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE   = (__force iwl_ucode_tlv_api_t)53,
        IWL_UCODE_TLV_API_FTM_RTT_ACCURACY      = (__force iwl_ucode_tlv_api_t)54,
        IWL_UCODE_TLV_API_SAR_TABLE_VER         = (__force iwl_ucode_tlv_api_t)55,
+       IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG   = (__force iwl_ucode_tlv_api_t)56,
        IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP    = (__force iwl_ucode_tlv_api_t)57,
        IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER     = (__force iwl_ucode_tlv_api_t)58,
 
index cb4c551..695bbaa 100644 (file)
  *         Indicates MAC is entering a power-saving sleep power-down.
  *         Not a good time to access device-internal resources.
  */
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE                     (0x00000004)
 #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP         (0x00000010)
 #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON               (0x00000400)
 
index f47e0f9..23c25a7 100644 (file)
@@ -449,6 +449,11 @@ enum {
 #define PERSISTENCE_BIT                        BIT(12)
 #define PREG_WFPM_ACCESS               BIT(12)
 
+#define HPM_HIPM_GEN_CFG                       0xA03458
+#define HPM_HIPM_GEN_CFG_CR_PG_EN              BIT(0)
+#define HPM_HIPM_GEN_CFG_CR_SLP_EN             BIT(1)
+#define HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE       BIT(10)
+
 #define UREG_DOORBELL_TO_ISR6          0xA05C04
 #define UREG_DOORBELL_TO_ISR6_NMI_BIT  BIT(0)
 #define UREG_DOORBELL_TO_ISR6_SUSPEND  BIT(18)
index 843d00b..5ca50f3 100644 (file)
@@ -1405,6 +1405,12 @@ static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
                          IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
 }
 
+static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_api(&mvm->fw->ucode_capa,
+                         IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
+}
+
 static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
 {
        return fw_has_api(&mvm->fw->ucode_capa,
index f6b3045..fcafa22 100644 (file)
@@ -1137,11 +1137,11 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
        iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
 }
 
-static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
-                                    u32 flags, u8 channel_flags,
-                                    u32 max_channels)
+static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
+                                       u32 flags, u8 channel_flags,
+                                       u32 max_channels)
 {
-       struct iwl_scan_config *cfg = config;
+       struct iwl_scan_config_v2 *cfg = config;
 
        cfg->flags = cpu_to_le32(flags);
        cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -1185,7 +1185,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
        iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
 }
 
-int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
 {
        void *cfg;
        int ret, cmd_size;
@@ -1217,7 +1217,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        }
 
        if (iwl_mvm_cdb_scan_api(mvm))
-               cmd_size = sizeof(struct iwl_scan_config);
+               cmd_size = sizeof(struct iwl_scan_config_v2);
        else
                cmd_size = sizeof(struct iwl_scan_config_v1);
        cmd_size += num_channels;
@@ -1254,8 +1254,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
                        flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
                                 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
                                 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
-               iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags,
-                                        num_channels);
+               iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
+                                           num_channels);
        } else {
                iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
                                            num_channels);
@@ -1277,6 +1277,30 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        return ret;
 }
 
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+       struct iwl_scan_config cfg;
+       struct iwl_host_cmd cmd = {
+               .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .len[0] = sizeof(cfg),
+               .data[0] = &cfg,
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+       };
+
+       if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
+               return iwl_mvm_legacy_config_scan(mvm);
+
+       memset(&cfg, 0, sizeof(cfg));
+
+       cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+       cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+       cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+
+       IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+       return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
 static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
 {
        int i;
index 0bedba4..b3768d5 100644 (file)
@@ -1482,6 +1482,13 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                                            mvm_sta->sta_id, i);
                        txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
                                                         i, wdg);
+                       /*
+                        * on failures, just set it to IWL_MVM_INVALID_QUEUE
+                        * to try again later, we have no other good way of
+                        * failing here
+                        */
+                       if (txq_id < 0)
+                               txq_id = IWL_MVM_INVALID_QUEUE;
                        tid_data->txq_id = txq_id;
 
                        /*
@@ -1950,30 +1957,73 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
        sta->sta_id = IWL_MVM_INVALID_STA;
 }
 
-static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
                                          u8 sta_id, u8 fifo)
 {
        unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
                mvm->trans->trans_cfg->base_params->wd_timeout :
                IWL_WATCHDOG_DISABLED;
+       struct iwl_trans_txq_scd_cfg cfg = {
+               .fifo = fifo,
+               .sta_id = sta_id,
+               .tid = IWL_MAX_TID_COUNT,
+               .aggregate = false,
+               .frame_limit = IWL_FRAME_LIMIT,
+       };
+
+       WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+       iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+}
+
+static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
+{
+       unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+               mvm->trans->trans_cfg->base_params->wd_timeout :
+               IWL_WATCHDOG_DISABLED;
+
+       WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+       return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
+                                      wdg_timeout);
+}
 
+static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
+                                         int maccolor,
+                                         struct iwl_mvm_int_sta *sta,
+                                         u16 *queue, int fifo)
+{
+       int ret;
+
+       /* Map queue to fifo - needs to happen before adding station */
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
+
+       ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
+       if (ret) {
+               if (!iwl_mvm_has_new_tx_api(mvm))
+                       iwl_mvm_disable_txq(mvm, NULL, *queue,
+                                           IWL_MAX_TID_COUNT, 0);
+               return ret;
+       }
+
+       /*
+        * For 22000 firmware and on we cannot add queue to a station unknown
+        * to firmware so enable queue here - after the station was added
+        */
        if (iwl_mvm_has_new_tx_api(mvm)) {
-               int tvqm_queue =
-                       iwl_mvm_tvqm_enable_txq(mvm, sta_id,
-                                               IWL_MAX_TID_COUNT,
-                                               wdg_timeout);
-               *queue = tvqm_queue;
-       } else {
-               struct iwl_trans_txq_scd_cfg cfg = {
-                       .fifo = fifo,
-                       .sta_id = sta_id,
-                       .tid = IWL_MAX_TID_COUNT,
-                       .aggregate = false,
-                       .frame_limit = IWL_FRAME_LIMIT,
-               };
+               int txq;
 
-               iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
+               txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
+               if (txq < 0) {
+                       iwl_mvm_rm_sta_common(mvm, sta->sta_id);
+                       return txq;
+               }
+
+               *queue = txq;
        }
+
+       return 0;
 }
 
 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
@@ -1989,59 +2039,26 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
        if (ret)
                return ret;
 
-       /* Map Aux queue to fifo - needs to happen before adding Aux station */
-       if (!iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
-                                             mvm->aux_sta.sta_id,
-                                             IWL_MVM_TX_FIFO_MCAST);
-
-       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
-                                        MAC_INDEX_AUX, 0);
+       ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
+                                            &mvm->aux_sta, &mvm->aux_queue,
+                                            IWL_MVM_TX_FIFO_MCAST);
        if (ret) {
                iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
                return ret;
        }
 
-       /*
-        * For 22000 firmware and on we cannot add queue to a station unknown
-        * to firmware so enable queue here - after the station was added
-        */
-       if (iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
-                                             mvm->aux_sta.sta_id,
-                                             IWL_MVM_TX_FIFO_MCAST);
-
        return 0;
 }
 
 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       int ret;
 
        lockdep_assert_held(&mvm->mutex);
 
-       /* Map snif queue to fifo - must happen before adding snif station */
-       if (!iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
-                                             mvm->snif_sta.sta_id,
+       return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
+                                             &mvm->snif_sta, &mvm->snif_queue,
                                              IWL_MVM_TX_FIFO_BE);
-
-       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
-                                        mvmvif->id, 0);
-       if (ret)
-               return ret;
-
-       /*
-        * For 22000 firmware and on we cannot add queue to a station unknown
-        * to firmware so enable queue here - after the station was added
-        */
-       if (iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
-                                             mvm->snif_sta.sta_id,
-                                             IWL_MVM_TX_FIFO_BE);
-
-       return 0;
 }
 
 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2133,6 +2150,10 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
                                                IWL_MAX_TID_COUNT,
                                                wdg_timeout);
+               if (queue < 0) {
+                       iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+                       return queue;
+               }
 
                if (vif->type == NL80211_IFTYPE_AP ||
                    vif->type == NL80211_IFTYPE_ADHOC)
@@ -2307,10 +2328,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        }
        ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
                                         mvmvif->id, mvmvif->color);
-       if (ret) {
-               iwl_mvm_dealloc_int_sta(mvm, msta);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        /*
         * Enable cab queue after the ADD_STA command is sent.
@@ -2323,6 +2342,10 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
                                                    0,
                                                    timeout);
+               if (queue < 0) {
+                       ret = queue;
+                       goto err;
+               }
                mvmvif->cab_queue = queue;
        } else if (!fw_has_api(&mvm->fw->ucode_capa,
                               IWL_UCODE_TLV_API_STA_TYPE))
@@ -2330,6 +2353,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                   timeout);
 
        return 0;
+err:
+       iwl_mvm_dealloc_int_sta(mvm, msta);
+       return ret;
 }
 
 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
index 6f4bb7c..040cec1 100644 (file)
@@ -573,20 +573,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9461_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
@@ -603,7 +603,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
@@ -618,60 +618,61 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
-
-       {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+       {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+       {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
 
        {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
@@ -1067,11 +1068,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-                  ((cfg != &iwl_ax200_cfg_cc &&
-                    cfg != &killer1650x_2ax_cfg &&
-                    cfg != &killer1650w_2ax_cfg &&
-                    cfg != &iwl_ax201_cfg_quz_hr) ||
-                   iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+                  iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
                u32 hw_status;
 
                hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
index df8455f..ca3bb4d 100644 (file)
 #include "internal.h"
 #include "fw/dbg.h"
 
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+       iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                         HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+       udelay(20);
+       iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                         HPM_HIPM_GEN_CFG_CR_PG_EN |
+                         HPM_HIPM_GEN_CFG_CR_SLP_EN);
+       udelay(20);
+       iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                           HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+       iwl_trans_sw_reset(trans);
+       iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+       return 0;
+}
+
 /*
  * Start up NIC's basic functionality after it has been reset
  * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
@@ -92,6 +110,13 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
 
        iwl_pcie_apm_config(trans);
 
+       if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+           trans->cfg->integrated) {
+               ret = iwl_pcie_gen2_force_power_gating(trans);
+               if (ret)
+                       return ret;
+       }
+
        ret = iwl_finish_nic_init(trans, trans->trans_cfg);
        if (ret)
                return ret;
index 8894027..d80f71f 100644 (file)
@@ -251,27 +251,23 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
        struct ieee80211_hdr *hdr = (void *)skb->data;
        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
        unsigned int mss = skb_shinfo(skb)->gso_size;
-       u16 length, iv_len, amsdu_pad;
+       u16 length, amsdu_pad;
        u8 *start_hdr;
        struct iwl_tso_hdr_page *hdr_page;
        struct page **page_ptr;
        struct tso_t tso;
 
-       /* if the packet is protected, then it must be CCMP or GCMP */
-       iv_len = ieee80211_has_protected(hdr->frame_control) ?
-               IEEE80211_CCMP_HDR_LEN : 0;
-
        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
                             &dev_cmd->hdr, start_len, 0);
 
        ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
        snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
-       total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+       total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
        amsdu_pad = 0;
 
        /* total amount of header we may need for this A-MSDU */
        hdr_room = DIV_ROUND_UP(total_len, mss) *
-               (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+               (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
 
        /* Our device supports 9 segments at most, it will fit in 1 page */
        hdr_page = get_page_hdr(trans, hdr_room);
@@ -282,14 +278,12 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
        start_hdr = hdr_page->pos;
        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
        *page_ptr = hdr_page->page;
-       memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
-       hdr_page->pos += iv_len;
 
        /*
-        * Pull the ieee80211 header + IV to be able to use TSO core,
+        * Pull the ieee80211 header to be able to use TSO core,
         * we will restore it for the tx_status flow.
         */
-       skb_pull(skb, hdr_len + iv_len);
+       skb_pull(skb, hdr_len);
 
        /*
         * Remove the length of all the headers that we don't actually
@@ -364,8 +358,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                }
        }
 
-       /* re -add the WiFi header and IV */
-       skb_push(skb, hdr_len + iv_len);
+       /* re -add the WiFi header */
+       skb_push(skb, hdr_len);
 
        return 0;
 
index 158a3d7..e323e9a 100644 (file)
@@ -3041,30 +3041,6 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
        }
 }
 
-
-/*
- * HostAP uses two layers of net devices, where the inner
- * layer gets called all the time from the outer layer.
- * This is a natural nesting, which needs a split lock type.
- */
-static struct lock_class_key hostap_netdev_xmit_lock_key;
-static struct lock_class_key hostap_netdev_addr_lock_key;
-
-static void prism2_set_lockdep_class_one(struct net_device *dev,
-                                        struct netdev_queue *txq,
-                                        void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock,
-                         &hostap_netdev_xmit_lock_key);
-}
-
-static void prism2_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock,
-                         &hostap_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
-}
-
 static struct net_device *
 prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
                       struct device *sdev)
@@ -3223,7 +3199,6 @@ while (0)
        if (ret >= 0)
                ret = register_netdevice(dev);
 
-       prism2_set_lockdep_class(dev);
        rtnl_unlock();
        if (ret < 0) {
                printk(KERN_WARNING "%s: register netdevice failed!\n",
index 4d03596..d7a1ddc 100644 (file)
@@ -8,6 +8,8 @@ mt76-y := \
        mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
        tx.o agg-rx.o mcu.o
 
+mt76-$(CONFIG_PCI) += pci.o
+
 mt76-usb-y := usb.o usb_trace.o
 
 CFLAGS_trace.o := -I$(src)
index c747eb2..8f69d00 100644 (file)
@@ -53,8 +53,10 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
        u32 ctrl;
        int i, idx = -1;
 
-       if (txwi)
+       if (txwi) {
                q->entry[q->head].txwi = DMA_DUMMY_DATA;
+               q->entry[q->head].skip_buf0 = true;
+       }
 
        for (i = 0; i < nbufs; i += 2, buf += 2) {
                u32 buf0 = buf[0].addr, buf1 = 0;
@@ -97,7 +99,7 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
        __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
        u32 ctrl = le32_to_cpu(__ctrl);
 
-       if (!e->txwi || !e->skb) {
+       if (!e->skip_buf0) {
                __le32 addr = READ_ONCE(q->desc[idx].buf0);
                u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
 
index 570c159..8aec7cc 100644 (file)
@@ -93,8 +93,9 @@ struct mt76_queue_entry {
                struct urb *urb;
        };
        enum mt76_txq_id qid;
-       bool schedule;
-       bool done;
+       bool skip_buf0:1;
+       bool schedule:1;
+       bool done:1;
 };
 
 struct mt76_queue_regs {
@@ -578,6 +579,7 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
 #define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
 
 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+void mt76_pci_disable_aspm(struct pci_dev *pdev);
 
 static inline u16 mt76_chip(struct mt76_dev *dev)
 {
index 73c3104..cf611d1 100644 (file)
@@ -81,6 +81,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
        mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
 
+       mt76_pci_disable_aspm(pdev);
+
        return 0;
 
 error:
diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c
new file mode 100644 (file)
index 0000000..04c5a69
--- /dev/null
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/pci.h>
+
+void mt76_pci_disable_aspm(struct pci_dev *pdev)
+{
+       struct pci_dev *parent = pdev->bus->self;
+       u16 aspm_conf, parent_aspm_conf = 0;
+
+       pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+       aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+       if (parent) {
+               pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+                                         &parent_aspm_conf);
+               parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+       }
+
+       if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+               /* aspm already disabled */
+               return;
+       }
+
+       dev_info(&pdev->dev, "disabling ASPM %s %s\n",
+                (aspm_conf & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+                (aspm_conf & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+
+       if (IS_ENABLED(CONFIG_PCIEASPM)) {
+               int err;
+
+               err = pci_disable_link_state(pdev, aspm_conf);
+               if (!err)
+                       return;
+       }
+
+       /* both device and parent should have the same ASPM setting.
+        * disable ASPM in downstream component first and then upstream.
+        */
+       pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_conf);
+       if (parent)
+               pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+                                          aspm_conf);
+}
+EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
index 6087ec7..f88d265 100644 (file)
@@ -822,7 +822,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                hdr = rtl_get_hdr(skb);
                fc = rtl_get_fc(skb);
 
-               if (!stats.crc && !stats.hwerror) {
+               if (!stats.crc && !stats.hwerror && (skb->len > FCS_LEN)) {
                        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
                               sizeof(rx_status));
 
@@ -859,6 +859,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                                _rtl_pci_rx_to_mac80211(hw, skb, rx_status);
                        }
                } else {
+                       /* drop packets with errors or those too short */
                        dev_kfree_skb_any(skb);
                }
 new_trx_end:
index 70f04c2..fff8dda 100644 (file)
@@ -754,6 +754,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
                                return;
                        } else {
                                noa_num = (noa_len - 2) / 13;
+                               if (noa_num > P2P_MAX_NOA_NUM)
+                                       noa_num = P2P_MAX_NOA_NUM;
+
                        }
                        noa_index = ie[3];
                        if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
@@ -848,6 +851,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
                                return;
                        } else {
                                noa_num = (noa_len - 2) / 13;
+                               if (noa_num > P2P_MAX_NOA_NUM)
+                                       noa_num = P2P_MAX_NOA_NUM;
+
                        }
                        noa_index = ie[3];
                        if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
index be92e12..7997cc6 100644 (file)
@@ -548,6 +548,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
        priv->is_connected = false;
        priv->is_up = false;
        INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete);
+       __module_get(THIS_MODULE);
 
        return 0;
 unregister_netdev:
@@ -578,6 +579,7 @@ static void virt_wifi_dellink(struct net_device *dev,
        netdev_upper_dev_unlink(priv->lowerdev, dev);
 
        unregister_netdevice_queue(dev, head);
+       module_put(THIS_MODULE);
 
        /* Deleting the wiphy is handled in the module destructor. */
 }
@@ -590,6 +592,42 @@ static struct rtnl_link_ops virt_wifi_link_ops = {
        .priv_size      = sizeof(struct virt_wifi_netdev_priv),
 };
 
+static bool netif_is_virt_wifi_dev(const struct net_device *dev)
+{
+       return rcu_access_pointer(dev->rx_handler) == virt_wifi_rx_handler;
+}
+
+static int virt_wifi_event(struct notifier_block *this, unsigned long event,
+                          void *ptr)
+{
+       struct net_device *lower_dev = netdev_notifier_info_to_dev(ptr);
+       struct virt_wifi_netdev_priv *priv;
+       struct net_device *upper_dev;
+       LIST_HEAD(list_kill);
+
+       if (!netif_is_virt_wifi_dev(lower_dev))
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_UNREGISTER:
+               priv = rtnl_dereference(lower_dev->rx_handler_data);
+               if (!priv)
+                       return NOTIFY_DONE;
+
+               upper_dev = priv->upperdev;
+
+               upper_dev->rtnl_link_ops->dellink(upper_dev, &list_kill);
+               unregister_netdevice_many(&list_kill);
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block virt_wifi_notifier = {
+       .notifier_call = virt_wifi_event,
+};
+
 /* Acquires and releases the rtnl lock. */
 static int __init virt_wifi_init_module(void)
 {
@@ -598,14 +636,25 @@ static int __init virt_wifi_init_module(void)
        /* Guaranteed to be locallly-administered and not multicast. */
        eth_random_addr(fake_router_bssid);
 
+       err = register_netdevice_notifier(&virt_wifi_notifier);
+       if (err)
+               return err;
+
+       err = -ENOMEM;
        common_wiphy = virt_wifi_make_wiphy();
        if (!common_wiphy)
-               return -ENOMEM;
+               goto notifier;
 
        err = rtnl_link_register(&virt_wifi_link_ops);
        if (err)
-               virt_wifi_destroy_wiphy(common_wiphy);
+               goto destroy_wiphy;
 
+       return 0;
+
+destroy_wiphy:
+       virt_wifi_destroy_wiphy(common_wiphy);
+notifier:
+       unregister_netdevice_notifier(&virt_wifi_notifier);
        return err;
 }
 
@@ -615,6 +664,7 @@ static void __exit virt_wifi_cleanup_module(void)
        /* Will delete any devices that depend on the wiphy. */
        rtnl_link_unregister(&virt_wifi_link_ops);
        virt_wifi_destroy_wiphy(common_wiphy);
+       unregister_netdevice_notifier(&virt_wifi_notifier);
 }
 
 module_init(virt_wifi_init_module);
index 1cd113c..ad0abb1 100644 (file)
@@ -259,7 +259,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
                                                  *fw_vsc_cfg, len);
 
                if (r) {
-                       devm_kfree(dev, fw_vsc_cfg);
+                       devm_kfree(dev, *fw_vsc_cfg);
                        goto vsc_read_err;
                }
        } else {
index 307bd2a..4d1909a 100644 (file)
@@ -220,8 +220,10 @@ static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
 
        if (r == -EREMOTEIO) {
                phy->hard_fault = r;
-               skb = NULL;
-       } else if (r < 0) {
+               if (info->mode == NXP_NCI_MODE_FW)
+                       nxp_nci_fw_recv_frame(phy->ndev, NULL);
+       }
+       if (r < 0) {
                nfc_err(&client->dev, "Read failed with error %d\n", r);
                goto exit_irq_handled;
        }
index f9ac176..2ce1793 100644 (file)
@@ -708,6 +708,7 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
                                                        NFC_PROTO_FELICA_MASK;
                } else {
                        kfree_skb(nfcid_skb);
+                       nfcid_skb = NULL;
                        /* P2P in type A */
                        r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
                                        ST21NFCA_RF_READER_F_NFCID1,
index 30de7ef..e0f064d 100644 (file)
@@ -158,9 +158,11 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
        struct nvme_ns *ns;
 
        mutex_lock(&ctrl->scan_lock);
+       down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list)
                if (nvme_mpath_clear_current_path(ns))
                        kblockd_schedule_work(&ns->head->requeue_work);
+       up_read(&ctrl->namespaces_rwsem);
        mutex_unlock(&ctrl->scan_lock);
 }
 
@@ -522,14 +524,13 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
        return 0;
 }
 
-static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
+static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
 {
        u32 nr_change_groups = 0;
        int error;
 
        mutex_lock(&ctrl->ana_lock);
-       error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
-                       groups_only ? NVME_ANA_LOG_RGO : 0,
+       error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
                        ctrl->ana_log_buf, ctrl->ana_log_size, 0);
        if (error) {
                dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
@@ -565,7 +566,7 @@ static void nvme_ana_work(struct work_struct *work)
 {
        struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
 
-       nvme_read_ana_log(ctrl, false);
+       nvme_read_ana_log(ctrl);
 }
 
 static void nvme_anatt_timeout(struct timer_list *t)
@@ -715,7 +716,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
                goto out;
        }
 
-       error = nvme_read_ana_log(ctrl, true);
+       error = nvme_read_ana_log(ctrl);
        if (error)
                goto out_free_ana_log_buf;
        return 0;
index f19a28b..cb4c300 100644 (file)
@@ -2133,8 +2133,16 @@ err_unreg_client:
 
 static void __exit nvme_rdma_cleanup_module(void)
 {
+       struct nvme_rdma_ctrl *ctrl;
+
        nvmf_unregister_transport(&nvme_rdma_transport);
        ib_unregister_client(&nvme_rdma_ib_client);
+
+       mutex_lock(&nvme_rdma_ctrl_mutex);
+       list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
+               nvme_delete_ctrl(&ctrl->ctrl);
+       mutex_unlock(&nvme_rdma_ctrl_mutex);
+       flush_workqueue(nvme_delete_wq);
 }
 
 module_init(nvme_rdma_init_module);
index 770dbcb..7544be8 100644 (file)
@@ -2219,7 +2219,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
        struct nvme_tcp_queue *queue = hctx->driver_data;
        struct sock *sk = queue->sock->sk;
 
-       if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
+       if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
                sk_busy_loop(sk, true);
        nvme_tcp_try_recv(queue);
        return queue->nr_cqe;
index 7989703..6bd610e 100644 (file)
@@ -324,8 +324,10 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
        if (!target)
                return -ENODEV;
 
-       if (!of_device_is_available(target))
+       if (!of_device_is_available(target)) {
+               of_node_put(target);
                return 0;
+       }
 
        rmem = __find_rmem(target);
        of_node_put(target);
index 480a21e..92e895d 100644 (file)
@@ -1207,6 +1207,7 @@ static int __init unittest_data_add(void)
        of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
        if (!unittest_data_node) {
                pr_warn("%s: No tree to attach; not running tests\n", __func__);
+               kfree(unittest_data);
                return -ENODATA;
        }
 
index 3b7ffd0..be7a7d3 100644 (file)
@@ -1626,12 +1626,6 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
                        goto free_regulators;
                }
 
-               ret = regulator_enable(reg);
-               if (ret < 0) {
-                       regulator_put(reg);
-                       goto free_regulators;
-               }
-
                opp_table->regulators[i] = reg;
        }
 
@@ -1645,10 +1639,8 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
        return opp_table;
 
 free_regulators:
-       while (i--) {
-               regulator_disable(opp_table->regulators[i]);
-               regulator_put(opp_table->regulators[i]);
-       }
+       while (i != 0)
+               regulator_put(opp_table->regulators[--i]);
 
        kfree(opp_table->regulators);
        opp_table->regulators = NULL;
@@ -1674,10 +1666,8 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
        /* Make sure there are no concurrent readers while updating opp_table */
        WARN_ON(!list_empty(&opp_table->opp_list));
 
-       for (i = opp_table->regulator_count - 1; i >= 0; i--) {
-               regulator_disable(opp_table->regulators[i]);
+       for (i = opp_table->regulator_count - 1; i >= 0; i--)
                regulator_put(opp_table->regulators[i]);
-       }
 
        _free_set_opp_data(opp_table);
 
@@ -2113,6 +2103,75 @@ put_table:
 }
 
 /**
+ * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
+ * @dev:               device for which we do this operation
+ * @freq:              OPP frequency to adjust voltage of
+ * @u_volt:            new OPP target voltage
+ * @u_volt_min:                new OPP min voltage
+ * @u_volt_max:                new OPP max voltage
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modifcation was done OR modification was
+ * successful.
+ */
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+                             unsigned long u_volt, unsigned long u_volt_min,
+                             unsigned long u_volt_max)
+
+{
+       struct opp_table *opp_table;
+       struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
+       int r = 0;
+
+       /* Find the opp_table */
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               r = PTR_ERR(opp_table);
+               dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+               return r;
+       }
+
+       mutex_lock(&opp_table->lock);
+
+       /* Do we have the frequency? */
+       list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
+               if (tmp_opp->rate == freq) {
+                       opp = tmp_opp;
+                       break;
+               }
+       }
+
+       if (IS_ERR(opp)) {
+               r = PTR_ERR(opp);
+               goto adjust_unlock;
+       }
+
+       /* Is update really needed? */
+       if (opp->supplies->u_volt == u_volt)
+               goto adjust_unlock;
+
+       opp->supplies->u_volt = u_volt;
+       opp->supplies->u_volt_min = u_volt_min;
+       opp->supplies->u_volt_max = u_volt_max;
+
+       dev_pm_opp_get(opp);
+       mutex_unlock(&opp_table->lock);
+
+       /* Notify the voltage change of the OPP */
+       blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
+                                    opp);
+
+       dev_pm_opp_put(opp);
+       goto adjust_put_table;
+
+adjust_unlock:
+       mutex_unlock(&opp_table->lock);
+adjust_put_table:
+       dev_pm_opp_put_opp_table(opp_table);
+       return r;
+}
+
+/**
  * dev_pm_opp_enable() - Enable a specific OPP
  * @dev:       device for which we do this operation
  * @freq:      OPP frequency to enable
index 1813f5a..1cbb582 100644 (file)
@@ -77,8 +77,6 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
 {
        struct dev_pm_opp *opp;
 
-       lockdep_assert_held(&opp_table_lock);
-
        mutex_lock(&opp_table->lock);
 
        list_for_each_entry(opp, &opp_table->opp_list, node) {
@@ -665,6 +663,13 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
                return 0;
        }
 
+       /*
+        * Re-initialize list_kref every time we add static OPPs to the OPP
+        * table as the reference count may be 0 after the last tie static OPPs
+        * were removed.
+        */
+       kref_init(&opp_table->list_kref);
+
        /* We have opp-table node now, iterate over it and add OPPs */
        for_each_available_child_of_node(opp_table->np, np) {
                opp = _opp_add_static_v2(opp_table, dev, np);
index 648ddb7..c6800d2 100644 (file)
@@ -87,7 +87,7 @@ FUNC_GROUP_DECL(MACLINK3, L23);
 
 #define K25 7
 SIG_EXPR_LIST_DECL_SESG(K25, MACLINK4, MACLINK4, SIG_DESC_SET(SCU410, 7));
-SIG_EXPR_LIST_DECL_SESG(K25, SDA14, SDA14, SIG_DESC_SET(SCU4B0, 7));
+SIG_EXPR_LIST_DECL_SESG(K25, SDA14, I2C14, SIG_DESC_SET(SCU4B0, 7));
 PIN_DECL_2(K25, GPIOA7, MACLINK4, SDA14);
 FUNC_GROUP_DECL(MACLINK4, K25);
 
@@ -1262,13 +1262,13 @@ GROUP_DECL(SPI1, AB11, AC11, AA11);
 #define AD11 206
 SIG_EXPR_LIST_DECL_SEMG(AD11, SPI1DQ2, QSPI1, SPI1, SIG_DESC_SET(SCU438, 14));
 SIG_EXPR_LIST_DECL_SEMG(AD11, TXD13, UART13G1, UART13,
-                       SIG_DESC_SET(SCU438, 14));
+                       SIG_DESC_CLEAR(SCU4B8, 2), SIG_DESC_SET(SCU4D8, 14));
 PIN_DECL_2(AD11, GPIOZ6, SPI1DQ2, TXD13);
 
 #define AF10 207
 SIG_EXPR_LIST_DECL_SEMG(AF10, SPI1DQ3, QSPI1, SPI1, SIG_DESC_SET(SCU438, 15));
 SIG_EXPR_LIST_DECL_SEMG(AF10, RXD13, UART13G1, UART13,
-                       SIG_DESC_SET(SCU438, 15));
+                       SIG_DESC_CLEAR(SCU4B8, 3), SIG_DESC_SET(SCU4D8, 15));
 PIN_DECL_2(AF10, GPIOZ7, SPI1DQ3, RXD13);
 
 GROUP_DECL(QSPI1, AB11, AC11, AA11, AD11, AF10);
@@ -1440,91 +1440,85 @@ FUNC_GROUP_DECL(RGMII2, D4, C2, C1, D3, E4, F5, D2, E3, D1, F4, E2, E1);
 FUNC_GROUP_DECL(RMII2, D4, C2, C1, D3, D2, D1, F4, E2, E1);
 
 #define AB4 232
-SIG_EXPR_LIST_DECL_SESG(AB4, SD3CLK, SD3, SIG_DESC_SET(SCU400, 24));
-PIN_DECL_1(AB4, GPIO18D0, SD3CLK);
+SIG_EXPR_LIST_DECL_SEMG(AB4, EMMCCLK, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 24));
+PIN_DECL_1(AB4, GPIO18D0, EMMCCLK);
 
 #define AA4 233
-SIG_EXPR_LIST_DECL_SESG(AA4, SD3CMD, SD3, SIG_DESC_SET(SCU400, 25));
-PIN_DECL_1(AA4, GPIO18D1, SD3CMD);
+SIG_EXPR_LIST_DECL_SEMG(AA4, EMMCCMD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 25));
+PIN_DECL_1(AA4, GPIO18D1, EMMCCMD);
 
 #define AC4 234
-SIG_EXPR_LIST_DECL_SESG(AC4, SD3DAT0, SD3, SIG_DESC_SET(SCU400, 26));
-PIN_DECL_1(AC4, GPIO18D2, SD3DAT0);
+SIG_EXPR_LIST_DECL_SEMG(AC4, EMMCDAT0, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 26));
+PIN_DECL_1(AC4, GPIO18D2, EMMCDAT0);
 
 #define AA5 235
-SIG_EXPR_LIST_DECL_SESG(AA5, SD3DAT1, SD3, SIG_DESC_SET(SCU400, 27));
-PIN_DECL_1(AA5, GPIO18D3, SD3DAT1);
+SIG_EXPR_LIST_DECL_SEMG(AA5, EMMCDAT1, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 27));
+PIN_DECL_1(AA5, GPIO18D3, EMMCDAT1);
 
 #define Y5 236
-SIG_EXPR_LIST_DECL_SESG(Y5, SD3DAT2, SD3, SIG_DESC_SET(SCU400, 28));
-PIN_DECL_1(Y5, GPIO18D4, SD3DAT2);
+SIG_EXPR_LIST_DECL_SEMG(Y5, EMMCDAT2, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 28));
+PIN_DECL_1(Y5, GPIO18D4, EMMCDAT2);
 
 #define AB5 237
-SIG_EXPR_LIST_DECL_SESG(AB5, SD3DAT3, SD3, SIG_DESC_SET(SCU400, 29));
-PIN_DECL_1(AB5, GPIO18D5, SD3DAT3);
+SIG_EXPR_LIST_DECL_SEMG(AB5, EMMCDAT3, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 29));
+PIN_DECL_1(AB5, GPIO18D5, EMMCDAT3);
 
 #define AB6 238
-SIG_EXPR_LIST_DECL_SESG(AB6, SD3CD, SD3, SIG_DESC_SET(SCU400, 30));
-PIN_DECL_1(AB6, GPIO18D6, SD3CD);
+SIG_EXPR_LIST_DECL_SEMG(AB6, EMMCCD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 30));
+PIN_DECL_1(AB6, GPIO18D6, EMMCCD);
 
 #define AC5 239
-SIG_EXPR_LIST_DECL_SESG(AC5, SD3WP, SD3, SIG_DESC_SET(SCU400, 31));
-PIN_DECL_1(AC5, GPIO18D7, SD3WP);
+SIG_EXPR_LIST_DECL_SEMG(AC5, EMMCWP, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 31));
+PIN_DECL_1(AC5, GPIO18D7, EMMCWP);
 
-FUNC_GROUP_DECL(SD3, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
+GROUP_DECL(EMMCG1, AB4, AA4, AC4, AB6, AC5);
+GROUP_DECL(EMMCG4, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
 
 #define Y1 240
 SIG_EXPR_LIST_DECL_SEMG(Y1, FWSPIDCS, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y1, VBCS, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y1, SD3DAT4, SD3DAT4, SIG_DESC_SET(SCU404, 0));
-PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, SD3DAT4);
-FUNC_GROUP_DECL(SD3DAT4, Y1);
+SIG_EXPR_LIST_DECL_SEMG(Y1, EMMCDAT4, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 0));
+PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, EMMCDAT4);
 
 #define Y2 241
 SIG_EXPR_LIST_DECL_SEMG(Y2, FWSPIDCK, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y2, VBCK, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y2, SD3DAT5, SD3DAT5, SIG_DESC_SET(SCU404, 1));
-PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, SD3DAT5);
-FUNC_GROUP_DECL(SD3DAT5, Y2);
+SIG_EXPR_LIST_DECL_SEMG(Y2, EMMCDAT5, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 1));
+PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, EMMCDAT5);
 
 #define Y3 242
 SIG_EXPR_LIST_DECL_SEMG(Y3, FWSPIDMOSI, FWSPID, FWSPID,
                        SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y3, VBMOSI, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y3, SD3DAT6, SD3DAT6, SIG_DESC_SET(SCU404, 2));
-PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, SD3DAT6);
-FUNC_GROUP_DECL(SD3DAT6, Y3);
+SIG_EXPR_LIST_DECL_SEMG(Y3, EMMCDAT6, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 2));
+PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, EMMCDAT6);
 
 #define Y4 243
 SIG_EXPR_LIST_DECL_SEMG(Y4, FWSPIDMISO, FWSPID, FWSPID,
                        SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y4, VBMISO, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y4, SD3DAT7, SD3DAT7, SIG_DESC_SET(SCU404, 3));
-PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, SD3DAT7);
-FUNC_GROUP_DECL(SD3DAT7, Y4);
+SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
+PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
 
 GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
 GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
+GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
 FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
 FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
-
+FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
 /*
  * FIXME: Confirm bits and priorities are the right way around for the
  * following 4 pins
  */
 #define AF25 244
-SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20),
-                       SIG_DESC_SET(SCU4D8, 20));
-SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_CLEAR(SCU438, 20),
-                       SIG_DESC_SET(SCU4D8, 20));
+SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20));
+SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_SET(SCU4D8, 20));
 PIN_DECL_(AF25, SIG_EXPR_LIST_PTR(AF25, I3C3SCL),
          SIG_EXPR_LIST_PTR(AF25, FSI1CLK));
 
 #define AE26 245
-SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21),
-                       SIG_DESC_SET(SCU4D8, 21));
-SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_CLEAR(SCU438, 21),
-                       SIG_DESC_SET(SCU4D8, 21));
+SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21));
+SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_SET(SCU4D8, 21));
 PIN_DECL_(AE26, SIG_EXPR_LIST_PTR(AE26, I3C3SDA),
          SIG_EXPR_LIST_PTR(AE26, FSI1DATA));
 
@@ -1533,18 +1527,14 @@ FUNC_DECL_2(I3C3, HVI3C3, I3C3);
 FUNC_GROUP_DECL(FSI1, AF25, AE26);
 
 #define AE25 246
-SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22),
-                       SIG_DESC_SET(SCU4D8, 22));
-SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_CLEAR(SCU438, 22),
-                       SIG_DESC_SET(SCU4D8, 22));
+SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22));
+SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_SET(SCU4D8, 22));
 PIN_DECL_(AE25, SIG_EXPR_LIST_PTR(AE25, I3C4SCL),
          SIG_EXPR_LIST_PTR(AE25, FSI2CLK));
 
 #define AF24 247
-SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23),
-                       SIG_DESC_SET(SCU4D8, 23));
-SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_CLEAR(SCU438, 23),
-                       SIG_DESC_SET(SCU4D8, 23));
+SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23));
+SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_SET(SCU4D8, 23));
 PIN_DECL_(AF24, SIG_EXPR_LIST_PTR(AF24, I3C4SDA),
          SIG_EXPR_LIST_PTR(AF24, FSI2DATA));
 
@@ -1574,6 +1564,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(A3),
        ASPEED_PINCTRL_PIN(AA11),
        ASPEED_PINCTRL_PIN(AA12),
+       ASPEED_PINCTRL_PIN(AA16),
+       ASPEED_PINCTRL_PIN(AA17),
        ASPEED_PINCTRL_PIN(AA23),
        ASPEED_PINCTRL_PIN(AA24),
        ASPEED_PINCTRL_PIN(AA25),
@@ -1585,6 +1577,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AB11),
        ASPEED_PINCTRL_PIN(AB12),
        ASPEED_PINCTRL_PIN(AB15),
+       ASPEED_PINCTRL_PIN(AB16),
+       ASPEED_PINCTRL_PIN(AB17),
        ASPEED_PINCTRL_PIN(AB18),
        ASPEED_PINCTRL_PIN(AB19),
        ASPEED_PINCTRL_PIN(AB22),
@@ -1602,6 +1596,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AC11),
        ASPEED_PINCTRL_PIN(AC12),
        ASPEED_PINCTRL_PIN(AC15),
+       ASPEED_PINCTRL_PIN(AC16),
        ASPEED_PINCTRL_PIN(AC17),
        ASPEED_PINCTRL_PIN(AC18),
        ASPEED_PINCTRL_PIN(AC19),
@@ -1619,6 +1614,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AD12),
        ASPEED_PINCTRL_PIN(AD14),
        ASPEED_PINCTRL_PIN(AD15),
+       ASPEED_PINCTRL_PIN(AD16),
        ASPEED_PINCTRL_PIN(AD19),
        ASPEED_PINCTRL_PIN(AD20),
        ASPEED_PINCTRL_PIN(AD22),
@@ -1634,8 +1630,11 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AE12),
        ASPEED_PINCTRL_PIN(AE14),
        ASPEED_PINCTRL_PIN(AE15),
+       ASPEED_PINCTRL_PIN(AE16),
        ASPEED_PINCTRL_PIN(AE18),
        ASPEED_PINCTRL_PIN(AE19),
+       ASPEED_PINCTRL_PIN(AE25),
+       ASPEED_PINCTRL_PIN(AE26),
        ASPEED_PINCTRL_PIN(AE7),
        ASPEED_PINCTRL_PIN(AE8),
        ASPEED_PINCTRL_PIN(AF10),
@@ -1643,6 +1642,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AF12),
        ASPEED_PINCTRL_PIN(AF14),
        ASPEED_PINCTRL_PIN(AF15),
+       ASPEED_PINCTRL_PIN(AF24),
+       ASPEED_PINCTRL_PIN(AF25),
        ASPEED_PINCTRL_PIN(AF7),
        ASPEED_PINCTRL_PIN(AF8),
        ASPEED_PINCTRL_PIN(AF9),
@@ -1792,17 +1793,6 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(Y3),
        ASPEED_PINCTRL_PIN(Y4),
        ASPEED_PINCTRL_PIN(Y5),
-       ASPEED_PINCTRL_PIN(AB16),
-       ASPEED_PINCTRL_PIN(AA17),
-       ASPEED_PINCTRL_PIN(AB17),
-       ASPEED_PINCTRL_PIN(AE16),
-       ASPEED_PINCTRL_PIN(AC16),
-       ASPEED_PINCTRL_PIN(AA16),
-       ASPEED_PINCTRL_PIN(AD16),
-       ASPEED_PINCTRL_PIN(AF25),
-       ASPEED_PINCTRL_PIN(AE26),
-       ASPEED_PINCTRL_PIN(AE25),
-       ASPEED_PINCTRL_PIN(AF24),
 };
 
 static const struct aspeed_pin_group aspeed_g6_groups[] = {
@@ -1976,11 +1966,9 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
        ASPEED_PINCTRL_GROUP(SALT9G1),
        ASPEED_PINCTRL_GROUP(SD1),
        ASPEED_PINCTRL_GROUP(SD2),
-       ASPEED_PINCTRL_GROUP(SD3),
-       ASPEED_PINCTRL_GROUP(SD3DAT4),
-       ASPEED_PINCTRL_GROUP(SD3DAT5),
-       ASPEED_PINCTRL_GROUP(SD3DAT6),
-       ASPEED_PINCTRL_GROUP(SD3DAT7),
+       ASPEED_PINCTRL_GROUP(EMMCG1),
+       ASPEED_PINCTRL_GROUP(EMMCG4),
+       ASPEED_PINCTRL_GROUP(EMMCG8),
        ASPEED_PINCTRL_GROUP(SGPM1),
        ASPEED_PINCTRL_GROUP(SGPS1),
        ASPEED_PINCTRL_GROUP(SIOONCTRL),
@@ -2059,6 +2047,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
        ASPEED_PINCTRL_FUNC(ADC8),
        ASPEED_PINCTRL_FUNC(ADC9),
        ASPEED_PINCTRL_FUNC(BMCINT),
+       ASPEED_PINCTRL_FUNC(EMMC),
        ASPEED_PINCTRL_FUNC(ESPI),
        ASPEED_PINCTRL_FUNC(ESPIALT),
        ASPEED_PINCTRL_FUNC(FSI1),
@@ -2191,11 +2180,6 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
        ASPEED_PINCTRL_FUNC(SALT9),
        ASPEED_PINCTRL_FUNC(SD1),
        ASPEED_PINCTRL_FUNC(SD2),
-       ASPEED_PINCTRL_FUNC(SD3),
-       ASPEED_PINCTRL_FUNC(SD3DAT4),
-       ASPEED_PINCTRL_FUNC(SD3DAT5),
-       ASPEED_PINCTRL_FUNC(SD3DAT6),
-       ASPEED_PINCTRL_FUNC(SD3DAT7),
        ASPEED_PINCTRL_FUNC(SGPM1),
        ASPEED_PINCTRL_FUNC(SGPS1),
        ASPEED_PINCTRL_FUNC(SIOONCTRL),
index a2c0d52..140c5ce 100644 (file)
@@ -508,7 +508,7 @@ struct aspeed_pin_desc {
  * @idx: The bit index in the register
  */
 #define SIG_DESC_SET(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 1)
-#define SIG_DESC_CLEAR(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 0)
+#define SIG_DESC_CLEAR(reg, idx) { ASPEED_IP_SCU, reg, BIT_MASK(idx), 0, 0 }
 
 #define SIG_DESC_LIST_SYM(sig, group) sig_descs_ ## sig ## _ ## group
 #define SIG_DESC_LIST_DECL(sig, group, ...) \
@@ -738,6 +738,7 @@ struct aspeed_pin_desc {
        static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
 
 #define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two)
+#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)
 
 #define FUNC_GROUP_DECL(func, ...) \
        GROUP_DECL(func, __VA_ARGS__); \
index 6f7d3a2..42f7ab3 100644 (file)
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 /*
@@ -853,7 +845,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
 
        /* optional GPIO interrupt support */
        irq = platform_get_irq(pdev, 0);
-       if (irq) {
+       if (irq > 0) {
                struct irq_chip *irqc;
                struct gpio_irq_chip *girq;
 
index 2bf6af7..9fabc45 100644 (file)
@@ -640,8 +640,8 @@ static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev,
        const struct ns2_pin_function *func;
        const struct ns2_pin_group *grp;
 
-       if (grp_select > pinctrl->num_groups ||
-               func_select > pinctrl->num_functions)
+       if (grp_select >= pinctrl->num_groups ||
+               func_select >= pinctrl->num_functions)
                return -EINVAL;
 
        func = &pinctrl->functions[func_select];
index 44f8ccd..9dfdc27 100644 (file)
@@ -43,7 +43,7 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
                        BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */
                        BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */
                        BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM5 */
-                       BERLIN_PINCTRL_FUNCTION(0x3, "spififib"), /* SPDIFIB */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "spdifib"), /* SPDIFIB */
                        BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
                        BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */
        BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x12,
index aae51c5..2c419fa 100644 (file)
@@ -147,6 +147,7 @@ struct chv_pin_context {
  * @pctldesc: Pin controller description
  * @pctldev: Pointer to the pin controller device
  * @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
  * @regs: MMIO registers
  * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
  *             offset (in GPIO number space)
@@ -162,6 +163,7 @@ struct chv_pinctrl {
        struct pinctrl_desc pctldesc;
        struct pinctrl_dev *pctldev;
        struct gpio_chip chip;
+       struct irq_chip irqchip;
        void __iomem *regs;
        unsigned intr_lines[16];
        const struct chv_community *community;
@@ -1466,16 +1468,6 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
        return 0;
 }
 
-static struct irq_chip chv_gpio_irqchip = {
-       .name = "chv-gpio",
-       .irq_startup = chv_gpio_irq_startup,
-       .irq_ack = chv_gpio_irq_ack,
-       .irq_mask = chv_gpio_irq_mask,
-       .irq_unmask = chv_gpio_irq_unmask,
-       .irq_set_type = chv_gpio_irq_type,
-       .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
 static void chv_gpio_irq_handler(struct irq_desc *desc)
 {
        struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -1513,7 +1505,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1521,7 +1512,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1529,7 +1519,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1537,7 +1526,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {}
@@ -1563,7 +1551,7 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip,
                intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
 
                if (intsel >= community->nirqs)
-                       clear_bit(i, valid_mask);
+                       clear_bit(desc->number, valid_mask);
        }
 }
 
@@ -1629,7 +1617,15 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                }
        }
 
-       ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+       pctrl->irqchip.name = "chv-gpio";
+       pctrl->irqchip.irq_startup = chv_gpio_irq_startup;
+       pctrl->irqchip.irq_ack = chv_gpio_irq_ack;
+       pctrl->irqchip.irq_mask = chv_gpio_irq_mask;
+       pctrl->irqchip.irq_unmask = chv_gpio_irq_unmask;
+       pctrl->irqchip.irq_set_type = chv_gpio_irq_type;
+       pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
+
+       ret = gpiochip_irqchip_add(chip, &pctrl->irqchip, 0,
                                   handle_bad_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add IRQ chip\n");
@@ -1646,7 +1642,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                }
        }
 
-       gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq,
+       gpiochip_set_chained_irqchip(chip, &pctrl->irqchip, irq,
                                     chv_gpio_irq_handler);
        return 0;
 }
index 1f13bcd..83981ad 100644 (file)
@@ -52,6 +52,7 @@
 #define PADCFG0_GPIROUTNMI             BIT(17)
 #define PADCFG0_PMODE_SHIFT            10
 #define PADCFG0_PMODE_MASK             GENMASK(13, 10)
+#define PADCFG0_PMODE_GPIO             0
 #define PADCFG0_GPIORXDIS              BIT(9)
 #define PADCFG0_GPIOTXDIS              BIT(8)
 #define PADCFG0_GPIORXSTATE            BIT(1)
@@ -96,6 +97,7 @@ struct intel_pinctrl_context {
  * @pctldesc: Pin controller description
  * @pctldev: Pointer to the pin controller device
  * @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
  * @soc: SoC/PCH specific pin configuration data
  * @communities: All communities in this pin controller
  * @ncommunities: Number of communities in this pin controller
@@ -108,6 +110,7 @@ struct intel_pinctrl {
        struct pinctrl_desc pctldesc;
        struct pinctrl_dev *pctldev;
        struct gpio_chip chip;
+       struct irq_chip irqchip;
        const struct intel_pinctrl_soc_data *soc;
        struct intel_community *communities;
        size_t ncommunities;
@@ -330,7 +333,7 @@ static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
        cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1));
 
        mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
-       if (!mode)
+       if (mode == PADCFG0_PMODE_GPIO)
                seq_puts(s, "GPIO ");
        else
                seq_printf(s, "mode %d ", mode);
@@ -456,6 +459,11 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
        writel(value, padcfg0);
 }
 
+static int intel_gpio_get_gpio_mode(void __iomem *padcfg0)
+{
+       return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
+}
+
 static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
 {
        u32 value;
@@ -489,7 +497,20 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
        }
 
        padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+
+       /*
+        * If pin is already configured in GPIO mode, we assume that
+        * firmware provides correct settings. In such case we avoid
+        * potential glitches on the pin. Otherwise, for the pin in
+        * alternative mode, consumer has to supply respective flags.
+        */
+       if (intel_gpio_get_gpio_mode(padcfg0) == PADCFG0_PMODE_GPIO) {
+               raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+               return 0;
+       }
+
        intel_gpio_set_gpio_mode(padcfg0);
+
        /* Disable TX buffer and enable RX (this will be input) */
        __intel_gpio_set_direction(padcfg0, true);
 
@@ -1139,16 +1160,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
        return ret;
 }
 
-static struct irq_chip intel_gpio_irqchip = {
-       .name = "intel-gpio",
-       .irq_ack = intel_gpio_irq_ack,
-       .irq_mask = intel_gpio_irq_mask,
-       .irq_unmask = intel_gpio_irq_unmask,
-       .irq_set_type = intel_gpio_irq_type,
-       .irq_set_wake = intel_gpio_irq_wake,
-       .flags = IRQCHIP_MASK_ON_SUSPEND,
-};
-
 static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
                                     const struct intel_community *community)
 {
@@ -1198,12 +1209,22 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
 
        pctrl->chip = intel_gpio_chip;
 
+       /* Setup GPIO chip */
        pctrl->chip.ngpio = intel_gpio_ngpio(pctrl);
        pctrl->chip.label = dev_name(pctrl->dev);
        pctrl->chip.parent = pctrl->dev;
        pctrl->chip.base = -1;
        pctrl->irq = irq;
 
+       /* Setup IRQ chip */
+       pctrl->irqchip.name = dev_name(pctrl->dev);
+       pctrl->irqchip.irq_ack = intel_gpio_irq_ack;
+       pctrl->irqchip.irq_mask = intel_gpio_irq_mask;
+       pctrl->irqchip.irq_unmask = intel_gpio_irq_unmask;
+       pctrl->irqchip.irq_set_type = intel_gpio_irq_type;
+       pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
+       pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
        ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
        if (ret) {
                dev_err(pctrl->dev, "failed to register gpiochip\n");
@@ -1233,15 +1254,14 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
                return ret;
        }
 
-       ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
+       ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0,
                                   handle_bad_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add irqchip\n");
                return ret;
        }
 
-       gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
-                                    NULL);
+       gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL);
        return 0;
 }
 
index 6462d3c..f2f5fcd 100644 (file)
@@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
        PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
                      BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
                      18, 2, "gpio", "uart"),
-       PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
-       PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
-       PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
-       PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
+       PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
+       PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
+       PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
+       PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
 
 };
 
@@ -221,11 +221,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
 };
 
 static inline void armada_37xx_update_reg(unsigned int *reg,
-                                         unsigned int offset)
+                                         unsigned int *offset)
 {
        /* We never have more than 2 registers */
-       if (offset >= GPIO_PER_REG) {
-               offset -= GPIO_PER_REG;
+       if (*offset >= GPIO_PER_REG) {
+               *offset -= GPIO_PER_REG;
                *reg += sizeof(u32);
        }
 }
@@ -376,7 +376,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
 {
        int offset = irqd_to_hwirq(d);
 
-       armada_37xx_update_reg(reg, offset);
+       armada_37xx_update_reg(reg, &offset);
 }
 
 static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
@@ -386,7 +386,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        return regmap_update_bits(info->regmap, reg, mask, 0);
@@ -399,7 +399,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int val, mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
        regmap_read(info->regmap, reg, &val);
 
@@ -413,7 +413,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int mask, val, ret;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        ret = regmap_update_bits(info->regmap, reg, mask, mask);
@@ -434,7 +434,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
        unsigned int reg = INPUT_VAL;
        unsigned int val, mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        regmap_read(info->regmap, reg, &val);
@@ -449,7 +449,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
        unsigned int reg = OUTPUT_VAL;
        unsigned int mask, val;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
        val = value ? mask : 0;
 
index 9749737..ccdf0bb 100644 (file)
@@ -585,19 +585,6 @@ static int stmfx_pinctrl_gpio_function_enable(struct stmfx_pinctrl *pctl)
        return stmfx_function_enable(pctl->stmfx, func);
 }
 
-static int stmfx_pinctrl_gpio_init_valid_mask(struct gpio_chip *gc,
-                                             unsigned long *valid_mask,
-                                             unsigned int ngpios)
-{
-       struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
-       u32 n;
-
-       for_each_clear_bit(n, &pctl->gpio_valid_mask, ngpios)
-               clear_bit(n, valid_mask);
-
-       return 0;
-}
-
 static int stmfx_pinctrl_probe(struct platform_device *pdev)
 {
        struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
@@ -660,7 +647,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
        pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
        pctl->gpio_chip.can_sleep = true;
        pctl->gpio_chip.of_node = np;
-       pctl->gpio_chip.init_valid_mask = stmfx_pinctrl_gpio_init_valid_mask;
 
        ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
        if (ret) {
@@ -705,7 +691,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
 
 static int stmfx_pinctrl_remove(struct platform_device *pdev)
 {
-       struct stmfx *stmfx = dev_get_platdata(&pdev->dev);
+       struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
 
        return stmfx_function_disable(stmfx,
                                      STMFX_FUNC_GPIO |
index 94ddd7d..a67701e 100644 (file)
@@ -978,6 +978,8 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
        INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core),
        INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server),
        INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
+       INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
+       INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
 
        INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
        INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
index 67d0199..9d72ab5 100644 (file)
@@ -149,11 +149,21 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        err = -EFAULT;
                        break;
                }
-               if (((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
-                       req.extts.rsv[0] || req.extts.rsv[1]) &&
-                       cmd == PTP_EXTTS_REQUEST2) {
-                       err = -EINVAL;
-                       break;
+               if (cmd == PTP_EXTTS_REQUEST2) {
+                       /* Tell the drivers to check the flags carefully. */
+                       req.extts.flags |= PTP_STRICT_FLAGS;
+                       /* Make sure no reserved bit is set. */
+                       if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
+                           req.extts.rsv[0] || req.extts.rsv[1]) {
+                               err = -EINVAL;
+                               break;
+                       }
+                       /* Ensure one of the rising/falling edge bits is set. */
+                       if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
+                           (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
+                               err = -EINVAL;
+                               break;
+                       }
                } else if (cmd == PTP_EXTTS_REQUEST) {
                        req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
                        req.extts.rsv[0] = 0;
index 6ad51aa..f877e77 100644 (file)
@@ -472,14 +472,7 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
                if (err)
                        return err;
 
-               /*
-                * .apply might have to round some values in *state, if possible
-                * read the actually implemented value back.
-                */
-               if (chip->ops->get_state)
-                       chip->ops->get_state(chip, pwm, &pwm->state);
-               else
-                       pwm->state = *state;
+               pwm->state = *state;
        } else {
                /*
                 * FIXME: restore the initial state in case of error.
index 56c38cf..1f829ed 100644 (file)
@@ -187,6 +187,7 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops iproc_pwm_ops = {
        .apply = iproc_pwmc_apply,
        .get_state = iproc_pwmc_get_state,
+       .owner = THIS_MODULE,
 };
 
 static int iproc_pwmc_probe(struct platform_device *pdev)
index afe9447..a46be22 100644 (file)
@@ -5053,6 +5053,19 @@ regulator_register(const struct regulator_desc *regulator_desc,
 
        init_data = regulator_of_get_init_data(dev, regulator_desc, config,
                                               &rdev->dev.of_node);
+
+       /*
+        * Sometimes not all resources are probed already so we need to take
+        * that into account. This happens most the time if the ena_gpiod comes
+        * from a gpio extender or something else.
+        */
+       if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+               kfree(config);
+               kfree(rdev);
+               ret = -EPROBE_DEFER;
+               goto rinse;
+       }
+
        /*
         * We need to keep track of any GPIO descriptor coming from the
         * device tree until we have handled it over to the core. If the
index 56f3f72..710e670 100644 (file)
@@ -136,7 +136,6 @@ static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
 static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
 {
        struct da9062_regulator *regl = rdev_get_drvdata(rdev);
-       struct regmap_field *field;
        unsigned int val, mode = 0;
        int ret;
 
@@ -158,18 +157,7 @@ static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
                return REGULATOR_MODE_NORMAL;
        }
 
-       /* Detect current regulator state */
-       ret = regmap_field_read(regl->suspend, &val);
-       if (ret < 0)
-               return 0;
-
-       /* Read regulator mode from proper register, depending on state */
-       if (val)
-               field = regl->suspend_sleep;
-       else
-               field = regl->sleep;
-
-       ret = regmap_field_read(field, &val);
+       ret = regmap_field_read(regl->sleep, &val);
        if (ret < 0)
                return 0;
 
@@ -208,21 +196,9 @@ static int da9062_ldo_set_mode(struct regulator_dev *rdev, unsigned mode)
 static unsigned da9062_ldo_get_mode(struct regulator_dev *rdev)
 {
        struct da9062_regulator *regl = rdev_get_drvdata(rdev);
-       struct regmap_field *field;
        int ret, val;
 
-       /* Detect current regulator state */
-       ret = regmap_field_read(regl->suspend, &val);
-       if (ret < 0)
-               return 0;
-
-       /* Read regulator mode from proper register, depending on state */
-       if (val)
-               field = regl->suspend_sleep;
-       else
-               field = regl->sleep;
-
-       ret = regmap_field_read(field, &val);
+       ret = regmap_field_read(regl->sleep, &val);
        if (ret < 0)
                return 0;
 
@@ -408,10 +384,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9061_ID_BUCK2,
@@ -444,10 +420,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9061_ID_BUCK3,
@@ -480,10 +456,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9061_ID_LDO1,
@@ -509,10 +485,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO1_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -542,10 +518,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO2_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -575,10 +551,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO3_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -608,10 +584,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO4_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -652,10 +628,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_BUCK2,
@@ -688,10 +664,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK2_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK2_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK2_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK2_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK2_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK2_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK2_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_BUCK3,
@@ -724,10 +700,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_BUCK4,
@@ -760,10 +736,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_LDO1,
@@ -789,10 +765,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO1_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -822,10 +798,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO2_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -855,10 +831,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO3_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -888,10 +864,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO4_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
index d90a6fd..f815330 100644 (file)
@@ -144,8 +144,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct fixed_voltage_config *config;
        struct fixed_voltage_data *drvdata;
-       const struct fixed_dev_type *drvtype =
-               of_match_device(dev->driver->of_match_table, dev)->data;
+       const struct fixed_dev_type *drvtype = of_device_get_match_data(dev);
        struct regulator_config cfg = { };
        enum gpiod_flags gflags;
        int ret;
@@ -177,7 +176,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        drvdata->desc.type = REGULATOR_VOLTAGE;
        drvdata->desc.owner = THIS_MODULE;
 
-       if (drvtype->has_enable_clock) {
+       if (drvtype && drvtype->has_enable_clock) {
                drvdata->desc.ops = &fixed_voltage_clkenabled_ops;
 
                drvdata->enable_clock = devm_clk_get(dev, NULL);
index ff97cc5..9b05e03 100644 (file)
@@ -210,6 +210,7 @@ static const struct regulator_desc lochnagar_regulators[] = {
 
                .enable_time = 3000,
                .ramp_delay = 1000,
+               .off_on_delay = 15000,
 
                .owner = THIS_MODULE,
        },
index afefb29..87637eb 100644 (file)
@@ -231,12 +231,12 @@ static int of_get_regulation_constraints(struct device *dev,
                                        "regulator-off-in-suspend"))
                        suspend_state->enabled = DISABLE_IN_SUSPEND;
 
-               if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
-                                         &pval))
+               if (!of_property_read_u32(suspend_np,
+                               "regulator-suspend-min-microvolt", &pval))
                        suspend_state->min_uV = pval;
 
-               if (!of_property_read_u32(np, "regulator-suspend-max-microvolt",
-                                         &pval))
+               if (!of_property_read_u32(suspend_np,
+                               "regulator-suspend-max-microvolt", &pval))
                        suspend_state->max_uV = pval;
 
                if (!of_property_read_u32(suspend_np,
@@ -445,11 +445,20 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
                goto error;
        }
 
-       if (desc->of_parse_cb && desc->of_parse_cb(child, desc, config)) {
-               dev_err(dev,
-                       "driver callback failed to parse DT for regulator %pOFn\n",
-                       child);
-               goto error;
+       if (desc->of_parse_cb) {
+               int ret;
+
+               ret = desc->of_parse_cb(child, desc, config);
+               if (ret) {
+                       if (ret == -EPROBE_DEFER) {
+                               of_node_put(child);
+                               return ERR_PTR(-EPROBE_DEFER);
+                       }
+                       dev_err(dev,
+                               "driver callback failed to parse DT for regulator %pOFn\n",
+                               child);
+                       goto error;
+               }
        }
 
        *node = child;
index df5df1c..6895379 100644 (file)
@@ -788,7 +788,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
 
                /* SW2~SW4 high bit check and modify the voltage value table */
                if (i >= sw_check_start && i <= sw_check_end) {
-                       regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
+                       ret = regmap_read(pfuze_chip->regmap,
+                                               desc->vsel_reg, &val);
+                       if (ret) {
+                               dev_err(&client->dev, "Fails to read from the register.\n");
+                               return ret;
+                       }
+
                        if (val & sw_hi) {
                                if (pfuze_chip->chip_id == PFUZE3000 ||
                                        pfuze_chip->chip_id == PFUZE3001) {
index db6c085..0246b6f 100644 (file)
@@ -735,8 +735,8 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
 static const struct rpmh_vreg_hw_data pmic5_bob = {
        .regulator_type = VRM,
        .ops = &rpmh_regulator_vrm_bypass_ops,
-       .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 135, 32000),
-       .n_voltages = 136,
+       .voltage_range = REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000),
+       .n_voltages = 32,
        .pmic_mode_map = pmic_mode_map_pmic5_bob,
        .of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
 };
index cced1ff..89b9314 100644 (file)
@@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb)
        while (timeout++ <= abb->settling_time) {
                status = ti_abb_check_txdone(abb);
                if (status)
-                       break;
+                       return 0;
 
                udelay(1);
        }
 
-       if (timeout > abb->settling_time) {
-               dev_warn_ratelimited(dev,
-                                    "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
-                                    __func__, timeout, readl(abb->int_base));
-               return -ETIMEDOUT;
-       }
-
-       return 0;
+       dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+                            __func__, timeout, readl(abb->int_base));
+       return -ETIMEDOUT;
 }
 
 /**
@@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb)
 
                status = ti_abb_check_txdone(abb);
                if (!status)
-                       break;
+                       return 0;
 
                udelay(1);
        }
 
-       if (timeout > abb->settling_time) {
-               dev_warn_ratelimited(dev,
-                                    "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
-                                    __func__, timeout, readl(abb->int_base));
-               return -ETIMEDOUT;
-       }
-
-       return 0;
+       dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+                            __func__, timeout, readl(abb->int_base));
+       return -ETIMEDOUT;
 }
 
 /**
index 213ff40..3c9a64c 100644 (file)
@@ -76,7 +76,6 @@ static const char *rcdev_name(struct reset_controller_dev *rcdev)
  * of_reset_simple_xlate - translate reset_spec to the reset line number
  * @rcdev: a pointer to the reset controller device
  * @reset_spec: reset line specifier as found in the device tree
- * @flags: a flags pointer to fill in (optional)
  *
  * This simple translation function should be used for reset controllers
  * with 1:1 mapping, where reset lines can be indexed by number without gaps.
@@ -748,6 +747,7 @@ static void reset_control_array_put(struct reset_control_array *resets)
        for (i = 0; i < resets->num_rstcs; i++)
                __reset_control_put_internal(resets->rstc[i]);
        mutex_unlock(&reset_list_mutex);
+       kfree(resets);
 }
 
 /**
@@ -825,9 +825,10 @@ int __device_reset(struct device *dev, bool optional)
 }
 EXPORT_SYMBOL_GPL(__device_reset);
 
-/**
+/*
  * APIs to manage an array of reset controls.
  */
+
 /**
  * of_reset_control_get_count - Count number of resets available with a device
  *
index 45bdb47..9157e72 100644 (file)
@@ -522,8 +522,7 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
        if (filp->f_inode->i_cdev == &zcrypt_cdev) {
                struct zcdn_device *zcdndev;
 
-               if (mutex_lock_interruptible(&ap_perms_mutex))
-                       return -ERESTARTSYS;
+               mutex_lock(&ap_perms_mutex);
                zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
                mutex_unlock(&ap_perms_mutex);
                if (zcdndev) {
index 1b92f3c..90cf469 100644 (file)
@@ -898,7 +898,7 @@ config SCSI_SNI_53C710
 
 config 53C700_LE_ON_BE
        bool
-       depends on SCSI_LASI700
+       depends on SCSI_LASI700 || SCSI_SNI_53C710
        default y
 
 config SCSI_STEX
index 5f8153c..76751d6 100644 (file)
@@ -579,7 +579,6 @@ ch_release(struct inode *inode, struct file *file)
        scsi_changer *ch = file->private_data;
 
        scsi_device_put(ch->device);
-       ch->device = NULL;
        file->private_data = NULL;
        kref_put(&ch->ref, ch_destroy);
        return 0;
index 4971104..f32da0c 100644 (file)
@@ -512,6 +512,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
        unsigned int tpg_desc_tbl_off;
        unsigned char orig_transition_tmo;
        unsigned long flags;
+       bool transitioning_sense = false;
 
        if (!pg->expiry) {
                unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
@@ -572,13 +573,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                        goto retry;
                }
                /*
-                * Retry on ALUA state transition or if any
-                * UNIT ATTENTION occurred.
+                * If the array returns with 'ALUA state transition'
+                * sense code here it cannot return RTPG data during
+                * transition. So set the state to 'transitioning' directly.
                 */
                if (sense_hdr.sense_key == NOT_READY &&
-                   sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
-                       err = SCSI_DH_RETRY;
-               else if (sense_hdr.sense_key == UNIT_ATTENTION)
+                   sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
+                       transitioning_sense = true;
+                       goto skip_rtpg;
+               }
+               /*
+                * Retry on any other UNIT ATTENTION occurred.
+                */
+               if (sense_hdr.sense_key == UNIT_ATTENTION)
                        err = SCSI_DH_RETRY;
                if (err == SCSI_DH_RETRY &&
                    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
@@ -666,7 +673,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                off = 8 + (desc[7] * 4);
        }
 
+ skip_rtpg:
        spin_lock_irqsave(&pg->lock, flags);
+       if (transitioning_sense)
+               pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+
        sdev_printk(KERN_INFO, sdev,
                    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
                    ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
index ac39ed7..216e557 100644 (file)
@@ -5477,6 +5477,8 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
+       c->device = dev;
+
        enqueue_cmd_and_start_io(h, c);
        /* the cmd'll come back via intr handler in complete_scsi_command()  */
        return 0;
@@ -5548,6 +5550,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
                hpsa_cmd_init(h, c->cmdindex, c);
                c->cmd_type = CMD_SCSI;
                c->scsi_cmd = cmd;
+               c->device = dev;
                rc = hpsa_scsi_ioaccel_raid_map(h, c);
                if (rc < 0)     /* scsi_dma_map failed. */
                        rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -5555,6 +5558,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
                hpsa_cmd_init(h, c->cmdindex, c);
                c->cmd_type = CMD_SCSI;
                c->scsi_cmd = cmd;
+               c->device = dev;
                rc = hpsa_scsi_ioaccel_direct_map(h, c);
                if (rc < 0)     /* scsi_dma_map failed. */
                        rc = SCSI_MLQUEUE_HOST_BUSY;
index e91377a..e8813d2 100644 (file)
@@ -9055,7 +9055,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                }
        }
 
-#if defined(BUILD_NVME)
        /* Clear NVME stats */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
@@ -9063,7 +9062,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                               sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
                }
        }
-#endif
 
        /* Clear SCSI stats */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
index f4b879d..fc6e454 100644 (file)
@@ -851,9 +851,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
        if (!(vport->fc_flag & FC_PT2PT)) {
                /* Check config parameter use-adisc or FCP-2 */
-               if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+               if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
                    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
-                    (ndlp->nlp_type & NLP_FCP_TARGET))) {
+                    (ndlp->nlp_type & NLP_FCP_TARGET)))) {
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_NPR_ADISC;
                        spin_unlock_irq(shost->host_lock);
index fe10976..6822cd9 100644 (file)
@@ -528,7 +528,6 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
                        list_del_init(&psb->list);
                        psb->exch_busy = 0;
                        psb->status = IOSTAT_SUCCESS;
-#ifdef BUILD_NVME
                        if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
                                qp->abts_nvme_io_bufs--;
                                spin_unlock(&qp->abts_io_buf_list_lock);
@@ -536,7 +535,6 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
                                lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
                                return;
                        }
-#endif
                        qp->abts_scsi_io_bufs--;
                        spin_unlock(&qp->abts_io_buf_list_lock);
 
index a0c6945..614f78d 100644 (file)
@@ -7866,7 +7866,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
        if (sli4_hba->hdwq) {
                for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
                        eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
-                       if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
+                       if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
                                fpeq = eq;
                                break;
                        }
index 30bafd9..7259bce 100644 (file)
@@ -440,9 +440,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
                valid = 0;
                if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
                        valid = 1;
-               else if (start == (ha->flt_region_boot * 4) ||
-                   start == (ha->flt_region_fw * 4))
-                       valid = 1;
                else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
                        valid = 1;
                if (!valid) {
@@ -489,8 +486,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
                    "Writing flash region -- 0x%x/0x%x.\n",
                    ha->optrom_region_start, ha->optrom_region_size);
 
-               ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+               rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
                    ha->optrom_region_start, ha->optrom_region_size);
+               if (rval)
+                       rval = -EIO;
                break;
        default:
                rval = -EINVAL;
index 28d587a..99f0a1a 100644 (file)
@@ -253,7 +253,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
        srb_t *sp;
        const char *type;
        int req_sg_cnt, rsp_sg_cnt;
-       int rval =  (DRIVER_ERROR << 16);
+       int rval =  (DID_ERROR << 16);
        uint16_t nextlid = 0;
 
        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
@@ -432,7 +432,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
-       int rval = (DRIVER_ERROR << 16);
+       int rval = (DID_ERROR << 16);
        int req_sg_cnt, rsp_sg_cnt;
        uint16_t loop_id;
        struct fc_port *fcport;
@@ -1950,7 +1950,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
-       int rval = (DRIVER_ERROR << 16);
+       int rval = (DID_ERROR << 16);
        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
        srb_t *sp;
        int req_sg_cnt = 0, rsp_sg_cnt = 0;
index 1cc6913..4a1f21c 100644 (file)
@@ -702,6 +702,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
                mcp->mb[2] = LSW(risc_addr);
                mcp->mb[3] = 0;
                mcp->mb[4] = 0;
+               mcp->mb[11] = 0;
                ha->flags.using_lr_setting = 0;
                if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
                    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -746,7 +747,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
                if (ha->flags.exchoffld_enabled)
                        mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
 
-               mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
+               mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
                mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
        } else {
                mcp->mb[1] = LSW(risc_addr);
index 6afad68..2382409 100644 (file)
@@ -76,9 +76,11 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
         * ensures no active vp_list traversal while the vport is removed
         * from the queue)
         */
-       for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++)
-               wait_event_timeout(vha->vref_waitq,
-                   atomic_read(&vha->vref_count), HZ);
+       for (i = 0; i < 10; i++) {
+               if (wait_event_timeout(vha->vref_waitq,
+                   !atomic_read(&vha->vref_count), HZ) > 0)
+                       break;
+       }
 
        spin_lock_irqsave(&ha->vport_slock, flags);
        if (atomic_read(&vha->vref_count)) {
index 3568031..726ad4c 100644 (file)
@@ -1119,9 +1119,11 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 
        qla2x00_mark_all_devices_lost(vha, 0);
 
-       for (i = 0; i < 10; i++)
-               wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha),
-                   HZ);
+       for (i = 0; i < 10; i++) {
+               if (wait_event_timeout(vha->fcport_waitQ,
+                   test_fcport_count(vha), HZ) > 0)
+                       break;
+       }
 
        flush_workqueue(vha->hw->wq);
 }
@@ -3224,6 +3226,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
 
        ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+       if (unlikely(!ha->wq)) {
+               ret = -ENOMEM;
+               goto probe_failed;
+       }
 
        if (ha->isp_ops->initialize_adapter(base_vha)) {
                ql_log(ql_log_fatal, base_vha, 0x00d6,
@@ -3531,6 +3537,10 @@ qla2x00_shutdown(struct pci_dev *pdev)
                qla2x00_try_to_stop_firmware(vha);
        }
 
+       /* Disable timer */
+       if (vha->timer_active)
+               qla2x00_stop_timer(vha);
+
        /* Turn adapter off line */
        vha->flags.online = 0;
 
index 5447738..91c007d 100644 (file)
@@ -1883,7 +1883,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 {
        unsigned int cmd_size, sgl_size;
 
-       sgl_size = scsi_mq_inline_sgl_size(shost);
+       sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
+                               scsi_mq_inline_sgl_size(shost));
        cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
        if (scsi_host_get_prot(shost))
                cmd_size += sizeof(struct scsi_data_buffer) +
index 64c96c7..6d7362e 100644 (file)
@@ -730,6 +730,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
                  const char *buf, size_t count)
 {
        struct kernfs_node *kn;
+       struct scsi_device *sdev = to_scsi_device(dev);
+
+       /*
+        * We need to try to get module, avoiding the module been removed
+        * during delete.
+        */
+       if (scsi_device_get(sdev))
+               return -ENODEV;
 
        kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
        WARN_ON_ONCE(!kn);
@@ -744,9 +752,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
         * state into SDEV_DEL.
         */
        device_remove_file(dev, attr);
-       scsi_remove_device(to_scsi_device(dev));
+       scsi_remove_device(sdev);
        if (kn)
                sysfs_unbreak_active_protection(kn);
+       scsi_device_put(sdev);
        return count;
 };
 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
index 03163ac..ebb4016 100644 (file)
@@ -1166,11 +1166,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
        sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
        sector_t threshold;
        unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
-       bool dif, dix;
        unsigned int mask = logical_to_sectors(sdp, 1) - 1;
        bool write = rq_data_dir(rq) == WRITE;
        unsigned char protect, fua;
        blk_status_t ret;
+       unsigned int dif;
+       bool dix;
 
        ret = scsi_init_io(cmd);
        if (ret != BLK_STS_OK)
index de4019d..1efc69e 100644 (file)
@@ -263,25 +263,16 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
        int result = cmd->result;
        struct request *rq = cmd->request;
 
-       switch (req_op(rq)) {
-       case REQ_OP_ZONE_RESET:
-       case REQ_OP_ZONE_RESET_ALL:
-
-               if (result &&
-                   sshdr->sense_key == ILLEGAL_REQUEST &&
-                   sshdr->asc == 0x24)
-                       /*
-                        * INVALID FIELD IN CDB error: reset of a conventional
-                        * zone was attempted. Nothing to worry about, so be
-                        * quiet about the error.
-                        */
-                       rq->rq_flags |= RQF_QUIET;
-               break;
-
-       case REQ_OP_WRITE:
-       case REQ_OP_WRITE_ZEROES:
-       case REQ_OP_WRITE_SAME:
-               break;
+       if (req_op(rq) == REQ_OP_ZONE_RESET &&
+           result &&
+           sshdr->sense_key == ILLEGAL_REQUEST &&
+           sshdr->asc == 0x24) {
+               /*
+                * INVALID FIELD IN CDB error: reset of a conventional
+                * zone was attempted. Nothing to worry about, so be
+                * quiet about the error.
+                */
+               rq->rq_flags |= RQF_QUIET;
        }
 }
 
index aef4881..a85d52b 100644 (file)
@@ -66,10 +66,8 @@ static int snirm710_probe(struct platform_device *dev)
 
        base = res->start;
        hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
-       if (!hostdata) {
-               dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
+       if (!hostdata)
                return -ENOMEM;
-       }
 
        hostdata->dev = &dev->dev;
        dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
index a9344eb..dc2f6d2 100644 (file)
@@ -98,6 +98,8 @@ static int ufs_bsg_request(struct bsg_job *job)
 
        bsg_reply->reply_payload_rcv_len = 0;
 
+       pm_runtime_get_sync(hba->dev);
+
        msgcode = bsg_request->msgcode;
        switch (msgcode) {
        case UPIU_TRANSACTION_QUERY_REQ:
@@ -135,6 +137,8 @@ static int ufs_bsg_request(struct bsg_job *job)
                break;
        }
 
+       pm_runtime_put_sync(hba->dev);
+
        if (!desc_buff)
                goto out;
 
index d9231bd..98b9d9a 100644 (file)
@@ -249,13 +249,13 @@ static struct genpd_power_state imx6_pm_domain_pu_state = {
 };
 
 static struct imx_pm_domain imx_gpc_domains[] = {
-       [GPC_PGC_DOMAIN_ARM] {
+       [GPC_PGC_DOMAIN_ARM] {
                .base = {
                        .name = "ARM",
                        .flags = GENPD_FLAG_ALWAYS_ON,
                },
        },
-       [GPC_PGC_DOMAIN_PU] {
+       [GPC_PGC_DOMAIN_PU] {
                .base = {
                        .name = "PU",
                        .power_off = imx6_pm_domain_power_off,
@@ -266,7 +266,7 @@ static struct imx_pm_domain imx_gpc_domains[] = {
                .reg_offs = 0x260,
                .cntr_pdn_bit = 0,
        },
-       [GPC_PGC_DOMAIN_DISPLAY] {
+       [GPC_PGC_DOMAIN_DISPLAY] {
                .base = {
                        .name = "DISPLAY",
                        .power_off = imx6_pm_domain_power_off,
@@ -275,7 +275,7 @@ static struct imx_pm_domain imx_gpc_domains[] = {
                .reg_offs = 0x240,
                .cntr_pdn_bit = 4,
        },
-       [GPC_PGC_DOMAIN_PCI] {
+       [GPC_PGC_DOMAIN_PCI] {
                .base = {
                        .name = "PCI",
                        .power_off = imx6_pm_domain_power_off,
index 50831eb..c68882e 100644 (file)
@@ -46,7 +46,7 @@ static ssize_t soc_uid_show(struct device *dev,
        hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
        hdr->size = 1;
 
-       ret = imx_scu_call_rpc(soc_ipc_handle, &msg, false);
+       ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
        if (ret) {
                pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
                return ret;
index f518273..c8c80df 100644 (file)
@@ -5,6 +5,7 @@
 
 menuconfig SOUNDWIRE
        tristate "SoundWire support"
+       depends on ACPI || OF
        help
          SoundWire is a 2-Pin interface with data and clock line ratified
          by the MIPI Alliance. SoundWire is used for transporting data
index f1e38a2..13c54ea 100644 (file)
@@ -900,7 +900,7 @@ static int intel_register_dai(struct sdw_intel *sdw)
        /* Create PCM DAIs */
        stream = &cdns->pcm;
 
-       ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, stream->num_in,
+       ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
                               off, stream->num_ch_in, true);
        if (ret)
                return ret;
@@ -931,7 +931,7 @@ static int intel_register_dai(struct sdw_intel *sdw)
        if (ret)
                return ret;
 
-       off += cdns->pdm.num_bd;
+       off += cdns->pdm.num_out;
        ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
                               off, stream->num_ch_bd, false);
        if (ret)
index 48a63ca..6473fa6 100644 (file)
@@ -128,7 +128,8 @@ int sdw_of_find_slaves(struct sdw_bus *bus)
        struct device_node *node;
 
        for_each_child_of_node(bus->dev->of_node, node) {
-               int link_id, sdw_version, ret, len;
+               int link_id, ret, len;
+               unsigned int sdw_version;
                const char *compat = NULL;
                struct sdw_slave_id id;
                const __be32 *addr;
index eee1998..fac38c8 100644 (file)
@@ -469,10 +469,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
        /* Set the encryption - we only support wep */
        if (is_wep) {
                if (sme->key) {
-                       if (sme->key_idx >= NUM_WEPKEYS) {
-                               err = -EINVAL;
-                               goto exit;
-                       }
+                       if (sme->key_idx >= NUM_WEPKEYS)
+                               return -EINVAL;
 
                        result = prism2_domibset_uint32(wlandev,
                                DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
index c70caf4..a2b5c79 100644 (file)
@@ -1831,7 +1831,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
 
        while (credits) {
                struct sk_buff *p = cxgbit_sock_peek_wr(csk);
-               const u32 csum = (__force u32)p->csum;
+               u32 csum;
 
                if (unlikely(!p)) {
                        pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
@@ -1840,6 +1840,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
                        break;
                }
 
+               csum = (__force u32)p->csum;
                if (unlikely(credits < csum)) {
                        pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
                                csk,  csk->tid,
index 04bf2ac..2d19f0e 100644 (file)
@@ -1075,27 +1075,6 @@ passthrough_parse_cdb(struct se_cmd *cmd,
        unsigned int size;
 
        /*
-        * Clear a lun set in the cdb if the initiator talking to use spoke
-        * and old standards version, as we can't assume the underlying device
-        * won't choke up on it.
-        */
-       switch (cdb[0]) {
-       case READ_10: /* SBC - RDProtect */
-       case READ_12: /* SBC - RDProtect */
-       case READ_16: /* SBC - RDProtect */
-       case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
-       case VERIFY: /* SBC - VRProtect */
-       case VERIFY_16: /* SBC - VRProtect */
-       case WRITE_VERIFY: /* SBC - VRProtect */
-       case WRITE_VERIFY_12: /* SBC - VRProtect */
-       case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
-               break;
-       default:
-               cdb[1] &= 0x1f; /* clear logical unit number */
-               break;
-       }
-
-       /*
         * For REPORT LUNS we always need to emulate the response, for everything
         * else, pass it up.
         */
index 391f397..6b9865c 100644 (file)
@@ -88,7 +88,7 @@ struct cpufreq_cooling_device {
        struct cpufreq_policy *policy;
        struct list_head node;
        struct time_in_idle *idle_time;
-       struct dev_pm_qos_request qos_req;
+       struct freq_qos_request qos_req;
 };
 
 static DEFINE_IDA(cpufreq_ida);
@@ -331,7 +331,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 
        cpufreq_cdev->cpufreq_state = state;
 
-       return dev_pm_qos_update_request(&cpufreq_cdev->qos_req,
+       return freq_qos_update_request(&cpufreq_cdev->qos_req,
                                cpufreq_cdev->freq_table[state].frequency);
 }
 
@@ -615,9 +615,9 @@ __cpufreq_cooling_register(struct device_node *np,
                cooling_ops = &cpufreq_cooling_ops;
        }
 
-       ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req,
-                                    DEV_PM_QOS_MAX_FREQUENCY,
-                                    cpufreq_cdev->freq_table[0].frequency);
+       ret = freq_qos_add_request(&policy->constraints,
+                                  &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
+                                  cpufreq_cdev->freq_table[0].frequency);
        if (ret < 0) {
                pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
                       ret);
@@ -637,7 +637,7 @@ __cpufreq_cooling_register(struct device_node *np,
        return cdev;
 
 remove_qos_req:
-       dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+       freq_qos_remove_request(&cpufreq_cdev->qos_req);
 remove_ida:
        ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
 free_table:
@@ -736,7 +736,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
        mutex_unlock(&cooling_list_lock);
 
        thermal_cooling_device_unregister(cdev);
-       dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+       freq_qos_remove_request(&cpufreq_cdev->qos_req);
        ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
        kfree(cpufreq_cdev->idle_time);
        kfree(cpufreq_cdev->freq_table);
index 61cd09c..6795851 100644 (file)
@@ -80,7 +80,6 @@ static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd c
 {
        u32 data;
 
-       pci_read_config_dword(nhi->pdev, VS_CAP_19, &data);
        data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;
        pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID);
 }
index 410bf1b..5ea8db6 100644 (file)
@@ -896,12 +896,13 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
  */
 bool tb_dp_port_is_enabled(struct tb_port *port)
 {
-       u32 data;
+       u32 data[2];
 
-       if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+       if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+                        ARRAY_SIZE(data)))
                return false;
 
-       return !!(data & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
+       return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
 }
 
 /**
@@ -914,19 +915,21 @@ bool tb_dp_port_is_enabled(struct tb_port *port)
  */
 int tb_dp_port_enable(struct tb_port *port, bool enable)
 {
-       u32 data;
+       u32 data[2];
        int ret;
 
-       ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1);
+       ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+                          ARRAY_SIZE(data));
        if (ret)
                return ret;
 
        if (enable)
-               data |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
+               data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
        else
-               data &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
+               data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
 
-       return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap, 1);
+       return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
+                            ARRAY_SIZE(data));
 }
 
 /* switch utility functions */
@@ -1031,13 +1034,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
        if (sw->authorized)
                goto unlock;
 
-       /*
-        * Make sure there is no PCIe rescan ongoing when a new PCIe
-        * tunnel is created. Otherwise the PCIe rescan code might find
-        * the new tunnel too early.
-        */
-       pci_lock_rescan_remove();
-
        switch (val) {
        /* Approve switch */
        case 1:
@@ -1057,8 +1053,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
                break;
        }
 
-       pci_unlock_rescan_remove();
-
        if (!ret) {
                sw->authorized = val;
                /* Notify status change to the userspace */
index 02c5aff..8df89e9 100644 (file)
@@ -72,8 +72,8 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
 {
        struct serial_8250_men_mcb_data *data;
        struct resource *mem;
-       unsigned int num_ports;
-       unsigned int i;
+       int num_ports;
+       int i;
        void __iomem *membase;
 
        mem = mcb_get_resource(mdev, IORESOURCE_MEM);
@@ -88,7 +88,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
        dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n",
                mdev->id, num_ports);
 
-       if (num_ports == 0 || num_ports > 4) {
+       if (num_ports <= 0 || num_ports > 4) {
                dev_err(&mdev->dev, "unexpected number of ports: %u\n",
                        num_ports);
                return -ENODEV;
@@ -133,7 +133,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
 
 static void serial_8250_men_mcb_remove(struct mcb_device *mdev)
 {
-       unsigned int num_ports, i;
+       int num_ports, i;
        struct serial_8250_men_mcb_data *data = mcb_get_drvdata(mdev);
 
        if (!data)
index 1109dc5..c2123ef 100644 (file)
@@ -166,7 +166,6 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
                goto err;
 
        switch (cdns->dr_mode) {
-       case USB_DR_MODE_UNKNOWN:
        case USB_DR_MODE_OTG:
                ret = cdns3_hw_role_switch(cdns);
                if (ret)
@@ -182,6 +181,9 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
                if (ret)
                        goto err;
                break;
+       default:
+               ret = -EINVAL;
+               goto err;
        }
 
        return ret;
index 2ca280f..4c1e755 100644 (file)
@@ -1145,6 +1145,14 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
                request = cdns3_next_request(&priv_ep->pending_req_list);
                priv_req = to_cdns3_request(request);
 
+               trb = priv_ep->trb_pool + priv_ep->dequeue;
+
+               /* Request was dequeued and TRB was changed to TRB_LINK. */
+               if (TRB_FIELD_TO_TYPE(trb->control) == TRB_LINK) {
+                       trace_cdns3_complete_trb(priv_ep, trb);
+                       cdns3_move_deq_to_next_trb(priv_req);
+               }
+
                /* Re-select endpoint. It could be changed by other CPU during
                 * handling usb_gadget_giveback_request.
                 */
@@ -2067,6 +2075,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
        struct usb_request *req, *req_temp;
        struct cdns3_request *priv_req;
        struct cdns3_trb *link_trb;
+       u8 req_on_hw_ring = 0;
        unsigned long flags;
        int ret = 0;
 
@@ -2083,8 +2092,10 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
 
        list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
                                 list) {
-               if (request == req)
+               if (request == req) {
+                       req_on_hw_ring = 1;
                        goto found;
+               }
        }
 
        list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
@@ -2096,27 +2107,21 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
        goto not_found;
 
 found:
-
-       if (priv_ep->wa1_trb == priv_req->trb)
-               cdns3_wa1_restore_cycle_bit(priv_ep);
-
        link_trb = priv_req->trb;
-       cdns3_move_deq_to_next_trb(priv_req);
-       cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
-
-       /* Update ring */
-       request = cdns3_next_request(&priv_ep->deferred_req_list);
-       if (request) {
-               priv_req = to_cdns3_request(request);
 
+       /* Update ring only if removed request is on pending_req_list list */
+       if (req_on_hw_ring) {
                link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
                                              (priv_req->start_trb * TRB_SIZE));
                link_trb->control = (link_trb->control & TRB_CYCLE) |
-                                   TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE;
-       } else {
-               priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
+                                   TRB_TYPE(TRB_LINK) | TRB_CHAIN;
+
+               if (priv_ep->wa1_trb == priv_req->trb)
+                       cdns3_wa1_restore_cycle_bit(priv_ep);
        }
 
+       cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
+
 not_found:
        spin_unlock_irqrestore(&priv_dev->lock, flags);
        return ret;
@@ -2324,8 +2329,6 @@ static void cdns3_gadget_config(struct cdns3_device *priv_dev)
        writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
 
        cdns3_configure_dmult(priv_dev, NULL);
-
-       cdns3_gadget_pullup(&priv_dev->gadget, 1);
 }
 
 /**
@@ -2340,9 +2343,35 @@ static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
 {
        struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
        unsigned long flags;
+       enum usb_device_speed max_speed = driver->max_speed;
 
        spin_lock_irqsave(&priv_dev->lock, flags);
        priv_dev->gadget_driver = driver;
+
+       /* limit speed if necessary */
+       max_speed = min(driver->max_speed, gadget->max_speed);
+
+       switch (max_speed) {
+       case USB_SPEED_FULL:
+               writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
+               writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
+               break;
+       case USB_SPEED_HIGH:
+               writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
+               break;
+       case USB_SPEED_SUPER:
+               break;
+       default:
+               dev_err(priv_dev->dev,
+                       "invalid maximum_speed parameter %d\n",
+                       max_speed);
+               /* fall through */
+       case USB_SPEED_UNKNOWN:
+               /* default to superspeed */
+               max_speed = USB_SPEED_SUPER;
+               break;
+       }
+
        cdns3_gadget_config(priv_dev);
        spin_unlock_irqrestore(&priv_dev->lock, flags);
        return 0;
@@ -2376,6 +2405,8 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
                writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
                readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
                                          !(val & EP_CMD_EPRST), 1, 100);
+
+               priv_ep->flags &= ~EP_CLAIMED;
        }
 
        /* disable interrupt for device */
@@ -2570,12 +2601,7 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
        /* Check the maximum_speed parameter */
        switch (max_speed) {
        case USB_SPEED_FULL:
-               writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
-               writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
-               break;
        case USB_SPEED_HIGH:
-               writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
-               break;
        case USB_SPEED_SUPER:
                break;
        default:
@@ -2708,8 +2734,6 @@ static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
        /* disable interrupt for device */
        writel(0, &priv_dev->regs->usb_ien);
 
-       cdns3_gadget_pullup(&priv_dev->gadget, 0);
-
        return 0;
 }
 
index b498a17..ae11810 100644 (file)
@@ -12,7 +12,6 @@
 #ifdef CONFIG_USB_CDNS3_HOST
 
 int cdns3_host_init(struct cdns3 *cdns);
-void cdns3_host_exit(struct cdns3 *cdns);
 
 #else
 
index 2733a8f..ad788bf 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/platform_device.h>
 #include "core.h"
 #include "drd.h"
+#include "host-export.h"
 
 static int __cdns3_host_init(struct cdns3 *cdns)
 {
index fb8bd60..0d8e3f3 100644 (file)
@@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp)
        kfree(usblp->readbuf);
        kfree(usblp->device_id_string);
        kfree(usblp->statusbuf);
+       usb_put_intf(usblp->intf);
        kfree(usblp);
 }
 
@@ -1113,7 +1114,7 @@ static int usblp_probe(struct usb_interface *intf,
        init_waitqueue_head(&usblp->wwait);
        init_usb_anchor(&usblp->urbs);
        usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-       usblp->intf = intf;
+       usblp->intf = usb_get_intf(intf);
 
        /* Malloc device ID string buffer to the largest expected length,
         * since we can re-query it on an ioctl and a dynamic string
@@ -1198,6 +1199,7 @@ abort:
        kfree(usblp->readbuf);
        kfree(usblp->statusbuf);
        kfree(usblp->device_id_string);
+       usb_put_intf(usblp->intf);
        kfree(usblp);
 abort_ret:
        return retval;
index 151a74a..1ac1095 100644 (file)
@@ -348,6 +348,11 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 
        /* Validate the wMaxPacketSize field */
        maxp = usb_endpoint_maxp(&endpoint->desc);
+       if (maxp == 0) {
+               dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
+                   cfgno, inum, asnum, d->bEndpointAddress);
+               goto skip_to_next_endpoint_or_interface_descriptor;
+       }
 
        /* Find the highest legal maxpacket size for this endpoint */
        i = 0;          /* additional transactions per microframe */
index 89abc60..556a876 100644 (file)
@@ -102,6 +102,7 @@ config USB_DWC3_MESON_G12A
        depends on ARCH_MESON || COMPILE_TEST
        default USB_DWC3
        select USB_ROLE_SWITCH
+       select REGMAP_MMIO
        help
          Support USB2/3 functionality in Amlogic G12A platforms.
         Say 'Y' or 'M' if you have one such device.
index 999ce5e..97d6ae3 100644 (file)
@@ -312,8 +312,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
 
        reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
        dft = reg & DWC3_GFLADJ_30MHZ_MASK;
-       if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj,
-           "request value same as default, ignoring\n")) {
+       if (dft != dwc->fladj) {
                reg &= ~DWC3_GFLADJ_30MHZ_MASK;
                reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
                dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
index 5e8e182..023f035 100644 (file)
@@ -258,7 +258,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 
        ret = platform_device_add_properties(dwc->dwc3, p);
        if (ret < 0)
-               return ret;
+               goto err;
 
        ret = dwc3_pci_quirks(dwc);
        if (ret)
index 86dc1db..a9aba71 100644 (file)
@@ -707,6 +707,12 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
 
                dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
        }
+
+       while (!list_empty(&dep->cancelled_list)) {
+               req = next_request(&dep->cancelled_list);
+
+               dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+       }
 }
 
 /**
index d516e8d..5ec54b6 100644 (file)
@@ -2170,14 +2170,18 @@ void composite_dev_cleanup(struct usb_composite_dev *cdev)
                        usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req);
 
                kfree(cdev->os_desc_req->buf);
+               cdev->os_desc_req->buf = NULL;
                usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req);
+               cdev->os_desc_req = NULL;
        }
        if (cdev->req) {
                if (cdev->setup_pending)
                        usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
 
                kfree(cdev->req->buf);
+               cdev->req->buf = NULL;
                usb_ep_free_request(cdev->gadget->ep0, cdev->req);
+               cdev->req = NULL;
        }
        cdev->next_string_id = 0;
        device_remove_file(&cdev->gadget->dev, &dev_attr_suspended);
index 0251299..33852c2 100644 (file)
@@ -61,6 +61,8 @@ struct gadget_info {
        bool use_os_desc;
        char b_vendor_code;
        char qw_sign[OS_STRING_QW_SIGN_LEN];
+       spinlock_t spinlock;
+       bool unbind;
 };
 
 static inline struct gadget_info *to_gadget_info(struct config_item *item)
@@ -1244,6 +1246,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
        int                             ret;
 
        /* the gi->lock is hold by the caller */
+       gi->unbind = 0;
        cdev->gadget = gadget;
        set_gadget_data(gadget, cdev);
        ret = composite_dev_prepare(composite, cdev);
@@ -1376,31 +1379,128 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
 {
        struct usb_composite_dev        *cdev;
        struct gadget_info              *gi;
+       unsigned long flags;
 
        /* the gi->lock is hold by the caller */
 
        cdev = get_gadget_data(gadget);
        gi = container_of(cdev, struct gadget_info, cdev);
+       spin_lock_irqsave(&gi->spinlock, flags);
+       gi->unbind = 1;
+       spin_unlock_irqrestore(&gi->spinlock, flags);
 
        kfree(otg_desc[0]);
        otg_desc[0] = NULL;
        purge_configs_funcs(gi);
        composite_dev_cleanup(cdev);
        usb_ep_autoconfig_reset(cdev->gadget);
+       spin_lock_irqsave(&gi->spinlock, flags);
        cdev->gadget = NULL;
        set_gadget_data(gadget, NULL);
+       spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static int configfs_composite_setup(struct usb_gadget *gadget,
+               const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev;
+       struct gadget_info *gi;
+       unsigned long flags;
+       int ret;
+
+       cdev = get_gadget_data(gadget);
+       if (!cdev)
+               return 0;
+
+       gi = container_of(cdev, struct gadget_info, cdev);
+       spin_lock_irqsave(&gi->spinlock, flags);
+       cdev = get_gadget_data(gadget);
+       if (!cdev || gi->unbind) {
+               spin_unlock_irqrestore(&gi->spinlock, flags);
+               return 0;
+       }
+
+       ret = composite_setup(gadget, ctrl);
+       spin_unlock_irqrestore(&gi->spinlock, flags);
+       return ret;
+}
+
+static void configfs_composite_disconnect(struct usb_gadget *gadget)
+{
+       struct usb_composite_dev *cdev;
+       struct gadget_info *gi;
+       unsigned long flags;
+
+       cdev = get_gadget_data(gadget);
+       if (!cdev)
+               return;
+
+       gi = container_of(cdev, struct gadget_info, cdev);
+       spin_lock_irqsave(&gi->spinlock, flags);
+       cdev = get_gadget_data(gadget);
+       if (!cdev || gi->unbind) {
+               spin_unlock_irqrestore(&gi->spinlock, flags);
+               return;
+       }
+
+       composite_disconnect(gadget);
+       spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static void configfs_composite_suspend(struct usb_gadget *gadget)
+{
+       struct usb_composite_dev *cdev;
+       struct gadget_info *gi;
+       unsigned long flags;
+
+       cdev = get_gadget_data(gadget);
+       if (!cdev)
+               return;
+
+       gi = container_of(cdev, struct gadget_info, cdev);
+       spin_lock_irqsave(&gi->spinlock, flags);
+       cdev = get_gadget_data(gadget);
+       if (!cdev || gi->unbind) {
+               spin_unlock_irqrestore(&gi->spinlock, flags);
+               return;
+       }
+
+       composite_suspend(gadget);
+       spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static void configfs_composite_resume(struct usb_gadget *gadget)
+{
+       struct usb_composite_dev *cdev;
+       struct gadget_info *gi;
+       unsigned long flags;
+
+       cdev = get_gadget_data(gadget);
+       if (!cdev)
+               return;
+
+       gi = container_of(cdev, struct gadget_info, cdev);
+       spin_lock_irqsave(&gi->spinlock, flags);
+       cdev = get_gadget_data(gadget);
+       if (!cdev || gi->unbind) {
+               spin_unlock_irqrestore(&gi->spinlock, flags);
+               return;
+       }
+
+       composite_resume(gadget);
+       spin_unlock_irqrestore(&gi->spinlock, flags);
 }
 
 static const struct usb_gadget_driver configfs_driver_template = {
        .bind           = configfs_composite_bind,
        .unbind         = configfs_composite_unbind,
 
-       .setup          = composite_setup,
-       .reset          = composite_disconnect,
-       .disconnect     = composite_disconnect,
+       .setup          = configfs_composite_setup,
+       .reset          = configfs_composite_disconnect,
+       .disconnect     = configfs_composite_disconnect,
 
-       .suspend        = composite_suspend,
-       .resume         = composite_resume,
+       .suspend        = configfs_composite_suspend,
+       .resume         = configfs_composite_resume,
 
        .max_speed      = USB_SPEED_SUPER,
        .driver = {
index 86ffc83..1d0d895 100644 (file)
@@ -449,9 +449,11 @@ static void submit_request(struct usba_ep *ep, struct usba_request *req)
                next_fifo_transaction(ep, req);
                if (req->last_transaction) {
                        usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
-                       usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
+                       if (ep_is_control(ep))
+                               usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
                } else {
-                       usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+                       if (ep_is_control(ep))
+                               usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
                        usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
                }
        }
index 92af8dc..51fa614 100644 (file)
@@ -98,6 +98,17 @@ int usb_ep_enable(struct usb_ep *ep)
        if (ep->enabled)
                goto out;
 
+       /* UDC drivers can't handle endpoints with maxpacket size 0 */
+       if (usb_endpoint_maxp(ep->desc) == 0) {
+               /*
+                * We should log an error message here, but we can't call
+                * dev_err() because there's no way to find the gadget
+                * given only ep.
+                */
+               ret = -EINVAL;
+               goto out;
+       }
+
        ret = ep->ops->enable(ep, ep->desc);
        if (ret)
                goto out;
index 20141c3..9a05863 100644 (file)
@@ -2576,7 +2576,7 @@ static int fsl_udc_remove(struct platform_device *pdev)
        dma_pool_destroy(udc_controller->td_pool);
        free_irq(udc_controller->irq, udc_controller);
        iounmap(dr_regs);
-       if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
+       if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE))
                release_mem_region(res->start, resource_size(res));
 
        /* free udc --wait for the release() finished */
index 2b1f3cc..bf6c81e 100644 (file)
@@ -1177,11 +1177,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
                        tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
 
                        bl = bytes - n;
-                       if (bl > 3)
-                               bl = 3;
+                       if (bl > 4)
+                               bl = 4;
 
                        for (i = 0; i < bl; i++)
-                               data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
+                               data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
                }
                break;
 
index e098f16..3370314 100644 (file)
@@ -1544,10 +1544,10 @@ static void usb3_set_device_address(struct renesas_usb3 *usb3, u16 addr)
 static bool usb3_std_req_set_address(struct renesas_usb3 *usb3,
                                     struct usb_ctrlrequest *ctrl)
 {
-       if (ctrl->wValue >= 128)
+       if (le16_to_cpu(ctrl->wValue) >= 128)
                return true;    /* stall */
 
-       usb3_set_device_address(usb3, ctrl->wValue);
+       usb3_set_device_address(usb3, le16_to_cpu(ctrl->wValue));
        usb3_set_p0_con_for_no_data(usb3);
 
        return false;
@@ -1582,6 +1582,7 @@ static bool usb3_std_req_get_status(struct renesas_usb3 *usb3,
        struct renesas_usb3_ep *usb3_ep;
        int num;
        u16 status = 0;
+       __le16 tx_data;
 
        switch (ctrl->bRequestType & USB_RECIP_MASK) {
        case USB_RECIP_DEVICE:
@@ -1604,10 +1605,10 @@ static bool usb3_std_req_get_status(struct renesas_usb3 *usb3,
        }
 
        if (!stall) {
-               status = cpu_to_le16(status);
+               tx_data = cpu_to_le16(status);
                dev_dbg(usb3_to_dev(usb3), "get_status: req = %p\n",
                        usb_req_to_usb3_req(usb3->ep0_req));
-               usb3_pipe0_internal_xfer(usb3, &status, sizeof(status),
+               usb3_pipe0_internal_xfer(usb3, &tx_data, sizeof(tx_data),
                                         usb3_pipe0_get_status_completion);
        }
 
@@ -1772,7 +1773,7 @@ static bool usb3_std_req_set_sel(struct renesas_usb3 *usb3,
 static bool usb3_std_req_set_configuration(struct renesas_usb3 *usb3,
                                           struct usb_ctrlrequest *ctrl)
 {
-       if (ctrl->wValue > 0)
+       if (le16_to_cpu(ctrl->wValue) > 0)
                usb3_set_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
        else
                usb3_clear_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
index 7ba6afc..76c3f29 100644 (file)
@@ -202,10 +202,10 @@ static void xhci_ring_dump_segment(struct seq_file *s,
                trb = &seg->trbs[i];
                dma = seg->dma + i * sizeof(*trb);
                seq_printf(s, "%pad: %s\n", &dma,
-                          xhci_decode_trb(trb->generic.field[0],
-                                          trb->generic.field[1],
-                                          trb->generic.field[2],
-                                          trb->generic.field[3]));
+                          xhci_decode_trb(le32_to_cpu(trb->generic.field[0]),
+                                          le32_to_cpu(trb->generic.field[1]),
+                                          le32_to_cpu(trb->generic.field[2]),
+                                          le32_to_cpu(trb->generic.field[3])));
        }
 }
 
@@ -263,10 +263,10 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
        xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
        seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma,
-                  xhci_decode_slot_context(slot_ctx->dev_info,
-                                           slot_ctx->dev_info2,
-                                           slot_ctx->tt_info,
-                                           slot_ctx->dev_state));
+                  xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info),
+                                           le32_to_cpu(slot_ctx->dev_info2),
+                                           le32_to_cpu(slot_ctx->tt_info),
+                                           le32_to_cpu(slot_ctx->dev_state)));
 
        return 0;
 }
@@ -286,10 +286,10 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
                ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
                dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
                seq_printf(s, "%pad: %s\n", &dma,
-                          xhci_decode_ep_context(ep_ctx->ep_info,
-                                                 ep_ctx->ep_info2,
-                                                 ep_ctx->deq,
-                                                 ep_ctx->tx_info));
+                          xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
+                                                 le32_to_cpu(ep_ctx->ep_info2),
+                                                 le64_to_cpu(ep_ctx->deq),
+                                                 le32_to_cpu(ep_ctx->tx_info)));
        }
 
        return 0;
index 85ceb43..e7aab31 100644 (file)
@@ -3330,6 +3330,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        if (xhci_urb_suitable_for_idt(urb)) {
                                memcpy(&send_addr, urb->transfer_buffer,
                                       trb_buff_len);
+                               le64_to_cpus(&send_addr);
                                field |= TRB_IDT;
                        }
                }
@@ -3475,6 +3476,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                if (xhci_urb_suitable_for_idt(urb)) {
                        memcpy(&addr, urb->transfer_buffer,
                               urb->transfer_buffer_length);
+                       le64_to_cpus(&addr);
                        field |= TRB_IDT;
                } else {
                        addr = (u64) urb->transfer_dma;
index 517ec32..6c17e3f 100644 (file)
@@ -3071,6 +3071,48 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
        }
 }
 
+static void xhci_endpoint_disable(struct usb_hcd *hcd,
+                                 struct usb_host_endpoint *host_ep)
+{
+       struct xhci_hcd         *xhci;
+       struct xhci_virt_device *vdev;
+       struct xhci_virt_ep     *ep;
+       struct usb_device       *udev;
+       unsigned long           flags;
+       unsigned int            ep_index;
+
+       xhci = hcd_to_xhci(hcd);
+rescan:
+       spin_lock_irqsave(&xhci->lock, flags);
+
+       udev = (struct usb_device *)host_ep->hcpriv;
+       if (!udev || !udev->slot_id)
+               goto done;
+
+       vdev = xhci->devs[udev->slot_id];
+       if (!vdev)
+               goto done;
+
+       ep_index = xhci_get_endpoint_index(&host_ep->desc);
+       ep = &vdev->eps[ep_index];
+       if (!ep)
+               goto done;
+
+       /* wait for hub_tt_work to finish clearing hub TT */
+       if (ep->ep_state & EP_CLEARING_TT) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               schedule_timeout_uninterruptible(1);
+               goto rescan;
+       }
+
+       if (ep->ep_state)
+               xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
+                        ep->ep_state);
+done:
+       host_ep->hcpriv = NULL;
+       spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
 /*
  * Called after usb core issues a clear halt control message.
  * The host side of the halt should already be cleared by a reset endpoint
@@ -5238,20 +5280,13 @@ static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
        unsigned int ep_index;
        unsigned long flags;
 
-       /*
-        * udev might be NULL if tt buffer is cleared during a failed device
-        * enumeration due to a halted control endpoint. Usb core might
-        * have allocated a new udev for the next enumeration attempt.
-        */
-
        xhci = hcd_to_xhci(hcd);
+
+       spin_lock_irqsave(&xhci->lock, flags);
        udev = (struct usb_device *)ep->hcpriv;
-       if (!udev)
-               return;
        slot_id = udev->slot_id;
        ep_index = xhci_get_endpoint_index(&ep->desc);
 
-       spin_lock_irqsave(&xhci->lock, flags);
        xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
        xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        spin_unlock_irqrestore(&xhci->lock, flags);
@@ -5288,6 +5323,7 @@ static const struct hc_driver xhci_hc_driver = {
        .free_streams =         xhci_free_streams,
        .add_endpoint =         xhci_add_endpoint,
        .drop_endpoint =        xhci_drop_endpoint,
+       .endpoint_disable =     xhci_endpoint_disable,
        .endpoint_reset =       xhci_endpoint_reset,
        .check_bandwidth =      xhci_check_bandwidth,
        .reset_bandwidth =      xhci_reset_bandwidth,
index f3108d8..8f86b4e 100644 (file)
@@ -380,10 +380,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
                goto exit;
        }
 
-       if (mutex_lock_interruptible(&dev->mutex)) {
-               retval = -ERESTARTSYS;
-               goto exit;
-       }
+       mutex_lock(&dev->mutex);
 
        if (dev->open_count != 1) {
                retval = -ENODEV;
@@ -467,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 
        /* wait for data */
        spin_lock_irq(&dev->rbsl);
-       if (dev->ring_head == dev->ring_tail) {
+       while (dev->ring_head == dev->ring_tail) {
                dev->interrupt_in_done = 0;
                spin_unlock_irq(&dev->rbsl);
                if (file->f_flags & O_NONBLOCK) {
@@ -477,15 +474,20 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
                retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
                if (retval < 0)
                        goto unlock_exit;
-       } else {
-               spin_unlock_irq(&dev->rbsl);
+
+               spin_lock_irq(&dev->rbsl);
        }
+       spin_unlock_irq(&dev->rbsl);
 
        /* actual_buffer contains actual_length + interrupt_in_buffer */
        actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
+       if (*actual_buffer > dev->interrupt_in_endpoint_size) {
+               retval = -EIO;
+               goto unlock_exit;
+       }
        bytes_to_read = min(count, *actual_buffer);
        if (bytes_to_read < *actual_buffer)
-               dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
+               dev_warn(&dev->intf->dev, "Read buffer overflow, %zu bytes dropped\n",
                         *actual_buffer-bytes_to_read);
 
        /* copy one interrupt_in_buffer from ring_buffer into userspace */
@@ -493,11 +495,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
                retval = -EFAULT;
                goto unlock_exit;
        }
-       dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
-
        retval = bytes_to_read;
 
        spin_lock_irq(&dev->rbsl);
+       dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size;
+
        if (dev->buffer_overflow) {
                dev->buffer_overflow = 0;
                spin_unlock_irq(&dev->rbsl);
@@ -560,8 +562,9 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
        /* write the data into interrupt_out_buffer from userspace */
        bytes_to_write = min(count, write_buffer_size*dev->interrupt_out_endpoint_size);
        if (bytes_to_write < count)
-               dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n", count-bytes_to_write);
-       dev_dbg(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n",
+               dev_warn(&dev->intf->dev, "Write buffer overflow, %zu bytes dropped\n",
+                       count - bytes_to_write);
+       dev_dbg(&dev->intf->dev, "%s: count = %zu, bytes_to_write = %zu\n",
                __func__, count, bytes_to_write);
 
        if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
@@ -578,7 +581,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
                                         1 << 8, 0,
                                         dev->interrupt_out_buffer,
                                         bytes_to_write,
-                                        USB_CTRL_SET_TIMEOUT * HZ);
+                                        USB_CTRL_SET_TIMEOUT);
                if (retval < 0)
                        dev_err(&dev->intf->dev,
                                "Couldn't submit HID_REQ_SET_REPORT %d\n",
@@ -693,10 +696,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
                dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
 
        dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
-       dev->ring_buffer =
-               kmalloc_array(ring_buffer_size,
-                             sizeof(size_t) + dev->interrupt_in_endpoint_size,
-                             GFP_KERNEL);
+       dev->ring_buffer = kcalloc(ring_buffer_size,
+                       sizeof(size_t) + dev->interrupt_in_endpoint_size,
+                       GFP_KERNEL);
        if (!dev->ring_buffer)
                goto error;
        dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
index 9d4c52a..23061f1 100644 (file)
@@ -419,10 +419,7 @@ static int tower_release (struct inode *inode, struct file *file)
                goto exit;
        }
 
-       if (mutex_lock_interruptible(&dev->lock)) {
-               retval = -ERESTARTSYS;
-               goto exit;
-       }
+       mutex_lock(&dev->lock);
 
        if (dev->open_count != 1) {
                dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
@@ -881,7 +878,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
                                  get_version_reply,
                                  sizeof(*get_version_reply),
                                  1000);
-       if (result < sizeof(*get_version_reply)) {
+       if (result != sizeof(*get_version_reply)) {
                if (result >= 0)
                        result = -EIO;
                dev_err(idev, "get version request failed: %d\n", result);
index c3d5c12..9dd0216 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 
 #include "mtu3.h"
+#include "mtu3_dr.h"
 #include "mtu3_debug.h"
 #include "mtu3_trace.h"
 
index 4c3de77..a3c30b6 100644 (file)
@@ -162,17 +162,17 @@ void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
        req->bRequest           = (val >> 8) & 0xFF;
        req->bRequestType       = (val >> 0) & 0xFF;
 
-       req->wValue     = usbhs_read(priv, USBVAL);
-       req->wIndex     = usbhs_read(priv, USBINDX);
-       req->wLength    = usbhs_read(priv, USBLENG);
+       req->wValue     = cpu_to_le16(usbhs_read(priv, USBVAL));
+       req->wIndex     = cpu_to_le16(usbhs_read(priv, USBINDX));
+       req->wLength    = cpu_to_le16(usbhs_read(priv, USBLENG));
 }
 
 void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
 {
        usbhs_write(priv, USBREQ,  (req->bRequest << 8) | req->bRequestType);
-       usbhs_write(priv, USBVAL,  req->wValue);
-       usbhs_write(priv, USBINDX, req->wIndex);
-       usbhs_write(priv, USBLENG, req->wLength);
+       usbhs_write(priv, USBVAL,  le16_to_cpu(req->wValue));
+       usbhs_write(priv, USBINDX, le16_to_cpu(req->wIndex));
+       usbhs_write(priv, USBLENG, le16_to_cpu(req->wLength));
 
        usbhs_bset(priv, DCPCTR, SUREQ, SUREQ);
 }
index e5ef569..cd38d74 100644 (file)
@@ -265,7 +265,7 @@ static int usbhsg_recip_handler_std_set_device(struct usbhs_priv *priv,
        case USB_DEVICE_TEST_MODE:
                usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
                udelay(100);
-               usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex >> 8));
+               usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex) >> 8);
                break;
        default:
                usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
@@ -315,7 +315,7 @@ static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv,
        struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
        struct device *dev = usbhsg_gpriv_to_dev(gpriv);
        struct usb_request *req;
-       unsigned short *buf;
+       __le16 *buf;
 
        /* alloc new usb_request for recip */
        req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC);
index dd0ad67..ef23acc 100644 (file)
@@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port)
        struct ti_port *tport;
        int port_number;
        int status;
-       int do_unlock;
        unsigned long flags;
 
        tdev = usb_get_serial_data(port->serial);
@@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port)
                        "%s - cannot send close port command, %d\n"
                                                        , __func__, status);
 
-       /* if mutex_lock is interrupted, continue anyway */
-       do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
-       --tport->tp_tdev->td_open_port_count;
-       if (tport->tp_tdev->td_open_port_count <= 0) {
+       mutex_lock(&tdev->td_open_close_lock);
+       --tdev->td_open_port_count;
+       if (tdev->td_open_port_count == 0) {
                /* last port is closed, shut down interrupt urb */
                usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
-               tport->tp_tdev->td_open_port_count = 0;
        }
-       if (do_unlock)
-               mutex_unlock(&tdev->td_open_close_lock);
+       mutex_unlock(&tdev->td_open_close_lock);
 }
 
 
index 79314d8..ca3bd58 100644 (file)
@@ -559,6 +559,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
 
        command_port = port->serial->port[COMMAND_PORT];
        command_info = usb_get_serial_port_data(command_port);
+
+       if (command_port->bulk_out_size < datasize + 1)
+               return -EIO;
+
        mutex_lock(&command_info->mutex);
        command_info->command_finished = false;
 
@@ -632,6 +636,7 @@ static void firm_setup_port(struct tty_struct *tty)
        struct device *dev = &port->dev;
        struct whiteheat_port_settings port_settings;
        unsigned int cflag = tty->termios.c_cflag;
+       speed_t baud;
 
        port_settings.port = port->port_number + 1;
 
@@ -692,11 +697,13 @@ static void firm_setup_port(struct tty_struct *tty)
        dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff);
 
        /* get the baud rate wanted */
-       port_settings.baud = tty_get_baud_rate(tty);
-       dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud);
+       baud = tty_get_baud_rate(tty);
+       port_settings.baud = cpu_to_le32(baud);
+       dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud);
 
        /* fixme: should set validated settings */
-       tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud);
+       tty_encode_baud_rate(tty, baud, baud);
+
        /* handle any settings that aren't specified in the tty structure */
        port_settings.lloop = 0;
 
index 0039814..269e727 100644 (file)
@@ -87,7 +87,7 @@ struct whiteheat_simple {
 
 struct whiteheat_port_settings {
        __u8    port;           /* port number (1 to N) */
-       __u32   baud;           /* any value 7 - 460800, firmware calculates
+       __le32  baud;           /* any value 7 - 460800, firmware calculates
                                   best fit; arrives little endian */
        __u8    bits;           /* 5, 6, 7, or 8 */
        __u8    stop;           /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */
index 6737fab..54a3c81 100644 (file)
@@ -68,7 +68,6 @@ static const char* host_info(struct Scsi_Host *host)
 static int slave_alloc (struct scsi_device *sdev)
 {
        struct us_data *us = host_to_us(sdev->host);
-       int maxp;
 
        /*
         * Set the INQUIRY transfer length to 36.  We don't use any of
@@ -78,15 +77,6 @@ static int slave_alloc (struct scsi_device *sdev)
        sdev->inquiry_len = 36;
 
        /*
-        * USB has unusual scatter-gather requirements: the length of each
-        * scatterlist element except the last must be divisible by the
-        * Bulk maxpacket value.  Fortunately this value is always a
-        * power of 2.  Inform the block layer about this requirement.
-        */
-       maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
-       blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
-
-       /*
         * Some host controllers may have alignment requirements.
         * We'll play it safe by requiring 512-byte alignment always.
         */
index bf80d6f..3453825 100644 (file)
@@ -789,30 +789,10 @@ static int uas_slave_alloc(struct scsi_device *sdev)
 {
        struct uas_dev_info *devinfo =
                (struct uas_dev_info *)sdev->host->hostdata;
-       int maxp;
 
        sdev->hostdata = devinfo;
 
        /*
-        * We have two requirements here. We must satisfy the requirements
-        * of the physical HC and the demands of the protocol, as we
-        * definitely want no additional memory allocation in this path
-        * ruling out using bounce buffers.
-        *
-        * For a transmission on USB to continue we must never send
-        * a package that is smaller than maxpacket. Hence the length of each
-         * scatterlist element except the last must be divisible by the
-         * Bulk maxpacket value.
-        * If the HC does not ensure that through SG,
-        * the upper layer must do that. We must assume nothing
-        * about the capabilities off the HC, so we use the most
-        * pessimistic requirement.
-        */
-
-       maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
-       blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
-
-       /*
         * The protocol has no requirements on alignment in the strict sense.
         * Controllers may or may not have alignment restrictions.
         * As this is not exported, we use an extremely conservative guess.
index c380378..0ae40a1 100644 (file)
@@ -147,7 +147,10 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
                }
 
                kfree(iov);
+               /* This is only for isochronous case */
                kfree(iso_buffer);
+               iso_buffer = NULL;
+
                usbip_dbg_vhci_tx("send txdata\n");
 
                total_size += txsize;
index 96fddc1..d864277 100644 (file)
@@ -1658,7 +1658,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
        struct bus_type *bus = NULL;
        int ret;
        bool resv_msi, msi_remap;
-       phys_addr_t resv_msi_base;
+       phys_addr_t resv_msi_base = 0;
        struct iommu_domain_geometry geo;
        LIST_HEAD(iova_copy);
        LIST_HEAD(group_resv_regions);
index 08ad0d1..a0a2d74 100644 (file)
@@ -852,6 +852,12 @@ static inline int xfer_kern(void *src, void *dst, size_t len)
        return 0;
 }
 
+static inline int kern_xfer(void *dst, void *src, size_t len)
+{
+       memcpy(dst, src, len);
+       return 0;
+}
+
 /**
  * vringh_init_kern - initialize a vringh for a kernelspace vring.
  * @vrh: the vringh to initialize.
@@ -958,7 +964,7 @@ EXPORT_SYMBOL(vringh_iov_pull_kern);
 ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
                             const void *src, size_t len)
 {
-       return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern);
+       return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer);
 }
 EXPORT_SYMBOL(vringh_iov_push_kern);
 
index e1035a8..45a6d89 100644 (file)
@@ -29,7 +29,7 @@ static inline void _transp(u32 d[], unsigned int i1, unsigned int i2,
 
 extern void c2p_unsupported(void);
 
-static inline u32 get_mask(unsigned int n)
+static __always_inline u32 get_mask(unsigned int n)
 {
        switch (n) {
        case 1:
@@ -57,7 +57,7 @@ static inline u32 get_mask(unsigned int n)
      *  Transpose operations on 8 32-bit words
      */
 
-static inline void transp8(u32 d[], unsigned int n, unsigned int m)
+static __always_inline void transp8(u32 d[], unsigned int n, unsigned int m)
 {
        u32 mask = get_mask(n);
 
@@ -99,7 +99,7 @@ static inline void transp8(u32 d[], unsigned int n, unsigned int m)
      *  Transpose operations on 4 32-bit words
      */
 
-static inline void transp4(u32 d[], unsigned int n, unsigned int m)
+static __always_inline void transp4(u32 d[], unsigned int n, unsigned int m)
 {
        u32 mask = get_mask(n);
 
@@ -126,7 +126,7 @@ static inline void transp4(u32 d[], unsigned int n, unsigned int m)
      *  Transpose operations on 4 32-bit words (reverse order)
      */
 
-static inline void transp4x(u32 d[], unsigned int n, unsigned int m)
+static __always_inline void transp4x(u32 d[], unsigned int n, unsigned int m)
 {
        u32 mask = get_mask(n);
 
index bdc0824..a8041e4 100644 (file)
@@ -1499,9 +1499,6 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
                 * counter first before updating event flags.
                 */
                virtio_wmb(vq->weak_barriers);
-       } else {
-               used_idx = vq->last_used_idx;
-               wrap_counter = vq->packed.used_wrap_counter;
        }
 
        if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
@@ -1518,7 +1515,9 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
         */
        virtio_mb(vq->weak_barriers);
 
-       if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
+       if (is_used_desc_packed(vq,
+                               vq->last_used_idx,
+                               vq->packed.used_wrap_counter)) {
                END_USE(vq);
                return false;
        }
index b0152fe..bc60e03 100644 (file)
@@ -288,3 +288,4 @@ module_platform_driver(bd70528_wdt);
 MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
 MODULE_DESCRIPTION("BD70528 watchdog driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-wdt");
index 9393be5..808eeb4 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/timer.h>
+#include <linux/compat.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/io.h>
@@ -473,6 +474,11 @@ static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return 0;
 }
 
+static long cpwd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       return cpwd_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+
 static ssize_t cpwd_write(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
 {
@@ -497,7 +503,7 @@ static ssize_t cpwd_read(struct file *file, char __user *buffer,
 static const struct file_operations cpwd_fops = {
        .owner =                THIS_MODULE,
        .unlocked_ioctl =       cpwd_ioctl,
-       .compat_ioctl =         compat_ptr_ioctl,
+       .compat_ioctl =         cpwd_compat_ioctl,
        .open =                 cpwd_open,
        .write =                cpwd_write,
        .read =                 cpwd_read,
index 7ea5cf5..8ed89f0 100644 (file)
@@ -99,8 +99,14 @@ static int imx_sc_wdt_set_pretimeout(struct watchdog_device *wdog,
 {
        struct arm_smccc_res res;
 
+       /*
+        * SCU firmware calculates pretimeout based on current time
+        * stamp instead of watchdog timeout stamp, need to convert
+        * the pretimeout to SCU firmware's timeout value.
+        */
        arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_PRETIME_WDOG,
-                     pretimeout * 1000, 0, 0, 0, 0, 0, &res);
+                     (wdog->timeout - pretimeout) * 1000, 0, 0, 0,
+                     0, 0, &res);
        if (res.a0)
                return -EACCES;
 
index d17c1a6..5a9ca10 100644 (file)
@@ -89,8 +89,8 @@ static unsigned int meson_gxbb_wdt_get_timeleft(struct watchdog_device *wdt_dev)
 
        reg = readl(data->reg_base + GXBB_WDT_TCNT_REG);
 
-       return ((reg >> GXBB_WDT_TCNT_CNT_SHIFT) -
-               (reg & GXBB_WDT_TCNT_SETUP_MASK)) / 1000;
+       return ((reg & GXBB_WDT_TCNT_SETUP_MASK) -
+               (reg >> GXBB_WDT_TCNT_CNT_SHIFT)) / 1000;
 }
 
 static const struct watchdog_ops meson_gxbb_wdt_ops = {
index 2d36520..1213179 100644 (file)
@@ -163,9 +163,17 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq > 0) {
-               if (devm_request_irq(dev, irq, pm8916_wdt_isr, 0, "pm8916_wdt",
-                                    wdt))
-                       irq = 0;
+               err = devm_request_irq(dev, irq, pm8916_wdt_isr, 0,
+                                      "pm8916_wdt", wdt);
+               if (err)
+                       return err;
+
+               wdt->wdev.info = &pm8916_wdt_pt_ident;
+       } else {
+               if (irq == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+
+               wdt->wdev.info = &pm8916_wdt_ident;
        }
 
        /* Configure watchdog to hard-reset mode */
@@ -177,7 +185,6 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
                return err;
        }
 
-       wdt->wdev.info = (irq > 0) ? &pm8916_wdt_pt_ident : &pm8916_wdt_ident,
        wdt->wdev.ops = &pm8916_wdt_ops,
        wdt->wdev.parent = dev;
        wdt->wdev.min_timeout = PM8916_WDT_MIN_TIMEOUT;
index cc12772..497f979 100644 (file)
@@ -803,7 +803,12 @@ success:
                        continue;
 
                if (cookie->inodes[i]) {
-                       afs_vnode_commit_status(&fc, AFS_FS_I(cookie->inodes[i]),
+                       struct afs_vnode *iv = AFS_FS_I(cookie->inodes[i]);
+
+                       if (test_bit(AFS_VNODE_UNSET, &iv->flags))
+                               continue;
+
+                       afs_vnode_commit_status(&fc, iv,
                                                scb->cb_break, NULL, scb);
                        continue;
                }
index 01e0fb9..0d9a559 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -2179,7 +2179,7 @@ SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
 #ifdef CONFIG_COMPAT
 
 struct __compat_aio_sigset {
-       compat_sigset_t __user  *sigmask;
+       compat_uptr_t           sigmask;
        compat_size_t           sigsetsize;
 };
 
@@ -2193,7 +2193,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
                struct old_timespec32 __user *, timeout,
                const struct __compat_aio_sigset __user *, usig)
 {
-       struct __compat_aio_sigset ksig = { NULL, };
+       struct __compat_aio_sigset ksig = { 0, };
        struct timespec64 t;
        bool interrupted;
        int ret;
@@ -2204,7 +2204,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
        if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
                return -EFAULT;
 
-       ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+       ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
        if (ret)
                return ret;
 
@@ -2228,7 +2228,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
                struct __kernel_timespec __user *, timeout,
                const struct __compat_aio_sigset __user *, usig)
 {
-       struct __compat_aio_sigset ksig = { NULL, };
+       struct __compat_aio_sigset ksig = { 0, };
        struct timespec64 t;
        bool interrupted;
        int ret;
@@ -2239,7 +2239,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
        if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
                return -EFAULT;
 
-       ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+       ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
        if (ret)
                return ret;
 
index 2866fab..91f5787 100644 (file)
@@ -459,9 +459,10 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb,
                 */
                how &= ~AUTOFS_EXP_LEAVES;
                found = should_expire(expired, mnt, timeout, how);
-               if (!found || found != expired)
-                       /* Something has changed, continue */
+               if (found != expired) { // something has changed, continue
+                       dput(found);
                        goto next;
+               }
 
                if (expired != dentry)
                        dput(dentry);
index bf7e3f2..670700c 100644 (file)
@@ -1761,6 +1761,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
                        btrfs_err(info,
 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
                                  cache->key.objectid);
+                       btrfs_put_block_group(cache);
                        ret = -EINVAL;
                        goto error;
                }
index 19d669d..fe2b876 100644 (file)
@@ -734,8 +734,6 @@ struct btrfs_fs_info {
        struct btrfs_workqueue *fixup_workers;
        struct btrfs_workqueue *delayed_workers;
 
-       /* the extent workers do delayed refs on the extent allocation tree */
-       struct btrfs_workqueue *extent_workers;
        struct task_struct *transaction_kthread;
        struct task_struct *cleaner_kthread;
        u32 thread_pool_size;
@@ -2489,8 +2487,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
                                     int nitems, bool use_global_rsv);
 void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
                                      struct btrfs_block_rsv *rsv);
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
-                                   bool qgroup_free);
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
 
 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
index d949d7d..db9f2c5 100644 (file)
@@ -381,7 +381,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 out_qgroup:
        btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
 out_fail:
-       btrfs_inode_rsv_release(inode, true);
        if (delalloc_lock)
                mutex_unlock(&inode->delalloc_mutex);
        return ret;
@@ -418,7 +417,6 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
  * btrfs_delalloc_release_extents - release our outstanding_extents
  * @inode: the inode to balance the reservation for.
  * @num_bytes: the number of bytes we originally reserved with
- * @qgroup_free: do we need to free qgroup meta reservation or convert them.
  *
  * When we reserve space we increase outstanding_extents for the extents we may
  * add.  Once we've set the range as delalloc or created our ordered extents we
@@ -426,8 +424,7 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
  * temporarily tracked outstanding_extents.  This _must_ be used in conjunction
  * with btrfs_delalloc_reserve_metadata.
  */
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
-                                   bool qgroup_free)
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
 {
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        unsigned num_extents;
@@ -441,7 +438,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
        if (btrfs_is_testing(fs_info))
                return;
 
-       btrfs_inode_rsv_release(inode, qgroup_free);
+       btrfs_inode_rsv_release(inode, true);
 }
 
 /**
index 044981c..402b61b 100644 (file)
@@ -2008,7 +2008,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
        btrfs_destroy_workqueue(fs_info->readahead_workers);
        btrfs_destroy_workqueue(fs_info->flush_workers);
        btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
-       btrfs_destroy_workqueue(fs_info->extent_workers);
        /*
         * Now that all other work queues are destroyed, we can safely destroy
         * the queues used for metadata I/O, since tasks from those other work
@@ -2214,10 +2213,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
                                      max_active, 2);
        fs_info->qgroup_rescan_workers =
                btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
-       fs_info->extent_workers =
-               btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
-                                     min_t(u64, fs_devices->num_devices,
-                                           max_active), 8);
 
        if (!(fs_info->workers && fs_info->delalloc_workers &&
              fs_info->submit_workers && fs_info->flush_workers &&
@@ -2228,7 +2223,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
              fs_info->endio_freespace_worker && fs_info->rmw_workers &&
              fs_info->caching_workers && fs_info->readahead_workers &&
              fs_info->fixup_workers && fs_info->delayed_workers &&
-             fs_info->extent_workers &&
              fs_info->qgroup_rescan_workers)) {
                return -ENOMEM;
        }
index 27e5b26..435a502 100644 (file)
@@ -1692,7 +1692,7 @@ again:
                                    force_page_uptodate);
                if (ret) {
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                      reserve_bytes, true);
+                                                      reserve_bytes);
                        break;
                }
 
@@ -1704,7 +1704,7 @@ again:
                        if (extents_locked == -EAGAIN)
                                goto again;
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                      reserve_bytes, true);
+                                                      reserve_bytes);
                        ret = extents_locked;
                        break;
                }
@@ -1772,8 +1772,7 @@ again:
                else
                        free_extent_state(cached_state);
 
-               btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
-                                              true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
                if (ret) {
                        btrfs_drop_pages(pages, num_pages);
                        break;
@@ -2068,25 +2067,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        struct btrfs_trans_handle *trans;
        struct btrfs_log_ctx ctx;
        int ret = 0, err;
-       u64 len;
 
-       /*
-        * If the inode needs a full sync, make sure we use a full range to
-        * avoid log tree corruption, due to hole detection racing with ordered
-        * extent completion for adjacent ranges, and assertion failures during
-        * hole detection.
-        */
-       if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-                    &BTRFS_I(inode)->runtime_flags)) {
-               start = 0;
-               end = LLONG_MAX;
-       }
-
-       /*
-        * The range length can be represented by u64, we have to do the typecasts
-        * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
-        */
-       len = (u64)end - (u64)start + 1;
        trace_btrfs_sync_file(file, datasync);
 
        btrfs_init_log_ctx(&ctx, inode);
@@ -2113,6 +2094,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        atomic_inc(&root->log_batch);
 
        /*
+        * If the inode needs a full sync, make sure we use a full range to
+        * avoid log tree corruption, due to hole detection racing with ordered
+        * extent completion for adjacent ranges, and assertion failures during
+        * hole detection. Do this while holding the inode lock, to avoid races
+        * with other tasks.
+        */
+       if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                    &BTRFS_I(inode)->runtime_flags)) {
+               start = 0;
+               end = LLONG_MAX;
+       }
+
+       /*
         * Before we acquired the inode's lock, someone may have dirtied more
         * pages in the target range. We need to make sure that writeback for
         * any such pages does not start while we are logging the inode, because
@@ -2139,8 +2133,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        /*
         * We have to do this here to avoid the priority inversion of waiting on
         * IO of a lower priority task while holding a transaction open.
+        *
+        * Also, the range length can be represented by u64, we have to do the
+        * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
         */
-       ret = btrfs_wait_ordered_range(inode, start, len);
+       ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
        if (ret) {
                up_write(&BTRFS_I(inode)->dio_sem);
                inode_unlock(inode);
index 63cad78..37345fb 100644 (file)
@@ -501,13 +501,13 @@ again:
        ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
                                              prealloc, prealloc, &alloc_hint);
        if (ret) {
-               btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
                btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
                goto out_put;
        }
 
        ret = btrfs_write_out_ino_cache(root, trans, path, inode);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, false);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
 out_put:
        iput(inode);
 out_release:
index 0f2754e..0159100 100644 (file)
@@ -474,6 +474,7 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
        u64 start = async_chunk->start;
        u64 end = async_chunk->end;
        u64 actual_end;
+       u64 i_size;
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
@@ -488,7 +489,19 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
        inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
                        SZ_16K);
 
-       actual_end = min_t(u64, i_size_read(inode), end + 1);
+       /*
+        * We need to save i_size before now because it could change in between
+        * us evaluating the size and assigning it.  This is because we lock and
+        * unlock the page in truncate and fallocate, and then modify the i_size
+        * later on.
+        *
+        * The barriers are to emulate READ_ONCE, remove that once i_size_read
+        * does that for us.
+        */
+       barrier();
+       i_size = i_size_read(inode);
+       barrier();
+       actual_end = min_t(u64, i_size, end + 1);
 again:
        will_compress = 0;
        nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
@@ -2206,7 +2219,7 @@ again:
 
        ClearPageChecked(page);
        set_page_dirty(page);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
 out:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
                             &cached_state);
@@ -4951,7 +4964,7 @@ again:
        if (!page) {
                btrfs_delalloc_release_space(inode, data_reserved,
                                             block_start, blocksize, true);
-               btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
                ret = -ENOMEM;
                goto out;
        }
@@ -5018,7 +5031,7 @@ out_unlock:
        if (ret)
                btrfs_delalloc_release_space(inode, data_reserved, block_start,
                                             blocksize, true);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0));
+       btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
        unlock_page(page);
        put_page(page);
 out:
@@ -8709,7 +8722,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                } else if (ret >= 0 && (size_t)ret < count)
                        btrfs_delalloc_release_space(inode, data_reserved,
                                        offset, count - (size_t)ret, true);
-               btrfs_delalloc_release_extents(BTRFS_I(inode), count, false);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), count);
        }
 out:
        if (wakeup)
@@ -9059,7 +9072,7 @@ again:
        unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
 
        if (!ret2) {
-               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
                sb_end_pagefault(inode->i_sb);
                extent_changeset_free(data_reserved);
                return VM_FAULT_LOCKED;
@@ -9068,7 +9081,7 @@ again:
 out_unlock:
        unlock_page(page);
 out:
-       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
+       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
        btrfs_delalloc_release_space(inode, data_reserved, page_start,
                                     reserved_space, (ret != 0));
 out_noreserve:
@@ -9731,6 +9744,18 @@ out_fail:
                        commit_transaction = true;
        }
        if (commit_transaction) {
+               /*
+                * We may have set commit_transaction when logging the new name
+                * in the destination root, in which case we left the source
+                * root context in the list of log contextes. So make sure we
+                * remove it to avoid invalid memory accesses, since the context
+                * was allocated in our stack frame.
+                */
+               if (sync_log_root) {
+                       mutex_lock(&root->log_mutex);
+                       list_del_init(&ctx_root.list);
+                       mutex_unlock(&root->log_mutex);
+               }
                ret = btrfs_commit_transaction(trans);
        } else {
                int ret2;
@@ -9744,6 +9769,9 @@ out_notrans:
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
 
+       ASSERT(list_empty(&ctx_root.list));
+       ASSERT(list_empty(&ctx_dest.list));
+
        return ret;
 }
 
index de730e5..23272d9 100644 (file)
@@ -1360,8 +1360,7 @@ again:
                unlock_page(pages[i]);
                put_page(pages[i]);
        }
-       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
-                                      false);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
        extent_changeset_free(data_reserved);
        return i_done;
 out:
@@ -1372,8 +1371,7 @@ out:
        btrfs_delalloc_release_space(inode, data_reserved,
                        start_index << PAGE_SHIFT,
                        page_cnt << PAGE_SHIFT, true);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
-                                      true);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
        extent_changeset_free(data_reserved);
        return ret;
 
@@ -4197,9 +4195,6 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
        u64 transid;
        int ret;
 
-       btrfs_warn(root->fs_info,
-       "START_SYNC ioctl is deprecated and will be removed in kernel 5.7");
-
        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
                if (PTR_ERR(trans) != -ENOENT)
@@ -4227,9 +4222,6 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
 {
        u64 transid;
 
-       btrfs_warn(fs_info,
-               "WAIT_SYNC ioctl is deprecated and will be removed in kernel 5.7");
-
        if (argp) {
                if (copy_from_user(&transid, argp, sizeof(transid)))
                        return -EFAULT;
index c4bb699..3ad1516 100644 (file)
@@ -3629,7 +3629,7 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
                return 0;
 
        BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
-       trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
+       trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
        ret = qgroup_reserve(root, num_bytes, enforce, type);
        if (ret < 0)
                return ret;
@@ -3676,7 +3676,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
         */
        num_bytes = sub_root_meta_rsv(root, num_bytes, type);
        BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
-       trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
+       trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
        btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
                                  num_bytes, type);
 }
index 0050465..5cd42b6 100644 (file)
@@ -3277,6 +3277,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        if (!page) {
                                btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                        PAGE_SIZE, true);
+                               btrfs_delalloc_release_extents(BTRFS_I(inode),
+                                                       PAGE_SIZE);
                                ret = -ENOMEM;
                                goto out;
                        }
@@ -3297,7 +3299,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                                btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                        PAGE_SIZE, true);
                                btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                              PAGE_SIZE, true);
+                                                              PAGE_SIZE);
                                ret = -EIO;
                                goto out;
                        }
@@ -3326,7 +3328,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                         PAGE_SIZE, true);
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                      PAGE_SIZE, true);
+                                                      PAGE_SIZE);
 
                        clear_extent_bits(&BTRFS_I(inode)->io_tree,
                                          page_start, page_end,
@@ -3342,8 +3344,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                put_page(page);
 
                index++;
-               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE,
-                                              false);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
                balance_dirty_pages_ratelimited(inode->i_mapping);
                btrfs_throttle(fs_info);
        }
index 98dc092..e8a4b0e 100644 (file)
@@ -893,6 +893,15 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
        while (ticket->bytes > 0 && ticket->error == 0) {
                ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
                if (ret) {
+                       /*
+                        * Delete us from the list. After we unlock the space
+                        * info, we don't want the async reclaim job to reserve
+                        * space for this ticket. If that would happen, then the
+                        * ticket's task would not known that space was reserved
+                        * despite getting an error, resulting in a space leak
+                        * (bytes_may_use counter of our space_info).
+                        */
+                       list_del_init(&ticket->list);
                        ticket->error = -EINTR;
                        break;
                }
@@ -945,12 +954,24 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
        spin_lock(&space_info->lock);
        ret = ticket->error;
        if (ticket->bytes || ticket->error) {
+               /*
+                * Need to delete here for priority tickets. For regular tickets
+                * either the async reclaim job deletes the ticket from the list
+                * or we delete it ourselves at wait_reserve_ticket().
+                */
                list_del_init(&ticket->list);
                if (!ret)
                        ret = -ENOSPC;
        }
        spin_unlock(&space_info->lock);
        ASSERT(list_empty(&ticket->list));
+       /*
+        * Check that we can't have an error set if the reservation succeeded,
+        * as that would confuse tasks and lead them to error out without
+        * releasing reserved space (if an error happens the expectation is that
+        * space wasn't reserved at all).
+        */
+       ASSERT(!(ticket->bytes == 0 && ticket->error));
        return ret;
 }
 
index 43e488f..076d5b8 100644 (file)
@@ -686,9 +686,7 @@ static void dev_item_err(const struct extent_buffer *eb, int slot,
 static int check_dev_item(struct extent_buffer *leaf,
                          struct btrfs_key *key, int slot)
 {
-       struct btrfs_fs_info *fs_info = leaf->fs_info;
        struct btrfs_dev_item *ditem;
-       u64 max_devid = max(BTRFS_MAX_DEVS(fs_info), BTRFS_MAX_DEVS_SYS_CHUNK);
 
        if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
                dev_item_err(leaf, slot,
@@ -696,12 +694,6 @@ static int check_dev_item(struct extent_buffer *leaf,
                             key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
                return -EUCLEAN;
        }
-       if (key->offset > max_devid) {
-               dev_item_err(leaf, slot,
-                            "invalid devid: has=%llu expect=[0, %llu]",
-                            key->offset, max_devid);
-               return -EUCLEAN;
-       }
        ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
        if (btrfs_device_id(leaf, ditem) != key->offset) {
                dev_item_err(leaf, slot,
index bdfe449..e04409f 100644 (file)
@@ -4967,6 +4967,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                max_stripe_size = SZ_32M;
                max_chunk_size = 2 * max_stripe_size;
+               devs_max = min_t(int, devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
        } else {
                btrfs_err(info, "invalid chunk type 0x%llx requested",
                       type);
index d3b9c9d..f5a3891 100644 (file)
@@ -1058,6 +1058,11 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 
        dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
 
+       /* remove from inode's cap rbtree, and clear auth cap */
+       rb_erase(&cap->ci_node, &ci->i_caps);
+       if (ci->i_auth_cap == cap)
+               ci->i_auth_cap = NULL;
+
        /* remove from session list */
        spin_lock(&session->s_cap_lock);
        if (session->s_cap_iterator == cap) {
@@ -1091,11 +1096,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 
        spin_unlock(&session->s_cap_lock);
 
-       /* remove from inode list */
-       rb_erase(&cap->ci_node, &ci->i_caps);
-       if (ci->i_auth_cap == cap)
-               ci->i_auth_cap = NULL;
-
        if (removed)
                ceph_put_cap(mdsc, cap);
 
index 4ca0b8f..d17a789 100644 (file)
@@ -1553,36 +1553,37 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
        int valid = 0;
        struct dentry *parent;
-       struct inode *dir;
+       struct inode *dir, *inode;
 
        if (flags & LOOKUP_RCU) {
                parent = READ_ONCE(dentry->d_parent);
                dir = d_inode_rcu(parent);
                if (!dir)
                        return -ECHILD;
+               inode = d_inode_rcu(dentry);
        } else {
                parent = dget_parent(dentry);
                dir = d_inode(parent);
+               inode = d_inode(dentry);
        }
 
        dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
-            dentry, d_inode(dentry), ceph_dentry(dentry)->offset);
+            dentry, inode, ceph_dentry(dentry)->offset);
 
        /* always trust cached snapped dentries, snapdir dentry */
        if (ceph_snap(dir) != CEPH_NOSNAP) {
                dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
-                    dentry, d_inode(dentry));
+                    dentry, inode);
                valid = 1;
-       } else if (d_really_is_positive(dentry) &&
-                  ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
+       } else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
                valid = 1;
        } else {
                valid = dentry_lease_is_valid(dentry, flags);
                if (valid == -ECHILD)
                        return valid;
                if (valid || dir_lease_is_valid(dir, dentry)) {
-                       if (d_really_is_positive(dentry))
-                               valid = ceph_is_any_caps(d_inode(dentry));
+                       if (inode)
+                               valid = ceph_is_any_caps(inode);
                        else
                                valid = 1;
                }
index d277f71..8de6339 100644 (file)
@@ -462,6 +462,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                err = ceph_security_init_secctx(dentry, mode, &as_ctx);
                if (err < 0)
                        goto out_ctx;
+       } else if (!d_in_lookup(dentry)) {
+               /* If it's not being looked up, it's negative */
+               return -ENOENT;
        }
 
        /* do the open */
@@ -750,6 +753,9 @@ static void ceph_aio_complete(struct inode *inode,
        if (!atomic_dec_and_test(&aio_req->pending_reqs))
                return;
 
+       if (aio_req->iocb->ki_flags & IOCB_DIRECT)
+               inode_dio_end(inode);
+
        ret = aio_req->error;
        if (!ret)
                ret = aio_req->total_len;
@@ -1088,6 +1094,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                                              CEPH_CAP_FILE_RD);
 
                list_splice(&aio_req->osd_reqs, &osd_reqs);
+               inode_dio_begin(inode);
                while (!list_empty(&osd_reqs)) {
                        req = list_first_entry(&osd_reqs,
                                               struct ceph_osd_request,
@@ -1261,14 +1268,24 @@ again:
        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
 
+       if (iocb->ki_flags & IOCB_DIRECT)
+               ceph_start_io_direct(inode);
+       else
+               ceph_start_io_read(inode);
+
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
                            &got, &pinned_page);
-       if (ret < 0)
+       if (ret < 0) {
+               if (iocb->ki_flags & IOCB_DIRECT)
+                       ceph_end_io_direct(inode);
+               else
+                       ceph_end_io_read(inode);
                return ret;
+       }
 
        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) ||
@@ -1280,16 +1297,12 @@ again:
 
                if (ci->i_inline_version == CEPH_INLINE_NONE) {
                        if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
-                               ceph_start_io_direct(inode);
                                ret = ceph_direct_read_write(iocb, to,
                                                             NULL, NULL);
-                               ceph_end_io_direct(inode);
                                if (ret >= 0 && ret < len)
                                        retry_op = CHECK_EOF;
                        } else {
-                               ceph_start_io_read(inode);
                                ret = ceph_sync_read(iocb, to, &retry_op);
-                               ceph_end_io_read(inode);
                        }
                } else {
                        retry_op = READ_INLINE;
@@ -1300,11 +1313,10 @@ again:
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));
                ceph_add_rw_context(fi, &rw_ctx);
-               ceph_start_io_read(inode);
                ret = generic_file_read_iter(iocb, to);
-               ceph_end_io_read(inode);
                ceph_del_rw_context(fi, &rw_ctx);
        }
+
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        if (pinned_page) {
@@ -1312,6 +1324,12 @@ again:
                pinned_page = NULL;
        }
        ceph_put_cap_refs(ci, got);
+
+       if (iocb->ki_flags & IOCB_DIRECT)
+               ceph_end_io_direct(inode);
+       else
+               ceph_end_io_read(inode);
+
        if (retry_op > HAVE_RETRIED && ret >= 0) {
                int statret;
                struct page *page = NULL;
@@ -1956,10 +1974,18 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
        if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
                return -EOPNOTSUPP;
 
+       /*
+        * Striped file layouts require that we copy partial objects, but the
+        * OSD copy-from operation only supports full-object copies.  Limit
+        * this to non-striped file layouts for now.
+        */
        if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
-           (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) ||
-           (src_ci->i_layout.object_size != dst_ci->i_layout.object_size))
+           (src_ci->i_layout.stripe_count != 1) ||
+           (dst_ci->i_layout.stripe_count != 1) ||
+           (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
+               dout("Invalid src/dst files layout\n");
                return -EOPNOTSUPP;
+       }
 
        if (len < src_ci->i_layout.object_size)
                return -EOPNOTSUPP; /* no remote copy will be done */
index 9f13562..c074075 100644 (file)
@@ -1434,6 +1434,7 @@ retry_lookup:
                dout(" final dn %p\n", dn);
        } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                    req->r_op == CEPH_MDS_OP_MKSNAP) &&
+                  test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
                   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
                struct inode *dir = req->r_parent;
 
index edfd643..b47f43f 100644 (file)
@@ -268,6 +268,7 @@ static int parse_fsopt_token(char *c, void *private)
                }
                break;
        case Opt_fscache_uniq:
+#ifdef CONFIG_CEPH_FSCACHE
                kfree(fsopt->fscache_uniq);
                fsopt->fscache_uniq = kstrndup(argstr[0].from,
                                               argstr[0].to-argstr[0].from,
@@ -276,7 +277,10 @@ static int parse_fsopt_token(char *c, void *private)
                        return -ENOMEM;
                fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
                break;
-               /* misc */
+#else
+               pr_err("fscache support is disabled\n");
+               return -EINVAL;
+#endif
        case Opt_wsize:
                if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
                        return -EINVAL;
@@ -353,10 +357,15 @@ static int parse_fsopt_token(char *c, void *private)
                fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
                break;
        case Opt_fscache:
+#ifdef CONFIG_CEPH_FSCACHE
                fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
                kfree(fsopt->fscache_uniq);
                fsopt->fscache_uniq = NULL;
                break;
+#else
+               pr_err("fscache support is disabled\n");
+               return -EINVAL;
+#endif
        case Opt_nofscache:
                fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
                kfree(fsopt->fscache_uniq);
index c049c7b..1a135d1 100644 (file)
@@ -169,7 +169,13 @@ cifs_read_super(struct super_block *sb)
        else
                sb->s_maxbytes = MAX_NON_LFS;
 
-       /* Some very old servers like DOS and OS/2 used 2 second granularity */
+       /*
+        * Some very old servers like DOS and OS/2 used 2 second granularity
+        * (while all current servers use 100ns granularity - see MS-DTYP)
+        * but 1 second is the maximum allowed granularity for the VFS
+        * so for old servers set time granularity to 1 second while for
+        * everything else (current servers) set it to 100ns.
+        */
        if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
            ((tcon->ses->capabilities &
              tcon->ses->server->vals->cap_nt_find) == 0) &&
index 50dfd90..d78bfcc 100644 (file)
@@ -1391,6 +1391,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
 struct cifsInodeInfo {
        bool can_cache_brlcks;
        struct list_head llist; /* locks helb by this inode */
+       /*
+        * NOTE: Some code paths call down_read(lock_sem) twice, so
+        * we must always use use cifs_down_write() instead of down_write()
+        * for this semaphore to avoid deadlocks.
+        */
        struct rw_semaphore lock_sem;   /* protect the fields above */
        /* BB add in lists for dirty pages i.e. write caching info for oplock */
        struct list_head openFileList;
index e53e9f6..fe597d3 100644 (file)
@@ -170,6 +170,7 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile,
                             struct file_lock *flock, const unsigned int xid);
 extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
 
+extern void cifs_down_write(struct rw_semaphore *sem);
 extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid,
                                              struct file *file,
                                              struct tcon_link *tlink,
index a64dfa9..ccaa8ba 100644 (file)
@@ -564,9 +564,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
        spin_lock(&GlobalMid_Lock);
        list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
                mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+               kref_get(&mid_entry->refcount);
                if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
                        mid_entry->mid_state = MID_RETRY_NEEDED;
                list_move(&mid_entry->qhead, &retry_list);
+               mid_entry->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);
        mutex_unlock(&server->srv_mutex);
@@ -576,6 +578,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
                list_del_init(&mid_entry->qhead);
                mid_entry->callback(mid_entry);
+               cifs_mid_q_entry_release(mid_entry);
        }
 
        if (cifs_rdma_enabled(server)) {
@@ -895,8 +898,10 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
        if (mid->mid_flags & MID_DELETED)
                printk_once(KERN_WARNING
                            "trying to dequeue a deleted mid\n");
-       else
+       else {
                list_del_init(&mid->qhead);
+               mid->mid_flags |= MID_DELETED;
+       }
        spin_unlock(&GlobalMid_Lock);
 }
 
@@ -966,8 +971,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
                list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
                        mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
                        cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid);
+                       kref_get(&mid_entry->refcount);
                        mid_entry->mid_state = MID_SHUTDOWN;
                        list_move(&mid_entry->qhead, &dispose_list);
+                       mid_entry->mid_flags |= MID_DELETED;
                }
                spin_unlock(&GlobalMid_Lock);
 
@@ -977,6 +984,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
                        cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid);
                        list_del_init(&mid_entry->qhead);
                        mid_entry->callback(mid_entry);
+                       cifs_mid_q_entry_release(mid_entry);
                }
                /* 1/8th of sec is more than enough time for them to exit */
                msleep(125);
@@ -3882,8 +3890,12 @@ generic_ip_connect(struct TCP_Server_Info *server)
 
        rc = socket->ops->connect(socket, saddr, slen,
                                  server->noblockcnt ? O_NONBLOCK : 0);
-
-       if (rc == -EINPROGRESS)
+       /*
+        * When mounting SMB root file systems, we do not want to block in
+        * connect. Otherwise bail out and then let cifs_reconnect() perform
+        * reconnect failover - if possible.
+        */
+       if (server->noblockcnt && rc == -EINPROGRESS)
                rc = 0;
        if (rc < 0) {
                cifs_dbg(FYI, "Error %d connecting to server\n", rc);
index 5ad15de..fa7b0fa 100644 (file)
@@ -281,6 +281,13 @@ cifs_has_mand_locks(struct cifsInodeInfo *cinode)
        return has_locks;
 }
 
+void
+cifs_down_write(struct rw_semaphore *sem)
+{
+       while (!down_write_trylock(sem))
+               msleep(10);
+}
+
 struct cifsFileInfo *
 cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
@@ -306,7 +313,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);
 
@@ -405,10 +412,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
        bool oplock_break_cancelled;
 
        spin_lock(&tcon->open_file_lock);
-
+       spin_lock(&cifsi->open_file_lock);
        spin_lock(&cifs_file->file_info_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file->file_info_lock);
+               spin_unlock(&cifsi->open_file_lock);
                spin_unlock(&tcon->open_file_lock);
                return;
        }
@@ -421,9 +429,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
 
        /* remove it from the lists */
-       spin_lock(&cifsi->open_file_lock);
        list_del(&cifs_file->flist);
-       spin_unlock(&cifsi->open_file_lock);
        list_del(&cifs_file->tlist);
        atomic_dec(&tcon->num_local_opens);
 
@@ -440,6 +446,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
                cifs_set_oplock_level(cifsi, 0);
        }
 
+       spin_unlock(&cifsi->open_file_lock);
        spin_unlock(&tcon->open_file_lock);
 
        oplock_break_cancelled = wait_oplock_handler ?
@@ -464,7 +471,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
-       down_write(&cifsi->lock_sem);
+       cifs_down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
@@ -1027,7 +1034,7 @@ static void
 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
 {
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
 }
@@ -1049,7 +1056,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
 
 try_again:
        exist = false;
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
 
        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, lock->flags, &conf_lock,
@@ -1072,7 +1079,7 @@ try_again:
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
-               down_write(&cinode->lock_sem);
+               cifs_down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }
 
@@ -1125,7 +1132,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock)
                return rc;
 
 try_again:
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
@@ -1331,7 +1338,7 @@ cifs_push_locks(struct cifsFileInfo *cfile)
        int rc = 0;
 
        /* we are going to update can_cache_brlcks here - need a write access */
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
@@ -1522,7 +1529,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
        if (!buf)
                return -ENOMEM;
 
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
index 5dcc95b..df93778 100644 (file)
@@ -2475,9 +2475,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
                        rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
                        cifsFileInfo_put(wfile);
                        if (rc)
-                               return rc;
+                               goto cifs_setattr_exit;
                } else if (rc != -EBADF)
-                       return rc;
+                       goto cifs_setattr_exit;
                else
                        rc = 0;
        }
index b7421a0..5148106 100644 (file)
@@ -171,6 +171,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
        /* we do not want to loop forever */
        last_mid = cur_mid;
        cur_mid++;
+       /* avoid 0xFFFF MID */
+       if (cur_mid == 0xffff)
+               cur_mid++;
 
        /*
         * This nested loop looks more expensive than it is.
index e6a1fc7..8b0b512 100644 (file)
@@ -145,7 +145,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
 
        cur = buf;
 
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                if (flock->fl_start > li->offset ||
                    (flock->fl_start + length) <
index 4c09225..cd55af9 100644 (file)
@@ -4084,6 +4084,7 @@ free_pages:
 
        kfree(dw->ppages);
        cifs_small_buf_release(dw->buf);
+       kfree(dw);
 }
 
 
@@ -4157,7 +4158,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
                dw->server = server;
                dw->ppages = pages;
                dw->len = len;
-               queue_work(cifsiod_wq, &dw->decrypt);
+               queue_work(decrypt_wq, &dw->decrypt);
                *num_mids = 0; /* worker thread takes care of finding mid */
                return -1;
        }
index ea735d5..0abfde6 100644 (file)
@@ -838,6 +838,7 @@ struct create_durable_handle_reconnect_v2 {
        struct create_context ccontext;
        __u8   Name[8];
        struct durable_reconnect_context_v2 dcontext;
+       __u8   Pad[4];
 } __packed;
 
 /* See MS-SMB2 2.2.13.2.5 */
index 308ad0f..ca3de62 100644 (file)
@@ -86,22 +86,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 
 static void _cifs_mid_q_entry_release(struct kref *refcount)
 {
-       struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
-                                              refcount);
-
-       mempool_free(mid, cifs_mid_poolp);
-}
-
-void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
-{
-       spin_lock(&GlobalMid_Lock);
-       kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
-       spin_unlock(&GlobalMid_Lock);
-}
-
-void
-DeleteMidQEntry(struct mid_q_entry *midEntry)
-{
+       struct mid_q_entry *midEntry =
+                       container_of(refcount, struct mid_q_entry, refcount);
 #ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
@@ -166,6 +152,19 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
                }
        }
 #endif
+
+       mempool_free(midEntry, cifs_mid_poolp);
+}
+
+void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+{
+       spin_lock(&GlobalMid_Lock);
+       kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
+       spin_unlock(&GlobalMid_Lock);
+}
+
+void DeleteMidQEntry(struct mid_q_entry *midEntry)
+{
        cifs_mid_q_entry_release(midEntry);
 }
 
@@ -173,8 +172,10 @@ void
 cifs_delete_mid(struct mid_q_entry *mid)
 {
        spin_lock(&GlobalMid_Lock);
-       list_del_init(&mid->qhead);
-       mid->mid_flags |= MID_DELETED;
+       if (!(mid->mid_flags & MID_DELETED)) {
+               list_del_init(&mid->qhead);
+               mid->mid_flags |= MID_DELETED;
+       }
        spin_unlock(&GlobalMid_Lock);
 
        DeleteMidQEntry(mid);
@@ -872,7 +873,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
                rc = -EHOSTDOWN;
                break;
        default:
-               list_del_init(&mid->qhead);
+               if (!(mid->mid_flags & MID_DELETED)) {
+                       list_del_init(&mid->qhead);
+                       mid->mid_flags |= MID_DELETED;
+               }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
index dc5dbf6..cb61467 100644 (file)
@@ -101,7 +101,7 @@ static int create_link(struct config_item *parent_item,
        }
        target_sd->s_links++;
        spin_unlock(&configfs_dirent_lock);
-       ret = configfs_get_target_path(item, item, body);
+       ret = configfs_get_target_path(parent_item, item, body);
        if (!ret)
                ret = configfs_create_link(target_sd, parent_item->ci_dentry,
                                           dentry, body);
index 6bf81f9..2cc43cd 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -220,10 +220,11 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
 
        for (;;) {
                entry = xas_find_conflict(xas);
+               if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
+                       return entry;
                if (dax_entry_order(entry) < order)
                        return XA_RETRY_ENTRY;
-               if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
-                               !dax_is_locked(entry))
+               if (!dax_is_locked(entry))
                        return entry;
 
                wq = dax_entry_waitqueue(xas, entry, &ewait.key);
index 18426f4..e23752d 100644 (file)
@@ -128,13 +128,20 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
                              struct inode *inode)
 {
        struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
-       struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
        struct dentry *lower_dir_dentry;
+       struct inode *lower_dir_inode;
        int rc;
 
-       dget(lower_dentry);
-       lower_dir_dentry = lock_parent(lower_dentry);
-       rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
+       lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+       lower_dir_inode = d_inode(lower_dir_dentry);
+       inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+       dget(lower_dentry);     // don't even try to make the lower negative
+       if (lower_dentry->d_parent != lower_dir_dentry)
+               rc = -EINVAL;
+       else if (d_unhashed(lower_dentry))
+               rc = -EINVAL;
+       else
+               rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
        if (rc) {
                printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
                goto out_unlock;
@@ -142,10 +149,11 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
        fsstack_copy_attr_times(dir, lower_dir_inode);
        set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
        inode->i_ctime = dir->i_ctime;
-       d_drop(dentry);
 out_unlock:
-       unlock_dir(lower_dir_dentry);
        dput(lower_dentry);
+       inode_unlock(lower_dir_inode);
+       if (!rc)
+               d_drop(dentry);
        return rc;
 }
 
@@ -311,9 +319,9 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
 static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
                                     struct dentry *lower_dentry)
 {
-       struct inode *inode, *lower_inode = d_inode(lower_dentry);
+       struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
+       struct inode *inode, *lower_inode;
        struct ecryptfs_dentry_info *dentry_info;
-       struct vfsmount *lower_mnt;
        int rc = 0;
 
        dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
@@ -322,16 +330,23 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
                return ERR_PTR(-ENOMEM);
        }
 
-       lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
        fsstack_copy_attr_atime(d_inode(dentry->d_parent),
-                               d_inode(lower_dentry->d_parent));
+                               d_inode(path->dentry));
        BUG_ON(!d_count(lower_dentry));
 
        ecryptfs_set_dentry_private(dentry, dentry_info);
-       dentry_info->lower_path.mnt = lower_mnt;
+       dentry_info->lower_path.mnt = mntget(path->mnt);
        dentry_info->lower_path.dentry = lower_dentry;
 
-       if (d_really_is_negative(lower_dentry)) {
+       /*
+        * negative dentry can go positive under us here - its parent is not
+        * locked.  That's OK and that could happen just as we return from
+        * ecryptfs_lookup() anyway.  Just need to be careful and fetch
+        * ->d_inode only once - it's not stable here.
+        */
+       lower_inode = READ_ONCE(lower_dentry->d_inode);
+
+       if (!lower_inode) {
                /* We want to add because we couldn't find in lower */
                d_add(dentry, NULL);
                return NULL;
@@ -512,22 +527,30 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
        struct dentry *lower_dentry;
        struct dentry *lower_dir_dentry;
+       struct inode *lower_dir_inode;
        int rc;
 
        lower_dentry = ecryptfs_dentry_to_lower(dentry);
-       dget(dentry);
-       lower_dir_dentry = lock_parent(lower_dentry);
-       dget(lower_dentry);
-       rc = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry);
-       dput(lower_dentry);
-       if (!rc && d_really_is_positive(dentry))
+       lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+       lower_dir_inode = d_inode(lower_dir_dentry);
+
+       inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+       dget(lower_dentry);     // don't even try to make the lower negative
+       if (lower_dentry->d_parent != lower_dir_dentry)
+               rc = -EINVAL;
+       else if (d_unhashed(lower_dentry))
+               rc = -EINVAL;
+       else
+               rc = vfs_rmdir(lower_dir_inode, lower_dentry);
+       if (!rc) {
                clear_nlink(d_inode(dentry));
-       fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
-       set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink);
-       unlock_dir(lower_dir_dentry);
+               fsstack_copy_attr_times(dir, lower_dir_inode);
+               set_nlink(dir, lower_dir_inode->i_nlink);
+       }
+       dput(lower_dentry);
+       inode_unlock(lower_dir_inode);
        if (!rc)
                d_drop(dentry);
-       dput(dentry);
        return rc;
 }
 
@@ -565,20 +588,22 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct dentry *lower_new_dentry;
        struct dentry *lower_old_dir_dentry;
        struct dentry *lower_new_dir_dentry;
-       struct dentry *trap = NULL;
+       struct dentry *trap;
        struct inode *target_inode;
 
        if (flags)
                return -EINVAL;
 
+       lower_old_dir_dentry = ecryptfs_dentry_to_lower(old_dentry->d_parent);
+       lower_new_dir_dentry = ecryptfs_dentry_to_lower(new_dentry->d_parent);
+
        lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
        lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
-       dget(lower_old_dentry);
-       dget(lower_new_dentry);
-       lower_old_dir_dentry = dget_parent(lower_old_dentry);
-       lower_new_dir_dentry = dget_parent(lower_new_dentry);
+
        target_inode = d_inode(new_dentry);
+
        trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+       dget(lower_new_dentry);
        rc = -EINVAL;
        if (lower_old_dentry->d_parent != lower_old_dir_dentry)
                goto out_lock;
@@ -606,11 +631,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (new_dir != old_dir)
                fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry));
 out_lock:
-       unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
-       dput(lower_new_dir_dentry);
-       dput(lower_old_dir_dentry);
        dput(lower_new_dentry);
-       dput(lower_old_dentry);
+       unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
        return rc;
 }
 
index 09bc687..2dd55b1 100644 (file)
@@ -519,26 +519,33 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
                 * inode is actually connected to the parent.
                 */
                err = exportfs_get_name(mnt, target_dir, nbuf, result);
-               if (!err) {
-                       inode_lock(target_dir->d_inode);
-                       nresult = lookup_one_len(nbuf, target_dir,
-                                                strlen(nbuf));
-                       inode_unlock(target_dir->d_inode);
-                       if (!IS_ERR(nresult)) {
-                               if (nresult->d_inode) {
-                                       dput(result);
-                                       result = nresult;
-                               } else
-                                       dput(nresult);
-                       }
+               if (err) {
+                       dput(target_dir);
+                       goto err_result;
                }
 
+               inode_lock(target_dir->d_inode);
+               nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+               if (!IS_ERR(nresult)) {
+                       if (unlikely(nresult->d_inode != result->d_inode)) {
+                               dput(nresult);
+                               nresult = ERR_PTR(-ESTALE);
+                       }
+               }
+               inode_unlock(target_dir->d_inode);
                /*
                 * At this point we are done with the parent, but it's pinned
                 * by the child dentry anyway.
                 */
                dput(target_dir);
 
+               if (IS_ERR(nresult)) {
+                       err = PTR_ERR(nresult);
+                       goto err_result;
+               }
+               dput(result);
+               result = nresult;
+
                /*
                 * And finally make sure the dentry is actually acceptable
                 * to NFSD.
index 8461a63..335607b 100644 (file)
@@ -576,10 +576,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
        spin_unlock(&inode->i_lock);
 
        /*
-        * A dying wb indicates that the memcg-blkcg mapping has changed
-        * and a new wb is already serving the memcg.  Switch immediately.
+        * A dying wb indicates that either the blkcg associated with the
+        * memcg changed or the associated memcg is dying.  In the first
+        * case, a replacement wb should already be available and we should
+        * refresh the wb immediately.  In the second case, trying to
+        * refresh will keep failing.
         */
-       if (unlikely(wb_dying(wbc->wb)))
+       if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
                inode_switch_wbs(inode, wbc->wb_id);
 }
 EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
index 6419a2b..3e8cebf 100644 (file)
@@ -5,6 +5,7 @@
 
 obj-$(CONFIG_FUSE_FS) += fuse.o
 obj-$(CONFIG_CUSE) += cuse.o
-obj-$(CONFIG_VIRTIO_FS) += virtio_fs.o
+obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
 
 fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o
+virtiofs-y += virtio_fs.o
index dadd617..ed1abc9 100644 (file)
@@ -276,10 +276,12 @@ static void flush_bg_queue(struct fuse_conn *fc)
 void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
        struct fuse_iqueue *fiq = &fc->iq;
-       bool async = req->args->end;
+       bool async;
 
        if (test_and_set_bit(FR_FINISHED, &req->flags))
                goto put_request;
+
+       async = req->args->end;
        /*
         * test_and_set_bit() implies smp_mb() between bit
         * changing and below intr_entry check. Pairs with
index d572c90..54d638f 100644 (file)
@@ -405,7 +405,8 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
        else
                fuse_invalidate_entry_cache(entry);
 
-       fuse_advise_use_readdirplus(dir);
+       if (inode)
+               fuse_advise_use_readdirplus(dir);
        return newent;
 
  out_iput:
@@ -1521,6 +1522,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
                is_truncate = true;
        }
 
+       /* Flush dirty data/metadata before non-truncate SETATTR */
+       if (is_wb && S_ISREG(inode->i_mode) &&
+           attr->ia_valid &
+                       (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
+                        ATTR_TIMES_SET)) {
+               err = write_inode_now(inode, true);
+               if (err)
+                       return err;
+
+               fuse_set_nowrite(inode);
+               fuse_release_nowrite(inode);
+       }
+
        if (is_truncate) {
                fuse_set_nowrite(inode);
                set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
index 0f02256..db48a5c 100644 (file)
@@ -217,7 +217,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;
-       bool lock_inode = (file->f_flags & O_TRUNC) &&
+       bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
                          fc->atomic_o_trunc &&
                          fc->writeback_cache;
 
@@ -225,16 +225,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
        if (err)
                return err;
 
-       if (lock_inode)
+       if (is_wb_truncate) {
                inode_lock(inode);
+               fuse_set_nowrite(inode);
+       }
 
        err = fuse_do_open(fc, get_node_id(inode), file, isdir);
 
        if (!err)
                fuse_finish_open(inode, file);
 
-       if (lock_inode)
+       if (is_wb_truncate) {
+               fuse_release_nowrite(inode);
                inode_unlock(inode);
+       }
 
        return err;
 }
@@ -1997,7 +2001,7 @@ static int fuse_writepages_fill(struct page *page,
 
        if (!data->ff) {
                err = -EIO;
-               data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+               data->ff = fuse_write_file_get(fc, fi);
                if (!data->ff)
                        goto out_unlock;
        }
@@ -2042,8 +2046,6 @@ static int fuse_writepages_fill(struct page *page,
         * under writeback, so we can release the page lock.
         */
        if (data->wpa == NULL) {
-               struct fuse_inode *fi = get_fuse_inode(inode);
-
                err = -ENOMEM;
                wpa = fuse_writepage_args_alloc();
                if (!wpa) {
index 956aeaf..d148188 100644 (file)
@@ -479,6 +479,7 @@ struct fuse_fs_context {
        bool destroy:1;
        bool no_control:1;
        bool no_force_umount:1;
+       bool no_mount_options:1;
        unsigned int max_read;
        unsigned int blksize;
        const char *subtype;
@@ -713,6 +714,9 @@ struct fuse_conn {
        /** Do not allow MNT_FORCE umount */
        unsigned int no_force_umount:1;
 
+       /* Do not show mount options */
+       unsigned int no_mount_options:1;
+
        /** The number of requests waiting for completion */
        atomic_t num_waiting;
 
index e040e2a..16aec32 100644 (file)
@@ -558,6 +558,9 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
        struct super_block *sb = root->d_sb;
        struct fuse_conn *fc = get_fuse_conn_super(sb);
 
+       if (fc->no_mount_options)
+               return 0;
+
        seq_printf(m, ",user_id=%u", from_kuid_munged(fc->user_ns, fc->user_id));
        seq_printf(m, ",group_id=%u", from_kgid_munged(fc->user_ns, fc->group_id));
        if (fc->default_permissions)
@@ -1180,6 +1183,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
        fc->destroy = ctx->destroy;
        fc->no_control = ctx->no_control;
        fc->no_force_umount = ctx->no_force_umount;
+       fc->no_mount_options = ctx->no_mount_options;
 
        err = -ENOMEM;
        root = fuse_get_root_inode(sb, ctx->rootmode);
index 6af3f13..a5c8604 100644 (file)
@@ -30,6 +30,7 @@ struct virtio_fs_vq {
        struct virtqueue *vq;     /* protected by ->lock */
        struct work_struct done_work;
        struct list_head queued_reqs;
+       struct list_head end_reqs;      /* End these requests */
        struct delayed_work dispatch_work;
        struct fuse_dev *fud;
        bool connected;
@@ -54,6 +55,9 @@ struct virtio_fs_forget {
        struct list_head list;
 };
 
+static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
+                                struct fuse_req *req, bool in_flight);
+
 static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
 {
        struct virtio_fs *fs = vq->vdev->priv;
@@ -66,6 +70,19 @@ static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
        return &vq_to_fsvq(vq)->fud->pq;
 }
 
+/* Should be called with fsvq->lock held. */
+static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+       fsvq->in_flight++;
+}
+
+/* Should be called with fsvq->lock held. */
+static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+       WARN_ON(fsvq->in_flight <= 0);
+       fsvq->in_flight--;
+}
+
 static void release_virtio_fs_obj(struct kref *ref)
 {
        struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);
@@ -109,22 +126,6 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
        flush_delayed_work(&fsvq->dispatch_work);
 }
 
-static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq)
-{
-       struct virtio_fs_forget *forget;
-
-       spin_lock(&fsvq->lock);
-       while (1) {
-               forget = list_first_entry_or_null(&fsvq->queued_reqs,
-                                               struct virtio_fs_forget, list);
-               if (!forget)
-                       break;
-               list_del(&forget->list);
-               kfree(forget);
-       }
-       spin_unlock(&fsvq->lock);
-}
-
 static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 {
        struct virtio_fs_vq *fsvq;
@@ -132,9 +133,6 @@ static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 
        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
-               if (i == VQ_HIPRIO)
-                       drain_hiprio_queued_reqs(fsvq);
-
                virtio_fs_drain_queue(fsvq);
        }
 }
@@ -253,14 +251,66 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work)
 
                while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
                        kfree(req);
-                       fsvq->in_flight--;
+                       dec_in_flight_req(fsvq);
                }
        } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
        spin_unlock(&fsvq->lock);
 }
 
-static void virtio_fs_dummy_dispatch_work(struct work_struct *work)
+static void virtio_fs_request_dispatch_work(struct work_struct *work)
 {
+       struct fuse_req *req;
+       struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
+                                                dispatch_work.work);
+       struct fuse_conn *fc = fsvq->fud->fc;
+       int ret;
+
+       pr_debug("virtio-fs: worker %s called.\n", __func__);
+       while (1) {
+               spin_lock(&fsvq->lock);
+               req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
+                                              list);
+               if (!req) {
+                       spin_unlock(&fsvq->lock);
+                       break;
+               }
+
+               list_del_init(&req->list);
+               spin_unlock(&fsvq->lock);
+               fuse_request_end(fc, req);
+       }
+
+       /* Dispatch pending requests */
+       while (1) {
+               spin_lock(&fsvq->lock);
+               req = list_first_entry_or_null(&fsvq->queued_reqs,
+                                              struct fuse_req, list);
+               if (!req) {
+                       spin_unlock(&fsvq->lock);
+                       return;
+               }
+               list_del_init(&req->list);
+               spin_unlock(&fsvq->lock);
+
+               ret = virtio_fs_enqueue_req(fsvq, req, true);
+               if (ret < 0) {
+                       if (ret == -ENOMEM || ret == -ENOSPC) {
+                               spin_lock(&fsvq->lock);
+                               list_add_tail(&req->list, &fsvq->queued_reqs);
+                               schedule_delayed_work(&fsvq->dispatch_work,
+                                                     msecs_to_jiffies(1));
+                               spin_unlock(&fsvq->lock);
+                               return;
+                       }
+                       req->out.h.error = ret;
+                       spin_lock(&fsvq->lock);
+                       dec_in_flight_req(fsvq);
+                       spin_unlock(&fsvq->lock);
+                       pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
+                              ret);
+                       fuse_request_end(fc, req);
+               }
+       }
 }
 
 static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
@@ -286,6 +336,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
 
                list_del(&forget->list);
                if (!fsvq->connected) {
+                       dec_in_flight_req(fsvq);
                        spin_unlock(&fsvq->lock);
                        kfree(forget);
                        continue;
@@ -307,13 +358,13 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
                        } else {
                                pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
                                         ret);
+                               dec_in_flight_req(fsvq);
                                kfree(forget);
                        }
                        spin_unlock(&fsvq->lock);
                        return;
                }
 
-               fsvq->in_flight++;
                notify = virtqueue_kick_prepare(vq);
                spin_unlock(&fsvq->lock);
 
@@ -452,7 +503,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
 
                fuse_request_end(fc, req);
                spin_lock(&fsvq->lock);
-               fsvq->in_flight--;
+               dec_in_flight_req(fsvq);
                spin_unlock(&fsvq->lock);
        }
 }
@@ -502,6 +553,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
        names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;
        INIT_WORK(&fs->vqs[VQ_HIPRIO].done_work, virtio_fs_hiprio_done_work);
        INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].queued_reqs);
+       INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].end_reqs);
        INIT_DELAYED_WORK(&fs->vqs[VQ_HIPRIO].dispatch_work,
                        virtio_fs_hiprio_dispatch_work);
        spin_lock_init(&fs->vqs[VQ_HIPRIO].lock);
@@ -511,8 +563,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
                spin_lock_init(&fs->vqs[i].lock);
                INIT_WORK(&fs->vqs[i].done_work, virtio_fs_requests_done_work);
                INIT_DELAYED_WORK(&fs->vqs[i].dispatch_work,
-                                       virtio_fs_dummy_dispatch_work);
+                                 virtio_fs_request_dispatch_work);
                INIT_LIST_HEAD(&fs->vqs[i].queued_reqs);
+               INIT_LIST_HEAD(&fs->vqs[i].end_reqs);
                snprintf(fs->vqs[i].name, sizeof(fs->vqs[i].name),
                         "requests.%u", i - VQ_REQUEST);
                callbacks[i] = virtio_fs_vq_done;
@@ -708,6 +761,7 @@ __releases(fiq->lock)
                        list_add_tail(&forget->list, &fsvq->queued_reqs);
                        schedule_delayed_work(&fsvq->dispatch_work,
                                        msecs_to_jiffies(1));
+                       inc_in_flight_req(fsvq);
                } else {
                        pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
                                 ret);
@@ -717,7 +771,7 @@ __releases(fiq->lock)
                goto out;
        }
 
-       fsvq->in_flight++;
+       inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);
 
        spin_unlock(&fsvq->lock);
@@ -819,7 +873,7 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
 
 /* Add a request to a virtqueue and kick the device */
 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
-                                struct fuse_req *req)
+                                struct fuse_req *req, bool in_flight)
 {
        /* requests need at least 4 elements */
        struct scatterlist *stack_sgs[6];
@@ -835,6 +889,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
        unsigned int i;
        int ret;
        bool notify;
+       struct fuse_pqueue *fpq;
 
        /* Does the sglist fit on the stack? */
        total_sgs = sg_count_fuse_req(req);
@@ -889,7 +944,17 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
                goto out;
        }
 
-       fsvq->in_flight++;
+       /* Request successfully sent. */
+       fpq = &fsvq->fud->pq;
+       spin_lock(&fpq->lock);
+       list_add_tail(&req->list, fpq->processing);
+       spin_unlock(&fpq->lock);
+       set_bit(FR_SENT, &req->flags);
+       /* matches barrier in request_wait_answer() */
+       smp_mb__after_atomic();
+
+       if (!in_flight)
+               inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);
 
        spin_unlock(&fsvq->lock);
@@ -915,9 +980,8 @@ __releases(fiq->lock)
 {
        unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
        struct virtio_fs *fs;
-       struct fuse_conn *fc;
        struct fuse_req *req;
-       struct fuse_pqueue *fpq;
+       struct virtio_fs_vq *fsvq;
        int ret;
 
        WARN_ON(list_empty(&fiq->pending));
@@ -928,44 +992,36 @@ __releases(fiq->lock)
        spin_unlock(&fiq->lock);
 
        fs = fiq->priv;
-       fc = fs->vqs[queue_id].fud->fc;
 
        pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
                  __func__, req->in.h.opcode, req->in.h.unique,
                 req->in.h.nodeid, req->in.h.len,
                 fuse_len_args(req->args->out_numargs, req->args->out_args));
 
-       fpq = &fs->vqs[queue_id].fud->pq;
-       spin_lock(&fpq->lock);
-       if (!fpq->connected) {
-               spin_unlock(&fpq->lock);
-               req->out.h.error = -ENODEV;
-               pr_err("virtio-fs: %s disconnected\n", __func__);
-               fuse_request_end(fc, req);
-               return;
-       }
-       list_add_tail(&req->list, fpq->processing);
-       spin_unlock(&fpq->lock);
-       set_bit(FR_SENT, &req->flags);
-       /* matches barrier in request_wait_answer() */
-       smp_mb__after_atomic();
-
-retry:
-       ret = virtio_fs_enqueue_req(&fs->vqs[queue_id], req);
+       fsvq = &fs->vqs[queue_id];
+       ret = virtio_fs_enqueue_req(fsvq, req, false);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOSPC) {
-                       /* Virtqueue full. Retry submission */
-                       /* TODO use completion instead of timeout */
-                       usleep_range(20, 30);
-                       goto retry;
+                       /*
+                        * Virtqueue full. Retry submission from worker
+                        * context as we might be holding fc->bg_lock.
+                        */
+                       spin_lock(&fsvq->lock);
+                       list_add_tail(&req->list, &fsvq->queued_reqs);
+                       inc_in_flight_req(fsvq);
+                       schedule_delayed_work(&fsvq->dispatch_work,
+                                               msecs_to_jiffies(1));
+                       spin_unlock(&fsvq->lock);
+                       return;
                }
                req->out.h.error = ret;
                pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
-               spin_lock(&fpq->lock);
-               clear_bit(FR_SENT, &req->flags);
-               list_del_init(&req->list);
-               spin_unlock(&fpq->lock);
-               fuse_request_end(fc, req);
+
+               /* Can't end request in submission context. Use a worker */
+               spin_lock(&fsvq->lock);
+               list_add_tail(&req->list, &fsvq->end_reqs);
+               schedule_delayed_work(&fsvq->dispatch_work, 0);
+               spin_unlock(&fsvq->lock);
                return;
        }
 }
@@ -992,6 +1048,7 @@ static int virtio_fs_fill_super(struct super_block *sb)
                .destroy = true,
                .no_control = true,
                .no_force_umount = true,
+               .no_mount_options = true,
        };
 
        mutex_lock(&virtio_fs_mutex);
index 681b446..18daf49 100644 (file)
@@ -1540,17 +1540,23 @@ static int gfs2_init_fs_context(struct fs_context *fc)
 {
        struct gfs2_args *args;
 
-       args = kzalloc(sizeof(*args), GFP_KERNEL);
+       args = kmalloc(sizeof(*args), GFP_KERNEL);
        if (args == NULL)
                return -ENOMEM;
 
-       args->ar_quota = GFS2_QUOTA_DEFAULT;
-       args->ar_data = GFS2_DATA_DEFAULT;
-       args->ar_commit = 30;
-       args->ar_statfs_quantum = 30;
-       args->ar_quota_quantum = 60;
-       args->ar_errors = GFS2_ERRORS_DEFAULT;
+       if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+               struct gfs2_sbd *sdp = fc->root->d_sb->s_fs_info;
 
+               *args = sdp->sd_args;
+       } else {
+               memset(args, 0, sizeof(*args));
+               args->ar_quota = GFS2_QUOTA_DEFAULT;
+               args->ar_data = GFS2_DATA_DEFAULT;
+               args->ar_commit = 30;
+               args->ar_statfs_quantum = 30;
+               args->ar_quota_quantum = 60;
+               args->ar_errors = GFS2_ERRORS_DEFAULT;
+       }
        fc->fs_private = args;
        fc->ops = &gfs2_context_ops;
        return 0;
@@ -1600,6 +1606,7 @@ static int gfs2_meta_get_tree(struct fs_context *fc)
 }
 
 static const struct fs_context_operations gfs2_meta_context_ops = {
+       .free        = gfs2_fc_free,
        .get_tree    = gfs2_meta_get_tree,
 };
 
index 67dbe02..2c819c3 100644 (file)
@@ -197,6 +197,7 @@ struct io_ring_ctx {
                unsigned                sq_entries;
                unsigned                sq_mask;
                unsigned                sq_thread_idle;
+               unsigned                cached_sq_dropped;
                struct io_uring_sqe     *sq_sqes;
 
                struct list_head        defer_list;
@@ -212,6 +213,7 @@ struct io_ring_ctx {
 
        struct {
                unsigned                cached_cq_tail;
+               atomic_t                cached_cq_overflow;
                unsigned                cq_entries;
                unsigned                cq_mask;
                struct wait_queue_head  cq_wait;
@@ -324,6 +326,7 @@ struct io_kiocb {
 #define REQ_F_TIMEOUT          1024    /* timeout request */
 #define REQ_F_ISREG            2048    /* regular file */
 #define REQ_F_MUST_PUNT                4096    /* must be punted even for NONBLOCK */
+#define REQ_F_TIMEOUT_NOSEQ    8192    /* no timeout sequence */
        u64                     user_data;
        u32                     result;
        u32                     sequence;
@@ -420,7 +423,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
                                       struct io_kiocb *req)
 {
-       return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
+       return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+                                       + atomic_read(&ctx->cached_cq_overflow);
 }
 
 static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
@@ -450,9 +454,13 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
        struct io_kiocb *req;
 
        req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
-       if (req && !__io_sequence_defer(ctx, req)) {
-               list_del_init(&req->list);
-               return req;
+       if (req) {
+               if (req->flags & REQ_F_TIMEOUT_NOSEQ)
+                       return NULL;
+               if (!__io_sequence_defer(ctx, req)) {
+                       list_del_init(&req->list);
+                       return req;
+               }
        }
 
        return NULL;
@@ -567,9 +575,8 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
                WRITE_ONCE(cqe->res, res);
                WRITE_ONCE(cqe->flags, 0);
        } else {
-               unsigned overflow = READ_ONCE(ctx->rings->cq_overflow);
-
-               WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1);
+               WRITE_ONCE(ctx->rings->cq_overflow,
+                               atomic_inc_return(&ctx->cached_cq_overflow));
        }
 }
 
@@ -735,6 +742,14 @@ static unsigned io_cqring_events(struct io_rings *rings)
        return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
 }
 
+static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
+{
+       struct io_rings *rings = ctx->rings;
+
+       /* make sure SQ entry isn't read before tail */
+       return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
+}
+
 /*
  * Find and free completed poll iocbs
  */
@@ -864,19 +879,11 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
        mutex_unlock(&ctx->uring_lock);
 }
 
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
-                          long min)
+static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+                           long min)
 {
-       int iters, ret = 0;
-
-       /*
-        * We disallow the app entering submit/complete with polling, but we
-        * still need to lock the ring to prevent racing with polled issue
-        * that got punted to a workqueue.
-        */
-       mutex_lock(&ctx->uring_lock);
+       int iters = 0, ret = 0;
 
-       iters = 0;
        do {
                int tmin = 0;
 
@@ -912,6 +919,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
                ret = 0;
        } while (min && !*nr_events && !need_resched());
 
+       return ret;
+}
+
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+                          long min)
+{
+       int ret;
+
+       /*
+        * We disallow the app entering submit/complete with polling, but we
+        * still need to lock the ring to prevent racing with polled issue
+        * that got punted to a workqueue.
+        */
+       mutex_lock(&ctx->uring_lock);
+       ret = __io_iopoll_check(ctx, nr_events, min);
        mutex_unlock(&ctx->uring_lock);
        return ret;
 }
@@ -1107,6 +1129,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 
                kiocb->ki_flags |= IOCB_HIPRI;
                kiocb->ki_complete = io_complete_rw_iopoll;
+               req->result = 0;
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
@@ -1207,7 +1230,7 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
                }
        }
 
-       return 0;
+       return len;
 }
 
 static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
@@ -1877,7 +1900,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 {
        struct io_ring_ctx *ctx;
-       struct io_kiocb *req;
+       struct io_kiocb *req, *prev;
        unsigned long flags;
 
        req = container_of(timer, struct io_kiocb, timeout.timer);
@@ -1885,6 +1908,15 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
        atomic_inc(&ctx->cq_timeouts);
 
        spin_lock_irqsave(&ctx->completion_lock, flags);
+       /*
+        * Adjust the reqs sequence before the current one because it
+        * will consume a slot in the cq_ring and the the cq_tail pointer
+        * will be increased, otherwise other timeout reqs may return in
+        * advance without waiting for enough wait_nr.
+        */
+       prev = req;
+       list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
+               prev->sequence++;
        list_del(&req->list);
 
        io_cqring_fill_event(ctx, req->user_data, -ETIME);
@@ -1903,6 +1935,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_ring_ctx *ctx = req->ctx;
        struct list_head *entry;
        struct timespec64 ts;
+       unsigned span = 0;
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -1913,18 +1946,24 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
                return -EFAULT;
 
+       req->flags |= REQ_F_TIMEOUT;
+
        /*
         * sqe->off holds how many events that need to occur for this
-        * timeout event to be satisfied.
+        * timeout event to be satisfied. If it isn't set, then this is
+        * a pure timeout request, sequence isn't used.
         */
        count = READ_ONCE(sqe->off);
-       if (!count)
-               count = 1;
+       if (!count) {
+               req->flags |= REQ_F_TIMEOUT_NOSEQ;
+               spin_lock_irq(&ctx->completion_lock);
+               entry = ctx->timeout_list.prev;
+               goto add;
+       }
 
        req->sequence = ctx->cached_sq_head + count - 1;
        /* reuse it to store the count */
        req->submit.sequence = count;
-       req->flags |= REQ_F_TIMEOUT;
 
        /*
         * Insertion sort, ensuring the first entry in the list is always
@@ -1936,6 +1975,9 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                unsigned nxt_sq_head;
                long long tmp, tmp_nxt;
 
+               if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+                       continue;
+
                /*
                 * Since cached_sq_head + count - 1 can overflow, use type long
                 * long to store it.
@@ -1951,9 +1993,18 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                if (ctx->cached_sq_head < nxt_sq_head)
                        tmp += UINT_MAX;
 
-               if (tmp >= tmp_nxt)
+               if (tmp > tmp_nxt)
                        break;
+
+               /*
+                * Sequence of reqs after the insert one and itself should
+                * be adjusted because each timeout req consumes a slot.
+                */
+               span++;
+               nxt->sequence++;
        }
+       req->sequence -= span;
+add:
        list_add(&req->list, entry);
        spin_unlock_irq(&ctx->completion_lock);
 
@@ -2247,6 +2298,7 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
        switch (op) {
        case IORING_OP_NOP:
        case IORING_OP_POLL_REMOVE:
+       case IORING_OP_TIMEOUT:
                return false;
        default:
                return true;
@@ -2292,11 +2344,11 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
 }
 
 static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                       struct sqe_submit *s, bool force_nonblock)
+                       struct sqe_submit *s)
 {
        int ret;
 
-       ret = __io_submit_sqe(ctx, req, s, force_nonblock);
+       ret = __io_submit_sqe(ctx, req, s, true);
 
        /*
         * We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2343,7 +2395,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 }
 
 static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                       struct sqe_submit *s, bool force_nonblock)
+                       struct sqe_submit *s)
 {
        int ret;
 
@@ -2356,18 +2408,17 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
                return 0;
        }
 
-       return __io_queue_sqe(ctx, req, s, force_nonblock);
+       return __io_queue_sqe(ctx, req, s);
 }
 
 static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                             struct sqe_submit *s, struct io_kiocb *shadow,
-                             bool force_nonblock)
+                             struct sqe_submit *s, struct io_kiocb *shadow)
 {
        int ret;
        int need_submit = false;
 
        if (!shadow)
-               return io_queue_sqe(ctx, req, s, force_nonblock);
+               return io_queue_sqe(ctx, req, s);
 
        /*
         * Mark the first IO in link list as DRAIN, let all the following
@@ -2379,6 +2430,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
        if (ret) {
                if (ret != -EIOCBQUEUED) {
                        io_free_req(req);
+                       __io_free_req(shadow);
                        io_cqring_add_event(ctx, s->sqe->user_data, ret);
                        return 0;
                }
@@ -2396,7 +2448,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
        spin_unlock_irq(&ctx->completion_lock);
 
        if (need_submit)
-               return __io_queue_sqe(ctx, req, s, force_nonblock);
+               return __io_queue_sqe(ctx, req, s);
 
        return 0;
 }
@@ -2404,8 +2456,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 #define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
 
 static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
-                         struct io_submit_state *state, struct io_kiocb **link,
-                         bool force_nonblock)
+                         struct io_submit_state *state, struct io_kiocb **link)
 {
        struct io_uring_sqe *sqe_copy;
        struct io_kiocb *req;
@@ -2432,6 +2483,8 @@ err:
                return;
        }
 
+       req->user_data = s->sqe->user_data;
+
        /*
         * If we already have a head request, queue this one for async
         * submittal once the head completes. If we don't have a head but
@@ -2458,7 +2511,7 @@ err:
                INIT_LIST_HEAD(&req->link_list);
                *link = req;
        } else {
-               io_queue_sqe(ctx, req, s, force_nonblock);
+               io_queue_sqe(ctx, req, s);
        }
 }
 
@@ -2538,12 +2591,13 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
 
        /* drop invalid entries */
        ctx->cached_sq_head++;
-       rings->sq_dropped++;
+       ctx->cached_sq_dropped++;
+       WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
        return false;
 }
 
-static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
-                         unsigned int nr, bool has_user, bool mm_fault)
+static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
+                         bool has_user, bool mm_fault)
 {
        struct io_submit_state state, *statep = NULL;
        struct io_kiocb *link = NULL;
@@ -2557,19 +2611,23 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
        }
 
        for (i = 0; i < nr; i++) {
+               struct sqe_submit s;
+
+               if (!io_get_sqring(ctx, &s))
+                       break;
+
                /*
                 * If previous wasn't linked and we have a linked command,
                 * that's the end of the chain. Submit the previous link.
                 */
                if (!prev_was_link && link) {
-                       io_queue_link_head(ctx, link, &link->submit, shadow_req,
-                                               true);
+                       io_queue_link_head(ctx, link, &link->submit, shadow_req);
                        link = NULL;
                        shadow_req = NULL;
                }
-               prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
+               prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
 
-               if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) {
+               if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
                        if (!shadow_req) {
                                shadow_req = io_get_req(ctx, NULL);
                                if (unlikely(!shadow_req))
@@ -2577,24 +2635,24 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
                                shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
                                refcount_dec(&shadow_req->refs);
                        }
-                       shadow_req->sequence = sqes[i].sequence;
+                       shadow_req->sequence = s.sequence;
                }
 
 out:
                if (unlikely(mm_fault)) {
-                       io_cqring_add_event(ctx, sqes[i].sqe->user_data,
+                       io_cqring_add_event(ctx, s.sqe->user_data,
                                                -EFAULT);
                } else {
-                       sqes[i].has_user = has_user;
-                       sqes[i].needs_lock = true;
-                       sqes[i].needs_fixed_file = true;
-                       io_submit_sqe(ctx, &sqes[i], statep, &link, true);
+                       s.has_user = has_user;
+                       s.needs_lock = true;
+                       s.needs_fixed_file = true;
+                       io_submit_sqe(ctx, &s, statep, &link);
                        submitted++;
                }
        }
 
        if (link)
-               io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
+               io_queue_link_head(ctx, link, &link->submit, shadow_req);
        if (statep)
                io_submit_state_end(&state);
 
@@ -2603,7 +2661,6 @@ out:
 
 static int io_sq_thread(void *data)
 {
-       struct sqe_submit sqes[IO_IOPOLL_BATCH];
        struct io_ring_ctx *ctx = data;
        struct mm_struct *cur_mm = NULL;
        mm_segment_t old_fs;
@@ -2618,14 +2675,27 @@ static int io_sq_thread(void *data)
 
        timeout = inflight = 0;
        while (!kthread_should_park()) {
-               bool all_fixed, mm_fault = false;
-               int i;
+               bool mm_fault = false;
+               unsigned int to_submit;
 
                if (inflight) {
                        unsigned nr_events = 0;
 
                        if (ctx->flags & IORING_SETUP_IOPOLL) {
-                               io_iopoll_check(ctx, &nr_events, 0);
+                               /*
+                                * inflight is the count of the maximum possible
+                                * entries we submitted, but it can be smaller
+                                * if we dropped some of them. If we don't have
+                                * poll entries available, then we know that we
+                                * have nothing left to poll for. Reset the
+                                * inflight count to zero in that case.
+                                */
+                               mutex_lock(&ctx->uring_lock);
+                               if (!list_empty(&ctx->poll_list))
+                                       __io_iopoll_check(ctx, &nr_events, 0);
+                               else
+                                       inflight = 0;
+                               mutex_unlock(&ctx->uring_lock);
                        } else {
                                /*
                                 * Normal IO, just pretend everything completed.
@@ -2639,7 +2709,8 @@ static int io_sq_thread(void *data)
                                timeout = jiffies + ctx->sq_thread_idle;
                }
 
-               if (!io_get_sqring(ctx, &sqes[0])) {
+               to_submit = io_sqring_entries(ctx);
+               if (!to_submit) {
                        /*
                         * We're polling. If we're within the defined idle
                         * period, then let us spin without work before going
@@ -2670,7 +2741,8 @@ static int io_sq_thread(void *data)
                        /* make sure to read SQ tail after writing flags */
                        smp_mb();
 
-                       if (!io_get_sqring(ctx, &sqes[0])) {
+                       to_submit = io_sqring_entries(ctx);
+                       if (!to_submit) {
                                if (kthread_should_park()) {
                                        finish_wait(&ctx->sqo_wait, &wait);
                                        break;
@@ -2688,19 +2760,8 @@ static int io_sq_thread(void *data)
                        ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
                }
 
-               i = 0;
-               all_fixed = true;
-               do {
-                       if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
-                               all_fixed = false;
-
-                       i++;
-                       if (i == ARRAY_SIZE(sqes))
-                               break;
-               } while (io_get_sqring(ctx, &sqes[i]));
-
                /* Unless all new commands are FIXED regions, grab mm */
-               if (!all_fixed && !cur_mm) {
+               if (!cur_mm) {
                        mm_fault = !mmget_not_zero(ctx->sqo_mm);
                        if (!mm_fault) {
                                use_mm(ctx->sqo_mm);
@@ -2708,8 +2769,9 @@ static int io_sq_thread(void *data)
                        }
                }
 
-               inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
-                                               mm_fault);
+               to_submit = min(to_submit, ctx->sq_entries);
+               inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
+                                          mm_fault);
 
                /* Commit SQ ring head once we've consumed all SQEs */
                io_commit_sqring(ctx);
@@ -2726,8 +2788,7 @@ static int io_sq_thread(void *data)
        return 0;
 }
 
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
-                         bool block_for_last)
+static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 {
        struct io_submit_state state, *statep = NULL;
        struct io_kiocb *link = NULL;
@@ -2741,7 +2802,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
        }
 
        for (i = 0; i < to_submit; i++) {
-               bool force_nonblock = true;
                struct sqe_submit s;
 
                if (!io_get_sqring(ctx, &s))
@@ -2752,8 +2812,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
                 * that's the end of the chain. Submit the previous link.
                 */
                if (!prev_was_link && link) {
-                       io_queue_link_head(ctx, link, &link->submit, shadow_req,
-                                               force_nonblock);
+                       io_queue_link_head(ctx, link, &link->submit, shadow_req);
                        link = NULL;
                        shadow_req = NULL;
                }
@@ -2775,27 +2834,16 @@ out:
                s.needs_lock = false;
                s.needs_fixed_file = false;
                submit++;
-
-               /*
-                * The caller will block for events after submit, submit the
-                * last IO non-blocking. This is either the only IO it's
-                * submitting, or it already submitted the previous ones. This
-                * improves performance by avoiding an async punt that we don't
-                * need to do.
-                */
-               if (block_for_last && submit == to_submit)
-                       force_nonblock = false;
-
-               io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
+               io_submit_sqe(ctx, &s, statep, &link);
        }
-       io_commit_sqring(ctx);
 
        if (link)
-               io_queue_link_head(ctx, link, &link->submit, shadow_req,
-                                       !block_for_last);
+               io_queue_link_head(ctx, link, &link->submit, shadow_req);
        if (statep)
                io_submit_state_end(statep);
 
+       io_commit_sqring(ctx);
+
        return submit;
 }
 
@@ -3636,21 +3684,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                        wake_up(&ctx->sqo_wait);
                submitted = to_submit;
        } else if (to_submit) {
-               bool block_for_last = false;
-
                to_submit = min(to_submit, ctx->sq_entries);
 
-               /*
-                * Allow last submission to block in a series, IFF the caller
-                * asked to wait for events and we don't currently have
-                * enough. This potentially avoids an async punt.
-                */
-               if (to_submit == min_complete &&
-                   io_cqring_events(ctx->rings) < min_complete)
-                       block_for_last = true;
-
                mutex_lock(&ctx->uring_lock);
-               submitted = io_ring_submit(ctx, to_submit, block_for_last);
+               submitted = io_ring_submit(ctx, to_submit);
                mutex_unlock(&ctx->uring_lock);
        }
        if (flags & IORING_ENTER_GETEVENTS) {
@@ -3809,10 +3846,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
        if (ret)
                goto err;
 
-       ret = io_uring_get_fd(ctx);
-       if (ret < 0)
-               goto err;
-
        memset(&p->sq_off, 0, sizeof(p->sq_off));
        p->sq_off.head = offsetof(struct io_rings, sq.head);
        p->sq_off.tail = offsetof(struct io_rings, sq.tail);
@@ -3830,6 +3863,14 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
        p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
        p->cq_off.cqes = offsetof(struct io_rings, cqes);
 
+       /*
+        * Install ring fd as the very last thing, so we don't risk someone
+        * having closed it before we finish setup
+        */
+       ret = io_uring_get_fd(ctx);
+       if (ret < 0)
+               goto err;
+
        p->features = IORING_FEAT_SINGLE_MMAP;
        return ret;
 err:
index fe0e9e1..2adfe7b 100644 (file)
@@ -2478,8 +2478,10 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
 
                time64_to_tm(sb->s_time_max, 0, &tm);
 
-               pr_warn("Mounted %s file system at %s supports timestamps until %04ld (0x%llx)\n",
-                       sb->s_type->name, mntpath,
+               pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
+                       sb->s_type->name,
+                       is_mounted(mnt) ? "remounted" : "mounted",
+                       mntpath,
                        tm.tm_year+1900, (unsigned long long)sb->s_time_max);
 
                free_page((unsigned long)buf);
@@ -2764,14 +2766,11 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
        if (IS_ERR(mnt))
                return PTR_ERR(mnt);
 
-       error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
-       if (error < 0) {
-               mntput(mnt);
-               return error;
-       }
-
        mnt_warn_timestamp_expiry(mountpoint, mnt);
 
+       error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
+       if (error < 0)
+               mntput(mnt);
        return error;
 }
 
index 071b90a..af549d7 100644 (file)
@@ -53,6 +53,16 @@ nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
        return false;
 }
 
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
+{
+       struct nfs_delegation *delegation;
+
+       delegation = rcu_dereference(NFS_I(inode)->delegation);
+       if (nfs4_is_valid_delegation(delegation, 0))
+               return delegation;
+       return NULL;
+}
+
 static int
 nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
 {
@@ -1181,7 +1191,7 @@ bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
        if (delegation != NULL &&
            nfs4_stateid_match_other(dst, &delegation->stateid)) {
                dst->seqid = delegation->stateid.seqid;
-               return ret;
+               ret = true;
        }
        rcu_read_unlock();
 out:
index 9eb87ae..8b14d44 100644 (file)
@@ -68,6 +68,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
 bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
 bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
 
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode);
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
 int nfs4_have_delegation(struct inode *inode, fmode_t flags);
 int nfs4_check_delegation(struct inode *inode, fmode_t flags);
index ab8ca20..caacf5e 100644 (file)
@@ -1440,8 +1440,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
                return 0;
        if ((delegation->type & fmode) != fmode)
                return 0;
-       if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
-               return 0;
        switch (claim) {
        case NFS4_OPEN_CLAIM_NULL:
        case NFS4_OPEN_CLAIM_FH:
@@ -1810,7 +1808,6 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo
 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
 {
        struct nfs4_state *state = opendata->state;
-       struct nfs_inode *nfsi = NFS_I(state->inode);
        struct nfs_delegation *delegation;
        int open_mode = opendata->o_arg.open_flags;
        fmode_t fmode = opendata->o_arg.fmode;
@@ -1827,7 +1824,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
                }
                spin_unlock(&state->owner->so_lock);
                rcu_read_lock();
-               delegation = rcu_dereference(nfsi->delegation);
+               delegation = nfs4_get_valid_delegation(state->inode);
                if (!can_open_delegated(delegation, fmode, claim)) {
                        rcu_read_unlock();
                        break;
@@ -2371,7 +2368,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                                        data->o_arg.open_flags, claim))
                        goto out_no_action;
                rcu_read_lock();
-               delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
+               delegation = nfs4_get_valid_delegation(data->state->inode);
                if (can_open_delegated(delegation, data->o_arg.fmode, claim))
                        goto unlock_no_action;
                rcu_read_unlock();
index 53939bf..9876db5 100644 (file)
@@ -2098,53 +2098,89 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
        return 0;
 }
 
-static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
-                                           struct file *file,
-                                           loff_t pos, size_t count,
-                                           int *meta_level)
+static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
+                                           struct buffer_head **di_bh,
+                                           int meta_level,
+                                           int overwrite_io,
+                                           int write_sem,
+                                           int wait)
 {
-       int ret;
-       struct buffer_head *di_bh = NULL;
-       u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
-       u32 clusters =
-               ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+       int ret = 0;
 
-       ret = ocfs2_inode_lock(inode, &di_bh, 1);
-       if (ret) {
-               mlog_errno(ret);
+       if (wait)
+               ret = ocfs2_inode_lock(inode, NULL, meta_level);
+       else
+               ret = ocfs2_try_inode_lock(inode,
+                       overwrite_io ? NULL : di_bh, meta_level);
+       if (ret < 0)
                goto out;
+
+       if (wait) {
+               if (write_sem)
+                       down_write(&OCFS2_I(inode)->ip_alloc_sem);
+               else
+                       down_read(&OCFS2_I(inode)->ip_alloc_sem);
+       } else {
+               if (write_sem)
+                       ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
+               else
+                       ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
+
+               if (!ret) {
+                       ret = -EAGAIN;
+                       goto out_unlock;
+               }
        }
 
-       *meta_level = 1;
+       return ret;
 
-       ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
-       if (ret)
-               mlog_errno(ret);
+out_unlock:
+       brelse(*di_bh);
+       ocfs2_inode_unlock(inode, meta_level);
 out:
-       brelse(di_bh);
        return ret;
 }
 
+static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
+                                              struct buffer_head **di_bh,
+                                              int meta_level,
+                                              int write_sem)
+{
+       if (write_sem)
+               up_write(&OCFS2_I(inode)->ip_alloc_sem);
+       else
+               up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+       brelse(*di_bh);
+       *di_bh = NULL;
+
+       if (meta_level >= 0)
+               ocfs2_inode_unlock(inode, meta_level);
+}
+
 static int ocfs2_prepare_inode_for_write(struct file *file,
                                         loff_t pos, size_t count, int wait)
 {
        int ret = 0, meta_level = 0, overwrite_io = 0;
+       int write_sem = 0;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = d_inode(dentry);
        struct buffer_head *di_bh = NULL;
+       u32 cpos;
+       u32 clusters;
 
        /*
         * We start with a read level meta lock and only jump to an ex
         * if we need to make modifications here.
         */
        for(;;) {
-               if (wait)
-                       ret = ocfs2_inode_lock(inode, NULL, meta_level);
-               else
-                       ret = ocfs2_try_inode_lock(inode,
-                               overwrite_io ? NULL : &di_bh, meta_level);
+               ret = ocfs2_inode_lock_for_extent_tree(inode,
+                                                      &di_bh,
+                                                      meta_level,
+                                                      overwrite_io,
+                                                      write_sem,
+                                                      wait);
                if (ret < 0) {
-                       meta_level = -1;
                        if (ret != -EAGAIN)
                                mlog_errno(ret);
                        goto out;
@@ -2156,15 +2192,8 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
                 */
                if (!wait && !overwrite_io) {
                        overwrite_io = 1;
-                       if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
-                               ret = -EAGAIN;
-                               goto out_unlock;
-                       }
 
                        ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
-                       brelse(di_bh);
-                       di_bh = NULL;
-                       up_read(&OCFS2_I(inode)->ip_alloc_sem);
                        if (ret < 0) {
                                if (ret != -EAGAIN)
                                        mlog_errno(ret);
@@ -2183,7 +2212,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
                 * set inode->i_size at the end of a write. */
                if (should_remove_suid(dentry)) {
                        if (meta_level == 0) {
-                               ocfs2_inode_unlock(inode, meta_level);
+                               ocfs2_inode_unlock_for_extent_tree(inode,
+                                                                  &di_bh,
+                                                                  meta_level,
+                                                                  write_sem);
                                meta_level = 1;
                                continue;
                        }
@@ -2197,18 +2229,32 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
 
                ret = ocfs2_check_range_for_refcount(inode, pos, count);
                if (ret == 1) {
-                       ocfs2_inode_unlock(inode, meta_level);
-                       meta_level = -1;
-
-                       ret = ocfs2_prepare_inode_for_refcount(inode,
-                                                              file,
-                                                              pos,
-                                                              count,
-                                                              &meta_level);
+                       ocfs2_inode_unlock_for_extent_tree(inode,
+                                                          &di_bh,
+                                                          meta_level,
+                                                          write_sem);
+                       ret = ocfs2_inode_lock_for_extent_tree(inode,
+                                                              &di_bh,
+                                                              meta_level,
+                                                              overwrite_io,
+                                                              1,
+                                                              wait);
+                       write_sem = 1;
+                       if (ret < 0) {
+                               if (ret != -EAGAIN)
+                                       mlog_errno(ret);
+                               goto out;
+                       }
+
+                       cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+                       clusters =
+                               ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+                       ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
                }
 
                if (ret < 0) {
-                       mlog_errno(ret);
+                       if (ret != -EAGAIN)
+                               mlog_errno(ret);
                        goto out_unlock;
                }
 
@@ -2219,10 +2265,10 @@ out_unlock:
        trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
                                            pos, count, wait);
 
-       brelse(di_bh);
-
-       if (meta_level >= 0)
-               ocfs2_inode_unlock(inode, meta_level);
+       ocfs2_inode_unlock_for_extent_tree(inode,
+                                          &di_bh,
+                                          meta_level,
+                                          write_sem);
 
 out:
        return ret;
index f936033..4780517 100644 (file)
@@ -232,8 +232,8 @@ struct acpi_processor {
        struct acpi_processor_limit limit;
        struct thermal_cooling_device *cdev;
        struct device *dev; /* Processor device. */
-       struct dev_pm_qos_request perflib_req;
-       struct dev_pm_qos_request thermal_req;
+       struct freq_qos_request perflib_req;
+       struct freq_qos_request thermal_req;
 };
 
 struct acpi_processor_errata {
@@ -302,8 +302,8 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
 #ifdef CONFIG_CPU_FREQ
 extern bool acpi_processor_cpufreq_init;
 void acpi_processor_ignore_ppc_init(void);
-void acpi_processor_ppc_init(int cpu);
-void acpi_processor_ppc_exit(int cpu);
+void acpi_processor_ppc_init(struct cpufreq_policy *policy);
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy);
 void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
 extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
 #else
@@ -311,11 +311,11 @@ static inline void acpi_processor_ignore_ppc_init(void)
 {
        return;
 }
-static inline void acpi_processor_ppc_init(int cpu)
+static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy)
 {
        return;
 }
-static inline void acpi_processor_ppc_exit(int cpu)
+static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
 {
        return;
 }
@@ -431,14 +431,14 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
 #if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
-void acpi_thermal_cpufreq_init(int cpu);
-void acpi_thermal_cpufreq_exit(int cpu);
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
 #else
-static inline void acpi_thermal_cpufreq_init(int cpu)
+static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
 {
        return;
 }
-static inline void acpi_thermal_cpufreq_exit(int cpu)
+static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 {
        return;
 }
index e94b197..ce41032 100644 (file)
@@ -25,13 +25,6 @@ static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
 }
 #endif /* __arch_get_clock_mode */
 
-#ifndef __arch_use_vsyscall
-static __always_inline int __arch_use_vsyscall(struct vdso_data *vdata)
-{
-       return 1;
-}
-#endif /* __arch_use_vsyscall */
-
 #ifndef __arch_update_vsyscall
 static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
                                                   struct timekeeper *tk)
index 01f5145..7865e6b 100644 (file)
@@ -44,7 +44,20 @@ struct drm_gem_shmem_object {
         */
        unsigned int pages_use_count;
 
+       /**
+        * @madv: State for madvise
+        *
+        * 0 is active/inuse.
+        * A negative value is the object is purged.
+        * Positive values are driver specific and not used by the helpers.
+        */
        int madv;
+
+       /**
+        * @madv_list: List entry for madvise tracking
+        *
+        * Typically used by drivers to track purgeable objects
+        */
        struct list_head madv_list;
 
        /**
index 5b79d25..520235c 100644 (file)
@@ -13,7 +13,8 @@ struct drm_crtc;
 
 void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state);
 void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
-                                             unsigned int commit_time_ms);
+                                       unsigned int commit_time_ms,
+                                       unsigned int new_self_refresh_mask);
 
 int drm_self_refresh_helper_init(struct drm_crtc *crtc);
 void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc);
diff --git a/include/dt-bindings/pmu/exynos_ppmu.h b/include/dt-bindings/pmu/exynos_ppmu.h
new file mode 100644 (file)
index 0000000..8724abe
--- /dev/null
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Samsung Exynos PPMU event types for counting in regs
+ *
+ * Copyright (c) 2019, Samsung Electronics
+ * Author: Lukasz Luba <l.luba@partner.samsung.com>
+ */
+
+#ifndef __DT_BINDINGS_PMU_EXYNOS_PPMU_H
+#define __DT_BINDINGS_PMU_EXYNOS_PPMU_H
+
+#define PPMU_RO_BUSY_CYCLE_CNT         0x0
+#define PPMU_WO_BUSY_CYCLE_CNT         0x1
+#define PPMU_RW_BUSY_CYCLE_CNT         0x2
+#define PPMU_RO_REQUEST_CNT            0x3
+#define PPMU_WO_REQUEST_CNT            0x4
+#define PPMU_RO_DATA_CNT               0x5
+#define PPMU_WO_DATA_CNT               0x6
+#define PPMU_RO_LATENCY                        0x12
+#define PPMU_WO_LATENCY                        0x16
+#define PPMU_V2_RO_DATA_CNT            0x4
+#define PPMU_V2_WO_DATA_CNT            0x5
+#define PPMU_V2_EVT3_RW_DATA_CNT       0x22
+
+#endif
index 5b9d223..3bf3835 100644 (file)
@@ -656,11 +656,11 @@ void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
 void bpf_map_charge_finish(struct bpf_map_memory *mem);
 void bpf_map_charge_move(struct bpf_map_memory *dst,
                         struct bpf_map_memory *src);
-void *bpf_map_area_alloc(size_t size, int numa_node);
+void *bpf_map_area_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 
index 8339071..e20a0cd 100644 (file)
@@ -65,5 +65,6 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev,
                              void *data);
 
 extern int can_send(struct sk_buff *skb, int loop);
+void can_sock_destruct(struct sock *sk);
 
 #endif /* !_CAN_CORE_H */
index d0633eb..1ca2baf 100644 (file)
@@ -59,6 +59,11 @@ extern ssize_t cpu_show_l1tf(struct device *dev,
                             struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_mds(struct device *dev,
                            struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf);
+extern ssize_t cpu_show_itlb_multihit(struct device *dev,
+                                     struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -179,7 +184,12 @@ void arch_cpu_idle_dead(void);
 int cpu_report_state(int cpu);
 int cpu_check_up_prepare(int cpu);
 void cpu_set_state_online(int cpu);
-void play_idle(unsigned long duration_us);
+void play_idle_precise(u64 duration_ns, u64 latency_ns);
+
+static inline void play_idle(unsigned long duration_us)
+{
+       play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
+}
 
 #ifdef CONFIG_HOTPLUG_CPU
 bool cpu_wait_death(unsigned int cpu, int seconds);
@@ -213,28 +223,7 @@ static inline int cpuhp_smt_enable(void) { return 0; }
 static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
 #endif
 
-/*
- * These are used for a global "mitigations=" cmdline option for toggling
- * optional CPU mitigations.
- */
-enum cpu_mitigations {
-       CPU_MITIGATIONS_OFF,
-       CPU_MITIGATIONS_AUTO,
-       CPU_MITIGATIONS_AUTO_NOSMT,
-};
-
-extern enum cpu_mitigations cpu_mitigations;
-
-/* mitigations=off */
-static inline bool cpu_mitigations_off(void)
-{
-       return cpu_mitigations == CPU_MITIGATIONS_OFF;
-}
-
-/* mitigations=auto,nosmt */
-static inline bool cpu_mitigations_auto_nosmt(void)
-{
-       return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
-}
+extern bool cpu_mitigations_off(void);
+extern bool cpu_mitigations_auto_nosmt(void);
 
 #endif /* _LINUX_CPU_H_ */
index c57e88e..92d5fdc 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/completion.h>
 #include <linux/kobject.h>
 #include <linux/notifier.h>
+#include <linux/pm_qos.h>
 #include <linux/spinlock.h>
 #include <linux/sysfs.h>
 
@@ -76,8 +77,10 @@ struct cpufreq_policy {
        struct work_struct      update; /* if update_policy() needs to be
                                         * called, but you're in IRQ context */
 
-       struct dev_pm_qos_request *min_freq_req;
-       struct dev_pm_qos_request *max_freq_req;
+       struct freq_constraints constraints;
+       struct freq_qos_request *min_freq_req;
+       struct freq_qos_request *max_freq_req;
+
        struct cpufreq_frequency_table  *freq_table;
        enum cpufreq_table_sorting freq_table_sorted;
 
index 4b6b5be..2dbe46b 100644 (file)
@@ -29,10 +29,13 @@ struct cpuidle_driver;
  * CPUIDLE DEVICE INTERFACE *
  ****************************/
 
+#define CPUIDLE_STATE_DISABLED_BY_USER         BIT(0)
+#define CPUIDLE_STATE_DISABLED_BY_DRIVER       BIT(1)
+
 struct cpuidle_state_usage {
        unsigned long long      disable;
        unsigned long long      usage;
-       unsigned long long      time; /* in US */
+       u64                     time_ns;
        unsigned long long      above; /* Number of times it's been too deep */
        unsigned long long      below; /* Number of times it's been too shallow */
 #ifdef CONFIG_SUSPEND
@@ -45,6 +48,8 @@ struct cpuidle_state {
        char            name[CPUIDLE_NAME_LEN];
        char            desc[CPUIDLE_DESC_LEN];
 
+       u64             exit_latency_ns;
+       u64             target_residency_ns;
        unsigned int    flags;
        unsigned int    exit_latency; /* in US */
        int             power_usage; /* in mW */
@@ -80,14 +85,14 @@ struct cpuidle_driver_kobj;
 struct cpuidle_device {
        unsigned int            registered:1;
        unsigned int            enabled:1;
-       unsigned int            use_deepest_state:1;
        unsigned int            poll_time_limit:1;
        unsigned int            cpu;
        ktime_t                 next_hrtimer;
 
        int                     last_state_idx;
-       int                     last_residency;
+       u64                     last_residency_ns;
        u64                     poll_limit_ns;
+       u64                     forced_idle_latency_limit_ns;
        struct cpuidle_state_usage      states_usage[CPUIDLE_STATE_MAX];
        struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
        struct cpuidle_driver_kobj *kobj_driver;
@@ -144,6 +149,8 @@ extern int cpuidle_register_driver(struct cpuidle_driver *drv);
 extern struct cpuidle_driver *cpuidle_get_driver(void);
 extern struct cpuidle_driver *cpuidle_driver_ref(void);
 extern void cpuidle_driver_unref(void);
+extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+                                       bool disable);
 extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
 extern int cpuidle_register_device(struct cpuidle_device *dev);
 extern void cpuidle_unregister_device(struct cpuidle_device *dev);
@@ -181,6 +188,8 @@ static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
 static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
 static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
 static inline void cpuidle_driver_unref(void) {}
+static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
+                                              int idx, bool disable) { }
 static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
 static inline int cpuidle_register_device(struct cpuidle_device *dev)
 {return -ENODEV; }
@@ -204,18 +213,20 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
 
 #ifdef CONFIG_CPU_IDLE
 extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-                                     struct cpuidle_device *dev);
+                                     struct cpuidle_device *dev,
+                                     u64 latency_limit_ns);
 extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev);
-extern void cpuidle_use_deepest_state(bool enable);
+extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
 #else
 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-                                            struct cpuidle_device *dev)
+                                            struct cpuidle_device *dev,
+                                            u64 latency_limit_ns)
 {return -ENODEV; }
 static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
                                       struct cpuidle_device *dev)
 {return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable)
+static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
 {
 }
 #endif
@@ -260,7 +271,7 @@ struct cpuidle_governor {
 
 #ifdef CONFIG_CPU_IDLE
 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-extern int cpuidle_governor_latency_req(unsigned int cpu);
+extern s64 cpuidle_governor_latency_req(unsigned int cpu);
 #else
 static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 {return 0;}
index 6c80944..4cf02ec 100644 (file)
@@ -204,6 +204,12 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
        do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
 #define dynamic_dev_dbg(dev, fmt, ...)                                 \
        do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize,             \
+                        groupsize, buf, len, ascii)                    \
+       do { if (0)                                                     \
+               print_hex_dump(KERN_DEBUG, prefix_str, prefix_type,     \
+                               rowsize, groupsize, buf, len, ascii);   \
+       } while (0)
 #endif
 
 #endif
index bd38370..d87acf6 100644 (file)
@@ -1579,9 +1579,22 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
 efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
                                struct efi_boot_memmap *map);
 
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+                                unsigned long size, unsigned long align,
+                                unsigned long *addr, unsigned long min);
+
+static inline
 efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                           unsigned long size, unsigned long align,
-                          unsigned long *addr);
+                          unsigned long *addr)
+{
+       /*
+        * Don't allocate at 0x0. It will confuse code that
+        * checks pointers against NULL. Skip the first 8
+        * bytes so we start at a nice even number.
+        */
+       return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8);
+}
 
 efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
                            unsigned long size, unsigned long align,
@@ -1592,7 +1605,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
                                 unsigned long image_size,
                                 unsigned long alloc_size,
                                 unsigned long preferred_addr,
-                                unsigned long alignment);
+                                unsigned long alignment,
+                                unsigned long min_addr);
 
 efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
                                  efi_loaded_image_t *image,
index 621158e..941d075 100644 (file)
@@ -18,8 +18,6 @@ extern struct module __this_module;
 #define THIS_MODULE ((struct module *)0)
 #endif
 
-#define NS_SEPARATOR "."
-
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
@@ -48,11 +46,11 @@ extern struct module __this_module;
  * absolute relocations that require runtime processing on relocatable
  * kernels.
  */
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns)                               \
+#define __KSYMTAB_ENTRY_NS(sym, sec)                                   \
        __ADDRESSABLE(sym)                                              \
        asm("   .section \"___ksymtab" sec "+" #sym "\", \"a\"  \n"     \
            "   .balign 4                                       \n"     \
-           "__ksymtab_" #ns NS_SEPARATOR #sym ":               \n"     \
+           "__ksymtab_" #sym ":                                \n"     \
            "   .long   " #sym "- .                             \n"     \
            "   .long   __kstrtab_" #sym "- .                   \n"     \
            "   .long   __kstrtabns_" #sym "- .                 \n"     \
@@ -74,16 +72,14 @@ struct kernel_symbol {
        int namespace_offset;
 };
 #else
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns)                               \
-       static const struct kernel_symbol __ksymtab_##sym##__##ns       \
-       asm("__ksymtab_" #ns NS_SEPARATOR #sym)                         \
+#define __KSYMTAB_ENTRY_NS(sym, sec)                                   \
+       static const struct kernel_symbol __ksymtab_##sym               \
        __attribute__((section("___ksymtab" sec "+" #sym), used))       \
        __aligned(sizeof(void *))                                       \
        = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
 
 #define __KSYMTAB_ENTRY(sym, sec)                                      \
        static const struct kernel_symbol __ksymtab_##sym               \
-       asm("__ksymtab_" #sym)                                          \
        __attribute__((section("___ksymtab" sec "+" #sym), used))       \
        __aligned(sizeof(void *))                                       \
        = { (unsigned long)&sym, __kstrtab_##sym, NULL }
@@ -115,7 +111,7 @@ struct kernel_symbol {
        static const char __kstrtabns_##sym[]                           \
        __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
        = #ns;                                                          \
-       __KSYMTAB_ENTRY_NS(sym, sec, ns)
+       __KSYMTAB_ENTRY_NS(sym, sec)
 
 #define ___EXPORT_SYMBOL(sym, sec)                                     \
        ___export_symbol_common(sym, sec);                              \
index 2ce5764..0367a75 100644 (file)
@@ -1099,7 +1099,6 @@ static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 
 #endif /* CONFIG_BPF_JIT */
 
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
 
 #define BPF_ANC                BIT(15)
index fb07b50..61f2f6f 100644 (file)
@@ -325,6 +325,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
        return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
 }
 
+/**
+ * gfpflags_normal_context - is gfp_flags a normal sleepable context?
+ * @gfp_flags: gfp_flags to test
+ *
+ * Test whether @gfp_flags indicates that the allocation is from the
+ * %current context and allowed to sleep.
+ *
+ * An allocation being allowed to block doesn't mean it owns the %current
+ * context.  When direct reclaim path tries to allocate memory, the
+ * allocation context is nested inside whatever %current was doing at the
+ * time of the original allocation.  The nested allocation may be allowed
+ * to block but modifying anything %current owns can corrupt the outer
+ * context's expectations.
+ *
+ * %true result from this function indicates that the allocation context
+ * can sleep and use anything that's associated with %current.
+ */
+static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
+{
+       return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
+               __GFP_DIRECT_RECLAIM;
+}
+
 #ifdef CONFIG_HIGHMEM
 #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
 #else
index 4ec8986..ac6e946 100644 (file)
@@ -185,7 +185,7 @@ static inline void idr_preload_end(void)
  * is convenient for a "not found" value.
  */
 #define idr_for_each_entry(idr, entry, id)                     \
-       for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
+       for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
 
 /**
  * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
index 2e55e4c..a367ead 100644 (file)
@@ -29,7 +29,6 @@ struct macvlan_dev {
        netdev_features_t       set_features;
        enum macvlan_mode       mode;
        u16                     flags;
-       int                     nest_level;
        unsigned int            macaddr_count;
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll          *netpoll;
index 06faa06..ec7e4bd 100644 (file)
@@ -223,6 +223,7 @@ struct team {
                atomic_t count_pending;
                struct delayed_work dw;
        } mcast_rejoin;
+       struct lock_class_key team_lock_key;
        long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
index 244278d..b05e855 100644 (file)
@@ -182,7 +182,6 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                          *netpoll;
 #endif
-       unsigned int                            nest_level;
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -221,11 +220,6 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
 
 extern bool vlan_uses_dev(const struct net_device *dev);
 
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
-       BUG_ON(!is_vlan_dev(dev));
-       return vlan_dev_priv(dev)->nest_level;
-}
 #else
 static inline struct net_device *
 __vlan_find_dev_deep_rcu(struct net_device *real_dev,
@@ -295,11 +289,6 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 {
        return false;
 }
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
-       BUG();
-       return 0;
-}
 #endif
 
 /**
index ed11ef5..6d8bf4b 100644 (file)
@@ -336,7 +336,8 @@ enum {
 #define QI_DEV_IOTLB_SID(sid)  ((u64)((sid) & 0xffff) << 32)
 #define QI_DEV_IOTLB_QDEP(qdep)        (((qdep) & 0x1f) << 16)
 #define QI_DEV_IOTLB_ADDR(addr)        ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+                                  ((u64)((pfsid >> 4) & 0xfff) << 52))
 #define QI_DEV_IOTLB_SIZE      1
 #define QI_DEV_IOTLB_MAX_INVS  32
 
@@ -360,7 +361,8 @@ enum {
 #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
 #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
 #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+                                   ((u64)((pfsid >> 4) & 0xfff) << 52))
 #define QI_DEV_EIOTLB_MAX_INVS 32
 
 /* Page group response descriptor QW0 */
index 719fc3e..d41c521 100644 (file)
@@ -966,6 +966,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
        struct hlist_node link;
@@ -1382,4 +1383,10 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
 
+typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+                               uintptr_t data, const char *name,
+                               struct task_struct **thread_ptr);
+
 #endif
index 0ebb105..4c75dae 100644 (file)
@@ -119,6 +119,7 @@ extern struct memory_block *find_memory_block(struct mem_section *);
 typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
 extern int walk_memory_blocks(unsigned long start, unsigned long size,
                              void *arg, walk_memory_blocks_func_t func);
+extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
 #define CONFIG_MEM_BLOCK_SIZE  (PAGES_PER_SECTION<<PAGE_SHIFT)
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
index 138c50d..0836fe2 100644 (file)
@@ -1545,9 +1545,8 @@ struct mlx5_ifc_extended_dest_format_bits {
 };
 
 union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
-       struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+       struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
        struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
-       u8         reserved_at_0[0x40];
 };
 
 struct mlx5_ifc_fte_match_param_bits {
index cc29227..a2adf95 100644 (file)
@@ -695,11 +695,6 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
 
 extern void kvfree(const void *addr);
 
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
-       return &page[1].compound_mapcount;
-}
-
 static inline int compound_mapcount(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageCompound(page), page);
index 2222fa7..270aa8f 100644 (file)
@@ -221,6 +221,11 @@ struct page {
 #endif
 } _struct_page_alignment;
 
+static inline atomic_t *compound_mapcount_ptr(struct page *page)
+{
+       return &page[1].compound_mapcount;
+}
+
 /*
  * Used for sizing the vmemmap region on some architectures
  */
index 9eda1c3..c20f190 100644 (file)
@@ -925,6 +925,7 @@ struct dev_ifalias {
 struct devlink;
 struct tlsdev_ops;
 
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -1421,7 +1422,6 @@ struct net_device_ops {
        void                    (*ndo_dfwd_del_station)(struct net_device *pdev,
                                                        void *priv);
 
-       int                     (*ndo_get_lock_subclass)(struct net_device *dev);
        int                     (*ndo_set_tx_maxrate)(struct net_device *dev,
                                                      int queue_index,
                                                      u32 maxrate);
@@ -1649,6 +1649,8 @@ enum netdev_priv_flags {
  *     @perm_addr:             Permanent hw address
  *     @addr_assign_type:      Hw address assignment type
  *     @addr_len:              Hardware address length
+ *     @upper_level:           Maximum depth level of upper devices.
+ *     @lower_level:           Maximum depth level of lower devices.
  *     @neigh_priv_len:        Used in neigh_alloc()
  *     @dev_id:                Used to differentiate devices that share
  *                             the same link layer address
@@ -1758,9 +1760,13 @@ enum netdev_priv_flags {
  *     @phydev:        Physical device may attach itself
  *                     for hardware timestamping
  *     @sfp_bus:       attached &struct sfp_bus structure.
- *
- *     @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
- *     @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
+ *     @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
+                               spinlock
+ *     @qdisc_running_key:     lockdep class annotating Qdisc->running seqcount
+ *     @qdisc_xmit_lock_key:   lockdep class annotating
+ *                             netdev_queue->_xmit_lock spinlock
+ *     @addr_list_lock_key:    lockdep class annotating
+ *                             net_device->addr_list_lock spinlock
  *
  *     @proto_down:    protocol port state information can be sent to the
  *                     switch driver and used to set the phys state of the
@@ -1875,6 +1881,8 @@ struct net_device {
        unsigned char           perm_addr[MAX_ADDR_LEN];
        unsigned char           addr_assign_type;
        unsigned char           addr_len;
+       unsigned char           upper_level;
+       unsigned char           lower_level;
        unsigned short          neigh_priv_len;
        unsigned short          dev_id;
        unsigned short          dev_port;
@@ -2045,8 +2053,10 @@ struct net_device {
 #endif
        struct phy_device       *phydev;
        struct sfp_bus          *sfp_bus;
-       struct lock_class_key   *qdisc_tx_busylock;
-       struct lock_class_key   *qdisc_running_key;
+       struct lock_class_key   qdisc_tx_busylock_key;
+       struct lock_class_key   qdisc_running_key;
+       struct lock_class_key   qdisc_xmit_lock_key;
+       struct lock_class_key   addr_list_lock_key;
        bool                    proto_down;
        unsigned                wol_enabled:1;
 };
@@ -2124,23 +2134,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
                f(dev, &dev->_tx[i], arg);
 }
 
-#define netdev_lockdep_set_classes(dev)                                \
-{                                                              \
-       static struct lock_class_key qdisc_tx_busylock_key;     \
-       static struct lock_class_key qdisc_running_key;         \
-       static struct lock_class_key qdisc_xmit_lock_key;       \
-       static struct lock_class_key dev_addr_list_lock_key;    \
-       unsigned int i;                                         \
-                                                               \
-       (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;      \
-       (dev)->qdisc_running_key = &qdisc_running_key;          \
-       lockdep_set_class(&(dev)->addr_list_lock,               \
-                         &dev_addr_list_lock_key);             \
-       for (i = 0; i < (dev)->num_tx_queues; i++)              \
-               lockdep_set_class(&(dev)->_tx[i]._xmit_lock,    \
-                                 &qdisc_xmit_lock_key);        \
-}
-
 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
                     struct net_device *sb_dev);
 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
@@ -3139,6 +3132,7 @@ static inline void netif_stop_queue(struct net_device *dev)
 }
 
 void netif_tx_stop_all_queues(struct net_device *dev);
+void netdev_update_lockdep_key(struct net_device *dev);
 
 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
@@ -4056,16 +4050,6 @@ static inline void netif_addr_lock(struct net_device *dev)
        spin_lock(&dev->addr_list_lock);
 }
 
-static inline void netif_addr_lock_nested(struct net_device *dev)
-{
-       int subclass = SINGLE_DEPTH_NESTING;
-
-       if (dev->netdev_ops->ndo_get_lock_subclass)
-               subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
-
-       spin_lock_nested(&dev->addr_list_lock, subclass);
-}
-
 static inline void netif_addr_lock_bh(struct net_device *dev)
 {
        spin_lock_bh(&dev->addr_list_lock);
@@ -4329,6 +4313,16 @@ int netdev_master_upper_dev_link(struct net_device *dev,
                                 struct netlink_ext_ack *extack);
 void netdev_upper_dev_unlink(struct net_device *dev,
                             struct net_device *upper_dev);
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev,
+                                  struct netlink_ext_ack *extack);
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev);
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+                                 struct net_device *new_dev,
+                                 struct net_device *dev);
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
@@ -4340,7 +4334,6 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 void netdev_rss_key_fill(void *buffer, size_t len);
 
-int dev_get_nest_level(struct net_device *dev);
 int skb_checksum_help(struct sk_buff *skb);
 int skb_crc32c_csum_help(struct sk_buff *skb);
 int skb_csum_hwoffload_help(struct sk_buff *skb,
index f91cb88..1bf83c8 100644 (file)
@@ -622,12 +622,28 @@ static inline int PageTransCompound(struct page *page)
  *
  * Unlike PageTransCompound, this is safe to be called only while
  * split_huge_pmd() cannot run from under us, like if protected by the
- * MMU notifier, otherwise it may result in page->_mapcount < 0 false
+ * MMU notifier, otherwise it may result in page->_mapcount check false
  * positives.
+ *
+ * We have to treat page cache THP differently since every subpage of it
+ * would get _mapcount inc'ed once it is PMD mapped.  But, it may be PTE
+ * mapped in the current process so comparing subpage's _mapcount to
+ * compound_mapcount to filter out PTE mapped case.
  */
 static inline int PageTransCompoundMap(struct page *page)
 {
-       return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+       struct page *head;
+
+       if (!PageTransCompound(page))
+               return 0;
+
+       if (PageAnon(page))
+               return atomic_read(&page->_mapcount) < 0;
+
+       head = compound_head(page);
+       /* File THP is PMD mapped and not PTE mapped */
+       return atomic_read(&page->_mapcount) ==
+              atomic_read(compound_mapcount_ptr(head));
 }
 
 /*
index 61448c1..68ccc5b 100644 (file)
@@ -292,7 +292,7 @@ struct pmu {
         *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
         *  -EINVAL     -- @event is for this PMU but @event is not valid
         *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
-        *  -EACCESS    -- @event is for this PMU, @event is valid, but no privilidges
+        *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
         *
         *  0           -- @event is for this PMU and valid
         *
index 6eaa53c..30e676b 100644 (file)
@@ -51,7 +51,10 @@ struct sdma_script_start_addrs {
        /* End of v2 array */
        s32 zcanfd_2_mcu_addr;
        s32 zqspi_2_mcu_addr;
+       s32 mcu_2_ecspi_addr;
        /* End of v3 array */
+       s32 mcu_2_zqspi_addr;
+       /* End of v4 array */
 };
 
 /**
index 4c441be..e057d1f 100644 (file)
@@ -637,6 +637,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
  * struct dev_pm_domain - power management domain representation.
  *
  * @ops: Power management operations associated with this domain.
+ * @start: Called when a user needs to start the device via the domain.
  * @detach: Called when removing a device from the domain.
  * @activate: Called before executing probe routines for bus types and drivers.
  * @sync: Called after successful driver probe.
@@ -648,6 +649,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
  */
 struct dev_pm_domain {
        struct dev_pm_ops       ops;
+       int (*start)(struct device *dev);
        void (*detach)(struct device *dev, bool power_off);
        int (*activate)(struct device *dev);
        void (*sync)(struct device *dev);
index baf02ff..5a31c71 100644 (file)
@@ -366,6 +366,7 @@ struct device *dev_pm_domain_attach_by_id(struct device *dev,
 struct device *dev_pm_domain_attach_by_name(struct device *dev,
                                            const char *name);
 void dev_pm_domain_detach(struct device *dev, bool power_off);
+int dev_pm_domain_start(struct device *dev);
 void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
 #else
 static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
@@ -383,6 +384,10 @@ static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
        return NULL;
 }
 static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+static inline int dev_pm_domain_start(struct device *dev)
+{
+       return 0;
+}
 static inline void dev_pm_domain_set(struct device *dev,
                                     struct dev_pm_domain *pd) {}
 #endif
index b8197ab..7478618 100644 (file)
@@ -22,6 +22,7 @@ struct opp_table;
 
 enum dev_pm_opp_event {
        OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+       OPP_EVENT_ADJUST_VOLTAGE,
 };
 
 /**
@@ -113,6 +114,10 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq,
 void dev_pm_opp_remove(struct device *dev, unsigned long freq);
 void dev_pm_opp_remove_all_dynamic(struct device *dev);
 
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+                             unsigned long u_volt, unsigned long u_volt_min,
+                             unsigned long u_volt_max);
+
 int dev_pm_opp_enable(struct device *dev, unsigned long freq);
 
 int dev_pm_opp_disable(struct device *dev, unsigned long freq);
@@ -242,6 +247,14 @@ static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
 {
 }
 
+static inline int
+dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+                         unsigned long u_volt, unsigned long u_volt_min,
+                         unsigned long u_volt_max)
+{
+       return 0;
+}
+
 static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
 {
        return 0;
index 222c3e0..ebf5ef1 100644 (file)
@@ -34,8 +34,6 @@ enum pm_qos_flags_status {
 #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT    PM_QOS_LATENCY_ANY
 #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
-#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE     0
-#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE     (-1)
 #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
 
 #define PM_QOS_FLAG_NO_POWER_OFF       (1 << 0)
@@ -54,8 +52,6 @@ struct pm_qos_flags_request {
 enum dev_pm_qos_req_type {
        DEV_PM_QOS_RESUME_LATENCY = 1,
        DEV_PM_QOS_LATENCY_TOLERANCE,
-       DEV_PM_QOS_MIN_FREQUENCY,
-       DEV_PM_QOS_MAX_FREQUENCY,
        DEV_PM_QOS_FLAGS,
 };
 
@@ -97,14 +93,10 @@ struct pm_qos_flags {
 struct dev_pm_qos {
        struct pm_qos_constraints resume_latency;
        struct pm_qos_constraints latency_tolerance;
-       struct pm_qos_constraints min_frequency;
-       struct pm_qos_constraints max_frequency;
        struct pm_qos_flags flags;
        struct dev_pm_qos_request *resume_latency_req;
        struct dev_pm_qos_request *latency_tolerance_req;
        struct dev_pm_qos_request *flags_req;
-       struct dev_pm_qos_request *min_frequency_req;
-       struct dev_pm_qos_request *max_frequency_req;
 };
 
 /* Action requested to pm_qos_update_target */
@@ -199,10 +191,6 @@ static inline s32 dev_pm_qos_read_value(struct device *dev,
        switch (type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
        default:
                WARN_ON(1);
                return 0;
@@ -267,4 +255,48 @@ static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
 }
 #endif
 
+#define FREQ_QOS_MIN_DEFAULT_VALUE     0
+#define FREQ_QOS_MAX_DEFAULT_VALUE     (-1)
+
+enum freq_qos_req_type {
+       FREQ_QOS_MIN = 1,
+       FREQ_QOS_MAX,
+};
+
+struct freq_constraints {
+       struct pm_qos_constraints min_freq;
+       struct blocking_notifier_head min_freq_notifiers;
+       struct pm_qos_constraints max_freq;
+       struct blocking_notifier_head max_freq_notifiers;
+};
+
+struct freq_qos_request {
+       enum freq_qos_req_type type;
+       struct plist_node pnode;
+       struct freq_constraints *qos;
+};
+
+static inline int freq_qos_request_active(struct freq_qos_request *req)
+{
+       return !IS_ERR_OR_NULL(req->qos);
+}
+
+void freq_constraints_init(struct freq_constraints *qos);
+
+s32 freq_qos_read_value(struct freq_constraints *qos,
+                       enum freq_qos_req_type type);
+
+int freq_qos_add_request(struct freq_constraints *qos,
+                        struct freq_qos_request *req,
+                        enum freq_qos_req_type type, s32 value);
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
+int freq_qos_remove_request(struct freq_qos_request *req);
+
+int freq_qos_add_notifier(struct freq_constraints *qos,
+                         enum freq_qos_req_type type,
+                         struct notifier_block *notifier);
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+                            enum freq_qos_req_type type,
+                            struct notifier_block *notifier);
+
 #endif
index b511601..63e6237 100644 (file)
@@ -316,24 +316,6 @@ radix_tree_iter_lookup(const struct radix_tree_root *root,
 }
 
 /**
- * radix_tree_iter_find - find a present entry
- * @root: radix tree root
- * @iter: iterator state
- * @index: start location
- *
- * This function returns the slot containing the entry with the lowest index
- * which is at least @index.  If @index is larger than any present entry, this
- * function returns NULL.  The @iter is updated to describe the entry found.
- */
-static inline void __rcu **
-radix_tree_iter_find(const struct radix_tree_root *root,
-                       struct radix_tree_iter *iter, unsigned long index)
-{
-       radix_tree_iter_init(iter, index);
-       return radix_tree_next_chunk(root, iter, 0);
-}
-
-/**
  * radix_tree_iter_retry - retry this chunk of the iteration
  * @iter:      iterator state
  *
index 9326d67..eaae6b4 100644 (file)
@@ -7,7 +7,7 @@
 struct reset_controller_dev;
 
 /**
- * struct reset_control_ops
+ * struct reset_control_ops - reset controller driver callbacks
  *
  * @reset: for self-deasserting resets, does all necessary
  *         things to reset the device
@@ -33,7 +33,7 @@ struct of_phandle_args;
  * @provider: name of the reset controller device controlling this reset line
  * @index: ID of the reset controller in the reset controller device
  * @dev_id: name of the device associated with this reset line
- * @con_id name of the reset line (can be NULL)
+ * @con_id: name of the reset line (can be NULL)
  */
 struct reset_control_lookup {
        struct list_head list;
index e7793fc..eb597e8 100644 (file)
@@ -143,7 +143,7 @@ static inline int device_reset_optional(struct device *dev)
  * If this function is called more than once for the same reset_control it will
  * return -EBUSY.
  *
- * See reset_control_get_shared for details on shared references to
+ * See reset_control_get_shared() for details on shared references to
  * reset-controls.
  *
  * Use of id names is optional.
index a8d59d6..9df7547 100644 (file)
@@ -105,6 +105,7 @@ enum lockdown_reason {
        LOCKDOWN_NONE,
        LOCKDOWN_MODULE_SIGNATURE,
        LOCKDOWN_DEV_MEM,
+       LOCKDOWN_EFI_TEST,
        LOCKDOWN_KEXEC,
        LOCKDOWN_HIBERNATION,
        LOCKDOWN_PCI_ACCESS,
index 7914fda..64a395c 100644 (file)
@@ -1354,7 +1354,8 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6
        return skb->hash;
 }
 
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+                          const siphash_key_t *perturb);
 
 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 {
@@ -1495,6 +1496,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
 }
 
 /**
+ *     skb_queue_empty_lockless - check if a queue is empty
+ *     @list: queue head
+ *
+ *     Returns true if the queue is empty, false otherwise.
+ *     This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+       return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
+/**
  *     skb_queue_is_last - check if skb is the last entry in the queue
  *     @list: queue head
  *     @skb: buffer
@@ -1847,9 +1861,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
 {
-       newsk->next = next;
-       newsk->prev = prev;
-       next->prev  = prev->next = newsk;
+       /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+       WRITE_ONCE(newsk->next, next);
+       WRITE_ONCE(newsk->prev, prev);
+       WRITE_ONCE(next->prev, newsk);
+       WRITE_ONCE(prev->next, newsk);
        list->qlen++;
 }
 
@@ -1860,11 +1876,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
        struct sk_buff *first = list->next;
        struct sk_buff *last = list->prev;
 
-       first->prev = prev;
-       prev->next = first;
+       WRITE_ONCE(first->prev, prev);
+       WRITE_ONCE(prev->next, first);
 
-       last->next = next;
-       next->prev = last;
+       WRITE_ONCE(last->next, next);
+       WRITE_ONCE(next->prev, last);
 }
 
 /**
@@ -2005,8 +2021,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
        next       = skb->next;
        prev       = skb->prev;
        skb->next  = skb->prev = NULL;
-       next->prev = prev;
-       prev->next = next;
+       WRITE_ONCE(next->prev, prev);
+       WRITE_ONCE(prev->next, next);
 }
 
 /**
index e4b3fb4..ce70552 100644 (file)
@@ -139,6 +139,11 @@ static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
        }
 }
 
+static inline u32 sk_msg_iter_dist(u32 start, u32 end)
+{
+       return end >= start ? end - start : end + (MAX_MSG_FRAGS - start);
+}
+
 #define sk_msg_iter_var_prev(var)                      \
        do {                                            \
                if (var == 0)                           \
@@ -198,9 +203,7 @@ static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
        if (sk_msg_full(msg))
                return MAX_MSG_FRAGS;
 
-       return msg->sg.end >= msg->sg.start ?
-               msg->sg.end - msg->sg.start :
-               msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start);
+       return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
 }
 
 static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
index fc0bed5..4049d97 100644 (file)
@@ -263,7 +263,7 @@ struct ucred {
 #define PF_MAX         AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
-#define SOMAXCONN      128
+#define SOMAXCONN      4096
 
 /* Flags we can use with send/ and recv.
    Added those for 1003.1g not all are supported yet
index 87d27e1..d796058 100644 (file)
@@ -64,6 +64,11 @@ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
        return 0;
 }
 
+static inline void xprt_destroy_backchannel(struct rpc_xprt *xprt,
+                                           unsigned int max_reqs)
+{
+}
+
 static inline bool svc_is_backchannel(const struct svc_rqst *rqstp)
 {
        return false;
index 5420817..fa7ee50 100644 (file)
@@ -196,9 +196,9 @@ struct bin_attribute {
        .size   = _size,                                                \
 }
 
-#define __BIN_ATTR_WO(_name) {                                         \
+#define __BIN_ATTR_WO(_name, _size) {                                  \
        .attr   = { .name = __stringify(_name), .mode = 0200 },         \
-       .store  = _name##_store,                                        \
+       .write  = _name##_write,                                        \
        .size   = _size,                                                \
 }
 
index 4c7781f..07875cc 100644 (file)
@@ -48,7 +48,6 @@ struct virtio_vsock_sock {
 
 struct virtio_vsock_pkt {
        struct virtio_vsock_hdr hdr;
-       struct work_struct work;
        struct list_head list;
        /* socket refcnt not held, only use for cancellation */
        struct vsock_sock *vsk;
index f7fe456..3d56b02 100644 (file)
@@ -159,7 +159,6 @@ struct slave {
        unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
        s8     link;            /* one of BOND_LINK_XXXX */
        s8     link_new_state;  /* one of BOND_LINK_XXXX */
-       s8     new_link;
        u8     backup:1,   /* indicates backup slave. Value corresponds with
                              BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
               inactive:1, /* indicates inactive slave */
@@ -203,7 +202,6 @@ struct bonding {
        struct   slave __rcu *primary_slave;
        struct   bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
        bool     force_primary;
-       u32      nest_level;
        s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
        int     (*recv_probe)(const struct sk_buff *, struct bonding *,
                              struct slave *);
@@ -239,6 +237,7 @@ struct bonding {
        struct   dentry *debug_dir;
 #endif /* CONFIG_DEBUG_FS */
        struct rtnl_link_stats64 bond_stats;
+       struct lock_class_key stats_lock_key;
 };
 
 #define bond_slave_get_rcu(dev) \
@@ -549,7 +548,7 @@ static inline void bond_propose_link_state(struct slave *slave, int state)
 
 static inline void bond_commit_link_state(struct slave *slave, bool notify)
 {
-       if (slave->link == slave->link_new_state)
+       if (slave->link_new_state == BOND_LINK_NOCHANGE)
                return;
 
        slave->link = slave->link_new_state;
index 127a5c4..86e0283 100644 (file)
@@ -122,7 +122,7 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
 static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
-       sk->sk_napi_id = skb->napi_id;
+       WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
        sk_rx_queue_set(sk, skb);
 }
@@ -132,8 +132,8 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
                                        const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
-       if (!sk->sk_napi_id)
-               sk->sk_napi_id = skb->napi_id;
+       if (!READ_ONCE(sk->sk_napi_id))
+               WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
 }
 
index 23e4b65..2116c88 100644 (file)
@@ -38,7 +38,8 @@ struct devlink {
        struct device *dev;
        possible_net_t _net;
        struct mutex lock;
-       bool reload_failed;
+       u8 reload_failed:1,
+          reload_enabled:1;
        char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -774,6 +775,8 @@ struct ib_device;
 struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
 int devlink_register(struct devlink *devlink, struct device *dev);
 void devlink_unregister(struct devlink *devlink);
+void devlink_reload_enable(struct devlink *devlink);
+void devlink_reload_disable(struct devlink *devlink);
 void devlink_free(struct devlink *devlink);
 int devlink_port_register(struct devlink *devlink,
                          struct devlink_port *devlink_port,
index 90bd210..5cd1227 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/types.h>
 #include <linux/in6.h>
+#include <linux/siphash.h>
 #include <uapi/linux/if_ether.h>
 
 /**
@@ -276,7 +277,7 @@ struct flow_keys_basic {
 struct flow_keys {
        struct flow_dissector_key_control control;
 #define FLOW_KEYS_HASH_START_FIELD basic
-       struct flow_dissector_key_basic basic;
+       struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
        struct flow_dissector_key_tags tags;
        struct flow_dissector_key_vlan vlan;
        struct flow_dissector_key_vlan cvlan;
index d126b5d..2ad85e6 100644 (file)
@@ -69,7 +69,7 @@ struct fq {
        struct list_head backlogs;
        spinlock_t lock;
        u32 flows_cnt;
-       u32 perturbation;
+       siphash_key_t   perturbation;
        u32 limit;
        u32 memory_limit;
        u32 memory_usage;
index be40a4b..38a9a3d 100644 (file)
@@ -108,7 +108,7 @@ begin:
 
 static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
 {
-       u32 hash = skb_get_hash_perturb(skb, fq->perturbation);
+       u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
 
        return reciprocal_scale(hash, fq->flows_cnt);
 }
@@ -308,12 +308,12 @@ static int fq_init(struct fq *fq, int flows_cnt)
        INIT_LIST_HEAD(&fq->backlogs);
        spin_lock_init(&fq->lock);
        fq->flows_cnt = max_t(u32, flows_cnt, 1);
-       fq->perturbation = prandom_u32();
+       get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
        fq->quantum = 300;
        fq->limit = 8192;
        fq->memory_limit = 16 << 20; /* 16 MBytes */
 
-       fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
+       fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
        if (!fq->flows)
                return -ENOMEM;
 
@@ -331,7 +331,7 @@ static void fq_reset(struct fq *fq,
        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_reset(fq, &fq->flows[i], free_func);
 
-       kfree(fq->flows);
+       kvfree(fq->flows);
        fq->flows = NULL;
 }
 
index 81643cf..c814446 100644 (file)
@@ -21,9 +21,13 @@ void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf);
 int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
 int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num);
 #else
-void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
-int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; }
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
+static inline void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
+
+static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
+{ return 0; }
+
+static inline int hwbm_pool_add(struct hwbm_pool *bm_pool,
+                               unsigned int buf_num)
 { return 0; }
 #endif /* CONFIG_HWBM */
 #endif /* _HWBM_H */
index 95bb77f..a2c61c3 100644 (file)
@@ -185,7 +185,7 @@ static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
 }
 
 struct ip_frag_state {
-       struct iphdr    *iph;
+       bool            DF;
        unsigned int    hlen;
        unsigned int    ll_rs;
        unsigned int    mtu;
@@ -196,7 +196,7 @@ struct ip_frag_state {
 };
 
 void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
-                 unsigned int mtu, struct ip_frag_state *state);
+                 unsigned int mtu, bool DF, struct ip_frag_state *state);
 struct sk_buff *ip_frag_next(struct sk_buff *skb,
                             struct ip_frag_state *state);
 
index 3759167..078887c 100644 (file)
@@ -889,6 +889,7 @@ struct netns_ipvs {
        struct delayed_work     defense_work;   /* Work handler */
        int                     drop_rate;
        int                     drop_counter;
+       int                     old_secure_tcp;
        atomic_t                dropentry;
        /* locks in ctl.c */
        spinlock_t              dropentry_lock;  /* drop entry handling */
index 50a67bd..b8452cc 100644 (file)
@@ -439,8 +439,8 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 {
        unsigned long now = jiffies;
        
-       if (neigh->used != now)
-               neigh->used = now;
+       if (READ_ONCE(neigh->used) != now)
+               WRITE_ONCE(neigh->used, now);
        if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
                return __neigh_event_send(neigh, skb);
        return 0;
index 4c2cd93..c7e15a2 100644 (file)
@@ -342,7 +342,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
 #define __net_initconst        __initconst
 #endif
 
-int peernet2id_alloc(struct net *net, struct net *peer);
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
 int peernet2id(struct net *net, struct net *peer);
 bool peernet_has_id(struct net *net, struct net *peer);
 struct net *get_net_ns_by_id(struct net *net, int id);
index 001d294..2d0275f 100644 (file)
@@ -820,7 +820,8 @@ struct nft_expr_ops {
  */
 struct nft_expr {
        const struct nft_expr_ops       *ops;
-       unsigned char                   data[];
+       unsigned char                   data[]
+               __attribute__((aligned(__alignof__(u64))));
 };
 
 static inline void *nft_expr_priv(const struct nft_expr *expr)
index 637548d..d80acda 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mutex.h>
 #include <linux/rwsem.h>
 #include <linux/atomic.h>
+#include <linux/hashtable.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
 #include <net/flow_offload.h>
@@ -362,6 +363,7 @@ struct tcf_proto {
        bool                    deleting;
        refcount_t              refcnt;
        struct rcu_head         rcu;
+       struct hlist_node       destroy_ht_node;
 };
 
 struct qdisc_skb_cb {
@@ -414,6 +416,8 @@ struct tcf_block {
                struct list_head filter_chain_list;
        } chain0;
        struct rcu_head rcu;
+       DECLARE_HASHTABLE(proto_destroy_ht, 7);
+       struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
 };
 
 #ifdef CONFIG_PROVE_LOCKING
index f69b58b..718e62f 100644 (file)
@@ -954,8 +954,8 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
 {
        int cpu = raw_smp_processor_id();
 
-       if (unlikely(sk->sk_incoming_cpu != cpu))
-               sk->sk_incoming_cpu = cpu;
+       if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
+               WRITE_ONCE(sk->sk_incoming_cpu, cpu);
 }
 
 static inline void sock_rps_record_flow_hash(__u32 hash)
@@ -2242,12 +2242,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
  * sk_page_frag - return an appropriate page_frag
  * @sk: socket
  *
- * If socket allocation mode allows current thread to sleep, it means its
- * safe to use the per task page_frag instead of the per socket one.
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in the normal context and owns
+ * everything that's associated with %current.
+ *
+ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
+ * inside other socket operations and end up recursing into sk_page_frag()
+ * while it's already in use.
  */
 static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-       if (gfpflags_allow_blocking(sk->sk_allocation))
+       if (gfpflags_normal_context(sk->sk_allocation))
                return &current->task_frag;
 
        return &sk->sk_frag;
@@ -2337,7 +2342,7 @@ static inline ktime_t sock_read_timestamp(struct sock *sk)
 
        return kt;
 #else
-       return sk->sk_stamp;
+       return READ_ONCE(sk->sk_stamp);
 #endif
 }
 
@@ -2348,7 +2353,7 @@ static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
        sk->sk_stamp = kt;
        write_sequnlock(&sk->sk_stamp_seq);
 #else
-       sk->sk_stamp = kt;
+       WRITE_ONCE(sk->sk_stamp, kt);
 #endif
 }
 
index c664e6d..794e297 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/socket.h>
 #include <linux/tcp.h>
 #include <linux/skmsg.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/rcupdate.h>
 
@@ -269,6 +270,10 @@ struct tls_context {
 
        bool in_tcp_sendpages;
        bool pending_open_record_frags;
+
+       struct mutex tx_lock; /* protects partially_sent_* fields and
+                              * per-type TX fields
+                              */
        unsigned long flags;
 
        /* cache cold stuff */
index 335283d..373aadc 100644 (file)
@@ -197,6 +197,7 @@ struct vxlan_rdst {
        u8                       offloaded:1;
        __be32                   remote_vni;
        u32                      remote_ifindex;
+       struct net_device        *remote_dev;
        struct list_head         list;
        struct rcu_head          rcu;
        struct dst_cache         dst_cache;
index 6a47ba8..e7e733a 100644 (file)
@@ -366,7 +366,7 @@ struct ib_tm_caps {
 
 struct ib_cq_init_attr {
        unsigned int    cqe;
-       int             comp_vector;
+       u32             comp_vector;
        u32             flags;
 };
 
index 985a5f5..31f76b6 100644 (file)
@@ -135,9 +135,9 @@ int asoc_simple_init_priv(struct asoc_simple_priv *priv,
                               struct link_info *li);
 
 #ifdef DEBUG
-inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
-                                 char *name,
-                                 struct asoc_simple_dai *dai)
+static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
+                                        char *name,
+                                        struct asoc_simple_dai *dai)
 {
        struct device *dev = simple_priv_to_dev(priv);
 
@@ -167,7 +167,7 @@ inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
                dev_dbg(dev, "%s clk %luHz\n", name, clk_get_rate(dai->clk));
 }
 
-inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
+static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
 {
        struct snd_soc_card *card = simple_priv_to_card(priv);
        struct device *dev = simple_priv_to_dev(priv);
index 5df604d..75ae189 100644 (file)
@@ -1688,6 +1688,7 @@ TRACE_EVENT(qgroup_update_reserve,
                __entry->qgid           = qgroup->qgroupid;
                __entry->cur_reserved   = qgroup->rsv.values[type];
                __entry->diff           = diff;
+               __entry->type           = type;
        ),
 
        TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
@@ -1710,6 +1711,7 @@ TRACE_EVENT(qgroup_meta_reserve,
        TP_fast_assign_btrfs(root->fs_info,
                __entry->refroot        = root->root_key.objectid;
                __entry->diff           = diff;
+               __entry->type           = type;
        ),
 
        TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
@@ -1726,7 +1728,6 @@ TRACE_EVENT(qgroup_meta_convert,
        TP_STRUCT__entry_btrfs(
                __field(        u64,    refroot                 )
                __field(        s64,    diff                    )
-               __field(        int,    type                    )
        ),
 
        TP_fast_assign_btrfs(root->fs_info,
index 2bc9960..cf97f63 100644 (file)
@@ -86,7 +86,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
                              sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
        ),
 
-       TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n",
+       TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
                  __entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
                  __entry->saddr_v6, __entry->daddr_v6,
                  show_tcp_state_name(__entry->state))
index 1e988fd..6a6d2c7 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
 /*
  * linux/can.h
  *
index 0fb328d..dd2b925 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
 /*
  * linux/can/bcm.h
  *
index bfc4b5d..3463328 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
 /*
  * linux/can/error.h
  *
index 3aea538..c2190bb 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
 /*
  * linux/can/gw.h
  *
index c323253..df6e821 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
  * j1939.h
  *
index 1bc70d3..6f598b7 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
  * linux/can/netlink.h
  *
index be3b36e..6a11d30 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
 /*
  * linux/can/raw.h
  *
index 066812d..4fa9d87 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 #ifndef _UAPI_CAN_VXCAN_H
 #define _UAPI_CAN_VXCAN_H
 
index 580b7a2..a8a2174 100644 (file)
@@ -421,6 +421,7 @@ enum devlink_attr {
 
        DEVLINK_ATTR_RELOAD_FAILED,                     /* u8 0 or 1 */
 
+       DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,        /* u64 */
        /* add new attributes above here, update the policy in devlink.c */
 
        __DEVLINK_ATTR_MAX,
index 802b037..373cada 100644 (file)
  *
  * Protocol changelog:
  *
+ * 7.1:
+ *  - add the following messages:
+ *      FUSE_SETATTR, FUSE_SYMLINK, FUSE_MKNOD, FUSE_MKDIR, FUSE_UNLINK,
+ *      FUSE_RMDIR, FUSE_RENAME, FUSE_LINK, FUSE_OPEN, FUSE_READ, FUSE_WRITE,
+ *      FUSE_RELEASE, FUSE_FSYNC, FUSE_FLUSH, FUSE_SETXATTR, FUSE_GETXATTR,
+ *      FUSE_LISTXATTR, FUSE_REMOVEXATTR, FUSE_OPENDIR, FUSE_READDIR,
+ *      FUSE_RELEASEDIR
+ *  - add padding to messages to accommodate 32-bit servers on 64-bit kernels
+ *
+ * 7.2:
+ *  - add FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE flags
+ *  - add FUSE_FSYNCDIR message
+ *
+ * 7.3:
+ *  - add FUSE_ACCESS message
+ *  - add FUSE_CREATE message
+ *  - add filehandle to fuse_setattr_in
+ *
+ * 7.4:
+ *  - add frsize to fuse_kstatfs
+ *  - clean up request size limit checking
+ *
+ * 7.5:
+ *  - add flags and max_write to fuse_init_out
+ *
+ * 7.6:
+ *  - add max_readahead to fuse_init_in and fuse_init_out
+ *
+ * 7.7:
+ *  - add FUSE_INTERRUPT message
+ *  - add POSIX file lock support
+ *
+ * 7.8:
+ *  - add lock_owner and flags fields to fuse_release_in
+ *  - add FUSE_BMAP message
+ *  - add FUSE_DESTROY message
+ *
  * 7.9:
  *  - new fuse_getattr_in input argument of GETATTR
  *  - add lk_flags in fuse_lk_in
index e168dc5..d99b5a7 100644 (file)
@@ -63,6 +63,7 @@ struct nvme_passthru_cmd64 {
        __u32   cdw14;
        __u32   cdw15;
        __u32   timeout_ms;
+       __u32   rsvd2;
        __u64   result;
 };
 
index 59e89a1..9dc9d00 100644 (file)
 #define PTP_ENABLE_FEATURE (1<<0)
 #define PTP_RISING_EDGE    (1<<1)
 #define PTP_FALLING_EDGE   (1<<2)
+#define PTP_STRICT_FLAGS   (1<<3)
+#define PTP_EXTTS_EDGES    (PTP_RISING_EDGE | PTP_FALLING_EDGE)
 
 /*
  * flag fields valid for the new PTP_EXTTS_REQUEST2 ioctl.
  */
 #define PTP_EXTTS_VALID_FLAGS  (PTP_ENABLE_FEATURE |   \
                                 PTP_RISING_EDGE |      \
-                                PTP_FALLING_EDGE)
+                                PTP_FALLING_EDGE |     \
+                                PTP_STRICT_FLAGS)
 
 /*
  * flag fields valid for the original PTP_EXTTS_REQUEST ioctl.
index 99335e1..25b4fa0 100644 (file)
  *               sent when the child exits.
  * @stack:       Specify the location of the stack for the
  *               child process.
+ *               Note, @stack is expected to point to the
+ *               lowest address. The stack direction will be
+ *               determined by the kernel and set up
+ *               appropriately based on @stack_size.
  * @stack_size:  The size of the stack for the child process.
  * @tls:         If CLONE_SETTLS is set, the tls descriptor
  *               is set to tls.
index 1f31c2f..4508d5e 100644 (file)
@@ -351,12 +351,12 @@ static int audit_get_nd(struct audit_watch *watch, struct path *parent)
        struct dentry *d = kern_path_locked(watch->path, parent);
        if (IS_ERR(d))
                return PTR_ERR(d);
-       inode_unlock(d_backing_inode(parent->dentry));
        if (d_is_positive(d)) {
                /* update watch filter fields */
                watch->dev = d->d_sb->s_dev;
                watch->ino = d_backing_inode(d)->i_ino;
        }
+       inode_unlock(d_backing_inode(parent->dentry));
        dput(d);
        return 0;
 }
index ddd8add..a3eaf08 100644 (file)
@@ -1311,12 +1311,12 @@ static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
                return false;
 
        switch (off) {
-       case offsetof(struct bpf_sysctl, write):
+       case bpf_ctx_range(struct bpf_sysctl, write):
                if (type != BPF_READ)
                        return false;
                bpf_ctx_record_field_size(info, size_default);
                return bpf_ctx_narrow_access_ok(off, size, size_default);
-       case offsetof(struct bpf_sysctl, file_pos):
+       case bpf_ctx_range(struct bpf_sysctl, file_pos):
                if (type == BPF_READ) {
                        bpf_ctx_record_field_size(info, size_default);
                        return bpf_ctx_narrow_access_ok(off, size, size_default);
index 66088a9..ef0e1e3 100644 (file)
@@ -502,7 +502,7 @@ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
        return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 }
 
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 {
        int i;
 
index d27f3b6..3867864 100644 (file)
@@ -128,7 +128,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 
                if (!dtab->n_buckets) /* Overflow check */
                        return -EINVAL;
-               cost += sizeof(struct hlist_head) * dtab->n_buckets;
+               cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
        }
 
        /* if map size is larger than memlock limit, reject it */
@@ -719,6 +719,32 @@ const struct bpf_map_ops dev_map_hash_ops = {
        .map_check_btf = map_check_no_btf,
 };
 
+static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
+                                      struct net_device *netdev)
+{
+       unsigned long flags;
+       u32 i;
+
+       spin_lock_irqsave(&dtab->index_lock, flags);
+       for (i = 0; i < dtab->n_buckets; i++) {
+               struct bpf_dtab_netdev *dev;
+               struct hlist_head *head;
+               struct hlist_node *next;
+
+               head = dev_map_index_hash(dtab, i);
+
+               hlist_for_each_entry_safe(dev, next, head, index_hlist) {
+                       if (netdev != dev->dev)
+                               continue;
+
+                       dtab->items--;
+                       hlist_del_rcu(&dev->index_hlist);
+                       call_rcu(&dev->rcu, __dev_map_entry_free);
+               }
+       }
+       spin_unlock_irqrestore(&dtab->index_lock, flags);
+}
+
 static int dev_map_notification(struct notifier_block *notifier,
                                ulong event, void *ptr)
 {
@@ -735,6 +761,11 @@ static int dev_map_notification(struct notifier_block *notifier,
                 */
                rcu_read_lock();
                list_for_each_entry_rcu(dtab, &dev_map_list, list) {
+                       if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+                               dev_map_hash_remove_netdev(dtab, netdev);
+                               continue;
+                       }
+
                        for (i = 0; i < dtab->map.max_entries; i++) {
                                struct bpf_dtab_netdev *dev, *odev;
 
index 82eabd4..ace1cfa 100644 (file)
@@ -126,7 +126,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
        return map;
 }
 
-void *bpf_map_area_alloc(size_t size, int numa_node)
+void *bpf_map_area_alloc(u64 size, int numa_node)
 {
        /* We really just want to fail instead of triggering OOM killer
         * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -141,6 +141,9 @@ void *bpf_map_area_alloc(size_t size, int numa_node)
        const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
        void *area;
 
+       if (size >= SIZE_MAX)
+               return NULL;
+
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
                                    numa_node);
@@ -197,7 +200,7 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
                atomic_long_sub(pages, &user->locked_vm);
 }
 
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 {
        u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
        struct user_struct *user;
@@ -1326,24 +1329,32 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 {
        struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 
+       kvfree(aux->func_info);
        free_used_maps(aux);
        bpf_prog_uncharge_memlock(aux->prog);
        security_bpf_prog_free(aux);
        bpf_prog_free(aux->prog);
 }
 
+static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
+{
+       bpf_prog_kallsyms_del_all(prog);
+       btf_put(prog->aux->btf);
+       bpf_prog_free_linfo(prog);
+
+       if (deferred)
+               call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+       else
+               __bpf_prog_put_rcu(&prog->aux->rcu);
+}
+
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
                perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
-               bpf_prog_kallsyms_del_all(prog);
-               btf_put(prog->aux->btf);
-               kvfree(prog->aux->func_info);
-               bpf_prog_free_linfo(prog);
-
-               call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+               __bpf_prog_put_noref(prog, true);
        }
 }
 
@@ -1741,11 +1752,12 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
        return err;
 
 free_used_maps:
-       bpf_prog_free_linfo(prog);
-       kvfree(prog->aux->func_info);
-       btf_put(prog->aux->btf);
-       bpf_prog_kallsyms_del_subprogs(prog);
-       free_used_maps(prog->aux);
+       /* In case we have subprogs, we need to wait for a grace
+        * period before we can tear down JIT memory since symbols
+        * are already exposed under kallsyms.
+        */
+       __bpf_prog_put_noref(prog, prog->aux->func_cnt);
+       return err;
 free_prog:
        bpf_prog_uncharge_memlock(prog);
 free_prog_sec:
index 080561b..ef4242e 100644 (file)
@@ -2119,11 +2119,12 @@ int cgroup_do_get_tree(struct fs_context *fc)
 
                nsdentry = kernfs_node_dentry(cgrp->kn, sb);
                dput(fc->root);
-               fc->root = nsdentry;
                if (IS_ERR(nsdentry)) {
-                       ret = PTR_ERR(nsdentry);
                        deactivate_locked_super(sb);
+                       ret = PTR_ERR(nsdentry);
+                       nsdentry = NULL;
                }
+               fc->root = nsdentry;
        }
 
        if (!ctx->kfc.new_sb_created)
index c52bc91..c87ee64 100644 (file)
@@ -798,7 +798,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
                    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
                        continue;
 
-               if (is_sched_load_balance(cp))
+               if (is_sched_load_balance(cp) &&
+                   !cpumask_empty(cp->effective_cpus))
                        csa[csn++] = cp;
 
                /* skip @cp's subtree if not a partition root */
index fc28e17..e2cad3e 100644 (file)
@@ -2373,7 +2373,18 @@ void __init boot_cpu_hotplug_init(void)
        this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
 }
 
-enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+       CPU_MITIGATIONS_OFF,
+       CPU_MITIGATIONS_AUTO,
+       CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+static enum cpu_mitigations cpu_mitigations __ro_after_init =
+       CPU_MITIGATIONS_AUTO;
 
 static int __init mitigations_parse_cmdline(char *arg)
 {
@@ -2390,3 +2401,17 @@ static int __init mitigations_parse_cmdline(char *arg)
        return 0;
 }
 early_param("mitigations", mitigations_parse_cmdline);
+
+/* mitigations=off */
+bool cpu_mitigations_off(void)
+{
+       return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_off);
+
+/* mitigations=auto,nosmt */
+bool cpu_mitigations_auto_nosmt(void)
+{
+       return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
index 9ec0b0b..00a0146 100644 (file)
@@ -1031,7 +1031,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 {
 }
 
-void
+static inline void
 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
 {
 }
@@ -5607,8 +5607,10 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                perf_pmu_output_stop(event);
 
                /* now it's safe to free the pages */
-               atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
-               atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
+               if (!rb->aux_mmap_locked)
+                       atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
+               else
+                       atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
 
                /* this has to be the last one */
                rb_free_aux(rb);
@@ -6947,7 +6949,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 static int __perf_pmu_output_stop(void *info)
 {
        struct perf_event *event = info;
-       struct pmu *pmu = event->pmu;
+       struct pmu *pmu = event->ctx->pmu;
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
        struct remote_output ro = {
                .rb     = event->rb,
@@ -10533,6 +10535,15 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                goto err_ns;
        }
 
+       /*
+        * Disallow uncore-cgroup events, they don't make sense as the cgroup will
+        * be different on other CPUs in the uncore mask.
+        */
+       if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) {
+               err = -EINVAL;
+               goto err_pmu;
+       }
+
        if (event->attr.aux_output &&
            !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
                err = -EOPNOTSUPP;
@@ -10633,7 +10644,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 
        attr->size = size;
 
-       if (attr->__reserved_1)
+       if (attr->__reserved_1 || attr->__reserved_2)
                return -EINVAL;
 
        if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
@@ -11321,8 +11332,11 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
        int err;
 
        /*
-        * Get the target context (task or percpu):
+        * Grouping is not supported for kernel events, neither is 'AUX',
+        * make sure the caller's intentions are adjusted.
         */
+       if (attr->aux_output)
+               return ERR_PTR(-EINVAL);
 
        event = perf_event_alloc(attr, cpu, task, NULL, NULL,
                                 overflow_handler, context, -1);
@@ -11334,6 +11348,9 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
        /* Mark owner so we could distinguish it from user events. */
        event->owner = TASK_TOMBSTONE;
 
+       /*
+        * Get the target context (task or percpu):
+        */
        ctx = find_get_context(event->pmu, task, event);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
@@ -11785,7 +11802,7 @@ inherit_event(struct perf_event *parent_event,
                                                   GFP_KERNEL);
                if (!child_ctx->task_ctx_data) {
                        free_event(child_event);
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
                }
        }
 
@@ -11888,7 +11905,7 @@ static int inherit_group(struct perf_event *parent_event,
                if (IS_ERR(child_ctr))
                        return PTR_ERR(child_ctr);
 
-               if (sub->aux_event == parent_event &&
+               if (sub->aux_event == parent_event && child_ctr &&
                    !perf_get_aux_event(child_ctr, leader))
                        return -EINVAL;
        }
index bcdf531..55af693 100644 (file)
@@ -2561,7 +2561,35 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
        return 0;
 }
 
-static bool clone3_args_valid(const struct kernel_clone_args *kargs)
+/**
+ * clone3_stack_valid - check and prepare stack
+ * @kargs: kernel clone args
+ *
+ * Verify that the stack arguments userspace gave us are sane.
+ * In addition, set the stack direction for userspace since it's easy for us to
+ * determine.
+ */
+static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
+{
+       if (kargs->stack == 0) {
+               if (kargs->stack_size > 0)
+                       return false;
+       } else {
+               if (kargs->stack_size == 0)
+                       return false;
+
+               if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
+                       return false;
+
+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64)
+               kargs->stack += kargs->stack_size;
+#endif
+       }
+
+       return true;
+}
+
+static bool clone3_args_valid(struct kernel_clone_args *kargs)
 {
        /*
         * All lower bits of the flag word are taken.
@@ -2581,6 +2609,9 @@ static bool clone3_args_valid(const struct kernel_clone_args *kargs)
            kargs->exit_signal)
                return false;
 
+       if (!clone3_stack_valid(kargs))
+               return false;
+
        return true;
 }
 
index 132672b..dd822fd 100644 (file)
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
  * @type:      Type of irqchip_fwnode. See linux/irqdomain.h
  * @name:      Optional user provided domain name
  * @id:                Optional user provided id if name != NULL
- * @data:      Optional user-provided data
+ * @pa:                Optional user-provided physical address
  *
  * Allocate a struct irqchip_fwid, and return a poiner to the embedded
  * fwnode_handle (or NULL on failure).
index 9568a2f..a45cba7 100644 (file)
@@ -650,3 +650,249 @@ static int __init pm_qos_power_init(void)
 }
 
 late_initcall(pm_qos_power_init);
+
+/* Definitions related to the frequency QoS below. */
+
+/**
+ * freq_constraints_init - Initialize frequency QoS constraints.
+ * @qos: Frequency QoS constraints to initialize.
+ */
+void freq_constraints_init(struct freq_constraints *qos)
+{
+       struct pm_qos_constraints *c;
+
+       c = &qos->min_freq;
+       plist_head_init(&c->list);
+       c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+       c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+       c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+       c->type = PM_QOS_MAX;
+       c->notifiers = &qos->min_freq_notifiers;
+       BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+
+       c = &qos->max_freq;
+       plist_head_init(&c->list);
+       c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+       c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+       c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+       c->type = PM_QOS_MIN;
+       c->notifiers = &qos->max_freq_notifiers;
+       BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+}
+
+/**
+ * freq_qos_read_value - Get frequency QoS constraint for a given list.
+ * @qos: Constraints to evaluate.
+ * @type: QoS request type.
+ */
+s32 freq_qos_read_value(struct freq_constraints *qos,
+                       enum freq_qos_req_type type)
+{
+       s32 ret;
+
+       switch (type) {
+       case FREQ_QOS_MIN:
+               ret = IS_ERR_OR_NULL(qos) ?
+                       FREQ_QOS_MIN_DEFAULT_VALUE :
+                       pm_qos_read_value(&qos->min_freq);
+               break;
+       case FREQ_QOS_MAX:
+               ret = IS_ERR_OR_NULL(qos) ?
+                       FREQ_QOS_MAX_DEFAULT_VALUE :
+                       pm_qos_read_value(&qos->max_freq);
+               break;
+       default:
+               WARN_ON(1);
+               ret = 0;
+       }
+
+       return ret;
+}
+
+/**
+ * freq_qos_apply - Add/modify/remove frequency QoS request.
+ * @req: Constraint request to apply.
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ */
+static int freq_qos_apply(struct freq_qos_request *req,
+                         enum pm_qos_req_action action, s32 value)
+{
+       int ret;
+
+       switch(req->type) {
+       case FREQ_QOS_MIN:
+               ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
+                                          action, value);
+               break;
+       case FREQ_QOS_MAX:
+               ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
+                                          action, value);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+/**
+ * freq_qos_add_request - Insert new frequency QoS request into a given list.
+ * @qos: Constraints to update.
+ * @req: Preallocated request object.
+ * @type: Request type.
+ * @value: Request value.
+ *
+ * Insert a new entry into the @qos list of requests, recompute the effective
+ * QoS constraint value for that list and initialize the @req object.  The
+ * caller needs to save that object for later use in updates and removal.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_add_request(struct freq_constraints *qos,
+                        struct freq_qos_request *req,
+                        enum freq_qos_req_type type, s32 value)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(qos) || !req)
+               return -EINVAL;
+
+       if (WARN(freq_qos_request_active(req),
+                "%s() called for active request\n", __func__))
+               return -EINVAL;
+
+       req->qos = qos;
+       req->type = type;
+       ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
+       if (ret < 0) {
+               req->qos = NULL;
+               req->type = 0;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_request);
+
+/**
+ * freq_qos_update_request - Modify existing frequency QoS request.
+ * @req: Request to modify.
+ * @new_value: New request value.
+ *
+ * Update an existing frequency QoS request along with the effective constraint
+ * value for the list of requests it belongs to.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
+{
+       if (!req)
+               return -EINVAL;
+
+       if (WARN(!freq_qos_request_active(req),
+                "%s() called for unknown object\n", __func__))
+               return -EINVAL;
+
+       if (req->pnode.prio == new_value)
+               return 0;
+
+       return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
+}
+EXPORT_SYMBOL_GPL(freq_qos_update_request);
+
+/**
+ * freq_qos_remove_request - Remove frequency QoS request from its list.
+ * @req: Request to remove.
+ *
+ * Remove the given frequency QoS request from the list of constraints it
+ * belongs to and recompute the effective constraint value for that list.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_remove_request(struct freq_qos_request *req)
+{
+       int ret;
+
+       if (!req)
+               return -EINVAL;
+
+       if (WARN(!freq_qos_request_active(req),
+                "%s() called for unknown object\n", __func__))
+               return -EINVAL;
+
+       ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+       req->qos = NULL;
+       req->type = 0;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_request);
+
+/**
+ * freq_qos_add_notifier - Add frequency QoS change notifier.
+ * @qos: List of requests to add the notifier to.
+ * @type: Request type.
+ * @notifier: Notifier block to add.
+ */
+int freq_qos_add_notifier(struct freq_constraints *qos,
+                         enum freq_qos_req_type type,
+                         struct notifier_block *notifier)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(qos) || !notifier)
+               return -EINVAL;
+
+       switch (type) {
+       case FREQ_QOS_MIN:
+               ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
+                                                      notifier);
+               break;
+       case FREQ_QOS_MAX:
+               ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
+                                                      notifier);
+               break;
+       default:
+               WARN_ON(1);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
+
+/**
+ * freq_qos_remove_notifier - Remove frequency QoS change notifier.
+ * @qos: List of requests to remove the notifier from.
+ * @type: Request type.
+ * @notifier: Notifier block to remove.
+ */
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+                            enum freq_qos_req_type type,
+                            struct notifier_block *notifier)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(qos) || !notifier)
+               return -EINVAL;
+
+       switch (type) {
+       case FREQ_QOS_MIN:
+               ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
+                                                        notifier);
+               break;
+       case FREQ_QOS_MAX:
+               ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
+                                                        notifier);
+               break;
+       default:
+               WARN_ON(1);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
index dd05a37..44123b4 100644 (file)
@@ -1065,7 +1065,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
         * affecting a valid clamp bucket, the next time it's enqueued,
         * it will already see the updated clamp bucket value.
         */
-       if (!p->uclamp[clamp_id].active) {
+       if (p->uclamp[clamp_id].active) {
                uclamp_rq_dec_id(rq, p, clamp_id);
                uclamp_rq_inc_id(rq, p, clamp_id);
        }
@@ -1073,6 +1073,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
        task_rq_unlock(rq, p, &rf);
 }
 
+#ifdef CONFIG_UCLAMP_TASK_GROUP
 static inline void
 uclamp_update_active_tasks(struct cgroup_subsys_state *css,
                           unsigned int clamps)
@@ -1091,7 +1092,6 @@ uclamp_update_active_tasks(struct cgroup_subsys_state *css,
        css_task_iter_end(&it);
 }
 
-#ifdef CONFIG_UCLAMP_TASK_GROUP
 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
 static void uclamp_update_root_tg(void)
 {
@@ -3929,13 +3929,22 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
        }
 
 restart:
+#ifdef CONFIG_SMP
        /*
-        * Ensure that we put DL/RT tasks before the pick loop, such that they
-        * can PULL higher prio tasks when we lower the RQ 'priority'.
+        * We must do the balancing pass before put_next_task(), such
+        * that when we release the rq->lock the task is in the same
+        * state as before we took rq->lock.
+        *
+        * We can terminate the balance pass as soon as we know there is
+        * a runnable task of @class priority or higher.
         */
-       prev->sched_class->put_prev_task(rq, prev, rf);
-       if (!rq->nr_running)
-               newidle_balance(rq, rf);
+       for_class_range(class, prev->sched_class, &idle_sched_class) {
+               if (class->balance(rq, prev, rf))
+                       break;
+       }
+#endif
+
+       put_prev_task(rq, prev);
 
        for_each_class(class) {
                p = class->pick_next_task(rq, NULL, NULL);
@@ -6010,10 +6019,11 @@ void init_idle(struct task_struct *idle, int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
+       __sched_fork(0, idle);
+
        raw_spin_lock_irqsave(&idle->pi_lock, flags);
        raw_spin_lock(&rq->lock);
 
-       __sched_fork(0, idle);
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
        idle->flags |= PF_IDLE;
@@ -6201,7 +6211,7 @@ static struct task_struct *__pick_migrate_task(struct rq *rq)
        for_each_class(class) {
                next = class->pick_next_task(rq, NULL, NULL);
                if (next) {
-                       next->sched_class->put_prev_task(rq, next, NULL);
+                       next->sched_class->put_prev_task(rq, next);
                        return next;
                }
        }
index 2dc4872..a8a0803 100644 (file)
@@ -1691,6 +1691,22 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
        resched_curr(rq);
 }
 
+static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+{
+       if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
+               /*
+                * This is OK, because current is on_cpu, which avoids it being
+                * picked for load-balance and preemption/IRQs are still
+                * disabled avoiding further scheduler activity on it and we've
+                * not yet started the picking loop.
+                */
+               rq_unpin_lock(rq, rf);
+               pull_dl_task(rq);
+               rq_repin_lock(rq, rf);
+       }
+
+       return sched_stop_runnable(rq) || sched_dl_runnable(rq);
+}
 #endif /* CONFIG_SMP */
 
 /*
@@ -1758,45 +1774,28 @@ static struct task_struct *
 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
        struct sched_dl_entity *dl_se;
+       struct dl_rq *dl_rq = &rq->dl;
        struct task_struct *p;
-       struct dl_rq *dl_rq;
 
        WARN_ON_ONCE(prev || rf);
 
-       dl_rq = &rq->dl;
-
-       if (unlikely(!dl_rq->dl_nr_running))
+       if (!sched_dl_runnable(rq))
                return NULL;
 
        dl_se = pick_next_dl_entity(rq, dl_rq);
        BUG_ON(!dl_se);
-
        p = dl_task_of(dl_se);
-
        set_next_task_dl(rq, p);
-
        return p;
 }
 
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
        update_curr_dl(rq);
 
        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
-
-       if (rf && !on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
-               /*
-                * This is OK, because current is on_cpu, which avoids it being
-                * picked for load-balance and preemption/IRQs are still
-                * disabled avoiding further scheduler activity on it and we've
-                * not yet started the picking loop.
-                */
-               rq_unpin_lock(rq, rf);
-               pull_dl_task(rq);
-               rq_repin_lock(rq, rf);
-       }
 }
 
 /*
@@ -2442,6 +2441,7 @@ const struct sched_class dl_sched_class = {
        .set_next_task          = set_next_task_dl,
 
 #ifdef CONFIG_SMP
+       .balance                = balance_dl,
        .select_task_rq         = select_task_rq_dl,
        .migrate_task_rq        = migrate_task_rq_dl,
        .set_cpus_allowed       = set_cpus_allowed_dl,
index 682a754..69a81a5 100644 (file)
@@ -6570,6 +6570,15 @@ static void task_dead_fair(struct task_struct *p)
 {
        remove_entity_load_avg(&p->se);
 }
+
+static int
+balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+       if (rq->nr_running)
+               return 1;
+
+       return newidle_balance(rq, rf) != 0;
+}
 #endif /* CONFIG_SMP */
 
 static unsigned long wakeup_gran(struct sched_entity *se)
@@ -6746,7 +6755,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
        int new_tasks;
 
 again:
-       if (!cfs_rq->nr_running)
+       if (!sched_fair_runnable(rq))
                goto idle;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -6884,7 +6893,7 @@ idle:
 /*
  * Account for a descheduled task:
  */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 {
        struct sched_entity *se = &prev->se;
        struct cfs_rq *cfs_rq;
@@ -7539,6 +7548,19 @@ static void update_blocked_averages(int cpu)
        update_rq_clock(rq);
 
        /*
+        * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+        * that RT, DL and IRQ signals have been updated before updating CFS.
+        */
+       curr_class = rq->curr->sched_class;
+       update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+       update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+       update_irq_load_avg(rq, 0);
+
+       /* Don't need periodic decay once load/util_avg are null */
+       if (others_have_blocked(rq))
+               done = false;
+
+       /*
         * Iterates the task_group tree in a bottom up fashion, see
         * list_add_leaf_cfs_rq() for details.
         */
@@ -7565,14 +7587,6 @@ static void update_blocked_averages(int cpu)
                        done = false;
        }
 
-       curr_class = rq->curr->sched_class;
-       update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-       update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-       update_irq_load_avg(rq, 0);
-       /* Don't need periodic decay once load/util_avg are null */
-       if (others_have_blocked(rq))
-               done = false;
-
        update_blocked_load_status(rq, !done);
        rq_unlock_irqrestore(rq, &rf);
 }
@@ -7633,12 +7647,18 @@ static inline void update_blocked_averages(int cpu)
 
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
-       update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
+       /*
+        * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+        * that RT, DL and IRQ signals have been updated before updating CFS.
+        */
        curr_class = rq->curr->sched_class;
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
+
+       update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
        update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
        rq_unlock_irqrestore(rq, &rf);
 }
@@ -10414,11 +10434,11 @@ const struct sched_class fair_sched_class = {
        .check_preempt_curr     = check_preempt_wakeup,
 
        .pick_next_task         = pick_next_task_fair,
-
        .put_prev_task          = put_prev_task_fair,
        .set_next_task          = set_next_task_fair,
 
 #ifdef CONFIG_SMP
+       .balance                = balance_fair,
        .select_task_rq         = select_task_rq_fair,
        .migrate_task_rq        = migrate_task_rq_fair,
 
index 8dad5aa..80167b1 100644 (file)
@@ -104,7 +104,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
         * update no idle residency and return.
         */
        if (current_clr_polling_and_test()) {
-               dev->last_residency = 0;
+               dev->last_residency_ns = 0;
                local_irq_enable();
                return -EBUSY;
        }
@@ -165,7 +165,9 @@ static void cpuidle_idle_call(void)
         * until a proper wakeup interrupt happens.
         */
 
-       if (idle_should_enter_s2idle() || dev->use_deepest_state) {
+       if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
+               u64 max_latency_ns;
+
                if (idle_should_enter_s2idle()) {
                        rcu_idle_enter();
 
@@ -176,12 +178,16 @@ static void cpuidle_idle_call(void)
                        }
 
                        rcu_idle_exit();
+
+                       max_latency_ns = U64_MAX;
+               } else {
+                       max_latency_ns = dev->forced_idle_latency_limit_ns;
                }
 
                tick_nohz_idle_stop_tick();
                rcu_idle_enter();
 
-               next_state = cpuidle_find_deepest_state(drv, dev);
+               next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
                call_cpuidle(drv, dev, next_state);
        } else {
                bool stop_tick = true;
@@ -311,7 +317,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-void play_idle(unsigned long duration_us)
+void play_idle_precise(u64 duration_ns, u64 latency_ns)
 {
        struct idle_timer it;
 
@@ -323,29 +329,29 @@ void play_idle(unsigned long duration_us)
        WARN_ON_ONCE(current->nr_cpus_allowed != 1);
        WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
        WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
-       WARN_ON_ONCE(!duration_us);
+       WARN_ON_ONCE(!duration_ns);
 
        rcu_sleep_check();
        preempt_disable();
        current->flags |= PF_IDLE;
-       cpuidle_use_deepest_state(true);
+       cpuidle_use_deepest_state(latency_ns);
 
        it.done = 0;
        hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        it.timer.function = idle_inject_timer_fn;
-       hrtimer_start(&it.timer, ns_to_ktime(duration_us * NSEC_PER_USEC),
+       hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
                      HRTIMER_MODE_REL_PINNED);
 
        while (!READ_ONCE(it.done))
                do_idle();
 
-       cpuidle_use_deepest_state(false);
+       cpuidle_use_deepest_state(0);
        current->flags &= ~PF_IDLE;
 
        preempt_fold_need_resched();
        preempt_enable();
 }
-EXPORT_SYMBOL_GPL(play_idle);
+EXPORT_SYMBOL_GPL(play_idle_precise);
 
 void cpu_startup_entry(enum cpuhp_state state)
 {
@@ -365,6 +371,12 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
        return task_cpu(p); /* IDLE tasks as never migrated */
 }
+
+static int
+balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+       return WARN_ON_ONCE(1);
+}
 #endif
 
 /*
@@ -375,7 +387,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
        resched_curr(rq);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
 
@@ -460,6 +472,7 @@ const struct sched_class idle_sched_class = {
        .set_next_task          = set_next_task_idle,
 
 #ifdef CONFIG_SMP
+       .balance                = balance_idle,
        .select_task_rq         = select_task_rq_idle,
        .set_cpus_allowed       = set_cpus_allowed_common,
 #endif
index ebaa4e6..9b8adc0 100644 (file)
@@ -1469,6 +1469,22 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
        resched_curr(rq);
 }
 
+static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+{
+       if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
+               /*
+                * This is OK, because current is on_cpu, which avoids it being
+                * picked for load-balance and preemption/IRQs are still
+                * disabled avoiding further scheduler activity on it and we've
+                * not yet started the picking loop.
+                */
+               rq_unpin_lock(rq, rf);
+               pull_rt_task(rq);
+               rq_repin_lock(rq, rf);
+       }
+
+       return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
+}
 #endif /* CONFIG_SMP */
 
 /*
@@ -1552,21 +1568,18 @@ static struct task_struct *
 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
        struct task_struct *p;
-       struct rt_rq *rt_rq = &rq->rt;
 
        WARN_ON_ONCE(prev || rf);
 
-       if (!rt_rq->rt_queued)
+       if (!sched_rt_runnable(rq))
                return NULL;
 
        p = _pick_next_task_rt(rq);
-
        set_next_task_rt(rq, p);
-
        return p;
 }
 
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
        update_curr_rt(rq);
 
@@ -1578,18 +1591,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_fla
         */
        if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
-
-       if (rf && !on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
-               /*
-                * This is OK, because current is on_cpu, which avoids it being
-                * picked for load-balance and preemption/IRQs are still
-                * disabled avoiding further scheduler activity on it and we've
-                * not yet started the picking loop.
-                */
-               rq_unpin_lock(rq, rf);
-               pull_rt_task(rq);
-               rq_repin_lock(rq, rf);
-       }
 }
 
 #ifdef CONFIG_SMP
@@ -2366,8 +2367,8 @@ const struct sched_class rt_sched_class = {
        .set_next_task          = set_next_task_rt,
 
 #ifdef CONFIG_SMP
+       .balance                = balance_rt,
        .select_task_rq         = select_task_rq_rt,
-
        .set_cpus_allowed       = set_cpus_allowed_common,
        .rq_online              = rq_online_rt,
        .rq_offline             = rq_offline_rt,
index 0db2c1b..c8870c5 100644 (file)
@@ -1727,10 +1727,11 @@ struct sched_class {
        struct task_struct * (*pick_next_task)(struct rq *rq,
                                               struct task_struct *prev,
                                               struct rq_flags *rf);
-       void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf);
+       void (*put_prev_task)(struct rq *rq, struct task_struct *p);
        void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
+       int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
        int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
        void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
 
@@ -1773,7 +1774,7 @@ struct sched_class {
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
        WARN_ON_ONCE(rq->curr != prev);
-       prev->sched_class->put_prev_task(rq, prev, NULL);
+       prev->sched_class->put_prev_task(rq, prev);
 }
 
 static inline void set_next_task(struct rq *rq, struct task_struct *next)
@@ -1787,8 +1788,12 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
 #else
 #define sched_class_highest (&dl_sched_class)
 #endif
+
+#define for_class_range(class, _from, _to) \
+       for (class = (_from); class != (_to); class = class->next)
+
 #define for_each_class(class) \
-   for (class = sched_class_highest; class; class = class->next)
+       for_class_range(class, sched_class_highest, NULL)
 
 extern const struct sched_class stop_sched_class;
 extern const struct sched_class dl_sched_class;
@@ -1796,6 +1801,25 @@ extern const struct sched_class rt_sched_class;
 extern const struct sched_class fair_sched_class;
 extern const struct sched_class idle_sched_class;
 
+static inline bool sched_stop_runnable(struct rq *rq)
+{
+       return rq->stop && task_on_rq_queued(rq->stop);
+}
+
+static inline bool sched_dl_runnable(struct rq *rq)
+{
+       return rq->dl.dl_nr_running > 0;
+}
+
+static inline bool sched_rt_runnable(struct rq *rq)
+{
+       return rq->rt.rt_queued > 0;
+}
+
+static inline bool sched_fair_runnable(struct rq *rq)
+{
+       return rq->cfs.nr_running > 0;
+}
 
 #ifdef CONFIG_SMP
 
index 7e1cee4..c064073 100644 (file)
@@ -15,6 +15,12 @@ select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
        return task_cpu(p); /* stop tasks as never migrate */
 }
+
+static int
+balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+       return sched_stop_runnable(rq);
+}
 #endif /* CONFIG_SMP */
 
 static void
@@ -31,16 +37,13 @@ static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
 static struct task_struct *
 pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-       struct task_struct *stop = rq->stop;
-
        WARN_ON_ONCE(prev || rf);
 
-       if (!stop || !task_on_rq_queued(stop))
+       if (!sched_stop_runnable(rq))
                return NULL;
 
-       set_next_task_stop(rq, stop);
-
-       return stop;
+       set_next_task_stop(rq, rq->stop);
+       return rq->stop;
 }
 
 static void
@@ -60,7 +63,7 @@ static void yield_task_stop(struct rq *rq)
        BUG(); /* the stop task should never yield, its pointless. */
 }
 
-static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
        struct task_struct *curr = rq->curr;
        u64 delta_exec;
@@ -129,6 +132,7 @@ const struct sched_class stop_sched_class = {
        .set_next_task          = set_next_task_stop,
 
 #ifdef CONFIG_SMP
+       .balance                = balance_stop,
        .select_task_rq         = select_task_rq_stop,
        .set_cpus_allowed       = set_cpus_allowed_common,
 #endif
index b5667a2..49b835f 100644 (file)
@@ -1948,7 +1948,7 @@ next_level:
 static int
 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 {
-       enum s_alloc alloc_state;
+       enum s_alloc alloc_state = sa_none;
        struct sched_domain *sd;
        struct s_data d;
        struct rq *rq = NULL;
@@ -1956,6 +1956,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        struct sched_domain_topology_level *tl_asym;
        bool has_asym = false;
 
+       if (WARN_ON(cpumask_empty(cpu_map)))
+               goto error;
+
        alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
        if (alloc_state != sa_rootdomain)
                goto error;
@@ -2026,7 +2029,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        rcu_read_unlock();
 
        if (has_asym)
-               static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
+               static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
 
        if (rq && sched_debug_enabled) {
                pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
@@ -2121,8 +2124,12 @@ int sched_init_domains(const struct cpumask *cpu_map)
  */
 static void detach_destroy_domains(const struct cpumask *cpu_map)
 {
+       unsigned int cpu = cpumask_any(cpu_map);
        int i;
 
+       if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
+               static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
+
        rcu_read_lock();
        for_each_cpu(i, cpu_map)
                cpu_attach_domain(NULL, &def_root_domain, i);
index c4da1ef..bcd46f5 100644 (file)
@@ -2205,8 +2205,8 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
                 */
                preempt_disable();
                read_unlock(&tasklist_lock);
-               preempt_enable_no_resched();
                cgroup_enter_frozen();
+               preempt_enable_no_resched();
                freezable_schedule();
                cgroup_leave_frozen(true);
        } else {
index 6d1f68b..c9ea7eb 100644 (file)
@@ -141,7 +141,8 @@ unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
        struct stacktrace_cookie c = {
                .store  = store,
                .size   = size,
-               .skip   = skipnr + 1,
+               /* skip this function if they are tracing us */
+               .skip   = skipnr + !!(current == tsk),
        };
 
        if (!try_get_task_stack(tsk))
@@ -298,7 +299,8 @@ unsigned int stack_trace_save_tsk(struct task_struct *task,
        struct stack_trace trace = {
                .entries        = store,
                .max_entries    = size,
-               .skip           = skipnr + 1,
+               /* skip this function if they are tracing us */
+               .skip   = skipnr + !!(current == task),
        };
 
        save_stack_trace_tsk(task, &trace);
index 65eb796..069ca78 100644 (file)
@@ -771,7 +771,7 @@ int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
        /* fill PPS status fields */
        pps_fill_timex(txc);
 
-       txc->time.tv_sec = (time_t)ts->tv_sec;
+       txc->time.tv_sec = ts->tv_sec;
        txc->time.tv_usec = ts->tv_nsec;
        if (!(time_status & STA_NANO))
                txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;
index 92a4319..42d512f 100644 (file)
@@ -266,7 +266,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
 /**
  * thread_group_sample_cputime - Sample cputime for a given task
  * @tsk:       Task for which cputime needs to be started
- * @iimes:     Storage for time samples
+ * @samples:   Storage for time samples
  *
  * Called from sys_getitimer() to calculate the expiry time of an active
  * timer. That means group cputime accounting is already active. Called
@@ -1038,12 +1038,12 @@ unlock:
  * member of @pct->bases[CLK].nextevt. False otherwise
  */
 static inline bool
-task_cputimers_expired(const u64 *sample, struct posix_cputimers *pct)
+task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
 {
        int i;
 
        for (i = 0; i < CPUCLOCK_MAX; i++) {
-               if (sample[i] >= pct->bases[i].nextevt)
+               if (samples[i] >= pct->bases[i].nextevt)
                        return true;
        }
        return false;
index 142b076..dbd6905 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/seqlock.h>
 #include <linux/bitops.h>
 
+#include "timekeeping.h"
+
 /**
  * struct clock_read_data - data required to read from sched_clock()
  *
index 4bc37ac..5ee0f77 100644 (file)
@@ -110,8 +110,7 @@ void update_vsyscall(struct timekeeper *tk)
        nsec            = nsec + tk->wall_to_monotonic.tv_nsec;
        vdso_ts->sec    += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
 
-       if (__arch_use_vsyscall(vdata))
-               update_vdso_data(vdata, tk);
+       update_vdso_data(vdata, tk);
 
        __arch_update_vsyscall(vdata, tk);
 
@@ -124,10 +123,8 @@ void update_vsyscall_tz(void)
 {
        struct vdso_data *vdata = __arch_get_k_vdso_data();
 
-       if (__arch_use_vsyscall(vdata)) {
-               vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
-               vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
-       }
+       vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
+       vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
 
        __arch_sync_vdso_data(vdata);
 }
index 0892e38..a9dfa04 100644 (file)
@@ -272,9 +272,11 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
                goto out;
        }
 
+       mutex_lock(&event_mutex);
        ret = perf_trace_event_init(tp_event, p_event);
        if (ret)
                destroy_local_trace_kprobe(tp_event);
+       mutex_unlock(&event_mutex);
 out:
        kfree(func);
        return ret;
@@ -282,8 +284,10 @@ out:
 
 void perf_kprobe_destroy(struct perf_event *p_event)
 {
+       mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
+       mutex_unlock(&event_mutex);
 
        destroy_local_trace_kprobe(p_event->tp_event);
 }
index 57648c5..7482a14 100644 (file)
@@ -679,6 +679,8 @@ static bool synth_field_signed(char *type)
 {
        if (str_has_prefix(type, "u"))
                return false;
+       if (strcmp(type, "gfp_t") == 0)
+               return false;
 
        return true;
 }
index 183f92a..3321d04 100644 (file)
@@ -447,7 +447,6 @@ config ASSOCIATIVE_ARRAY
 config HAS_IOMEM
        bool
        depends on !NO_IOMEM
-       select GENERIC_IO
        default y
 
 config HAS_IOPORT_MAP
index 5cff72f..33ffbf3 100644 (file)
@@ -106,7 +106,12 @@ retry:
                was_locked = 1;
        } else {
                local_irq_restore(flags);
-               cpu_relax();
+               /*
+                * Wait for the lock to release before jumping to
+                * atomic_cmpxchg() in order to mitigate the thundering herd
+                * problem.
+                */
+               do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
                goto retry;
        }
 
index 66a3748..c2cf2c5 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -215,7 +215,7 @@ int idr_for_each(const struct idr *idr,
 EXPORT_SYMBOL(idr_for_each);
 
 /**
- * idr_get_next() - Find next populated entry.
+ * idr_get_next_ul() - Find next populated entry.
  * @idr: IDR handle.
  * @nextid: Pointer to an ID.
  *
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(idr_for_each);
  * to the ID of the found value.  To use in a loop, the value pointed to by
  * nextid must be incremented by the user.
  */
-void *idr_get_next(struct idr *idr, int *nextid)
+void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
 {
        struct radix_tree_iter iter;
        void __rcu **slot;
@@ -245,18 +245,14 @@ void *idr_get_next(struct idr *idr, int *nextid)
        }
        if (!slot)
                return NULL;
-       id = iter.index + base;
-
-       if (WARN_ON_ONCE(id > INT_MAX))
-               return NULL;
 
-       *nextid = id;
+       *nextid = iter.index + base;
        return entry;
 }
-EXPORT_SYMBOL(idr_get_next);
+EXPORT_SYMBOL(idr_get_next_ul);
 
 /**
- * idr_get_next_ul() - Find next populated entry.
+ * idr_get_next() - Find next populated entry.
  * @idr: IDR handle.
  * @nextid: Pointer to an ID.
  *
@@ -265,22 +261,17 @@ EXPORT_SYMBOL(idr_get_next);
  * to the ID of the found value.  To use in a loop, the value pointed to by
  * nextid must be incremented by the user.
  */
-void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
+void *idr_get_next(struct idr *idr, int *nextid)
 {
-       struct radix_tree_iter iter;
-       void __rcu **slot;
-       unsigned long base = idr->idr_base;
        unsigned long id = *nextid;
+       void *entry = idr_get_next_ul(idr, &id);
 
-       id = (id < base) ? 0 : id - base;
-       slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
-       if (!slot)
+       if (WARN_ON_ONCE(id > INT_MAX))
                return NULL;
-
-       *nextid = iter.index + base;
-       return rcu_dereference_raw(*slot);
+       *nextid = id;
+       return entry;
 }
-EXPORT_SYMBOL(idr_get_next_ul);
+EXPORT_SYMBOL(idr_get_next);
 
 /**
  * idr_replace() - replace pointer for given ID.
index 18c1dfb..c8fa1d2 100644 (file)
@@ -1529,7 +1529,7 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
                        offset = radix_tree_find_next_bit(node, IDR_FREE,
                                                        offset + 1);
                        start = next_index(start, node, offset);
-                       if (start > max)
+                       if (start > max || start == 0)
                                return ERR_PTR(-ENOSPC);
                        while (offset == RADIX_TREE_MAP_SIZE) {
                                offset = node->offset + 1;
index 9d631a7..7df4f7f 100644 (file)
@@ -1110,6 +1110,28 @@ static noinline void check_find_entry(struct xarray *xa)
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_move_tiny(struct xarray *xa)
+{
+       XA_STATE(xas, xa, 0);
+
+       XA_BUG_ON(xa, !xa_empty(xa));
+       rcu_read_lock();
+       XA_BUG_ON(xa, xas_next(&xas) != NULL);
+       XA_BUG_ON(xa, xas_next(&xas) != NULL);
+       rcu_read_unlock();
+       xa_store_index(xa, 0, GFP_KERNEL);
+       rcu_read_lock();
+       xas_set(&xas, 0);
+       XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
+       XA_BUG_ON(xa, xas_next(&xas) != NULL);
+       xas_set(&xas, 0);
+       XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
+       XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+       rcu_read_unlock();
+       xa_erase_index(xa, 0);
+       XA_BUG_ON(xa, !xa_empty(xa));
+}
+
 static noinline void check_move_small(struct xarray *xa, unsigned long idx)
 {
        XA_STATE(xas, xa, 0);
@@ -1217,6 +1239,8 @@ static noinline void check_move(struct xarray *xa)
 
        xa_destroy(xa);
 
+       check_move_tiny(xa);
+
        for (i = 0; i < 16; i++)
                check_move_small(xa, 1UL << i);
 
index e630e7f..45f57fd 100644 (file)
@@ -214,9 +214,10 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
                return -1;
        }
 
-       res->tv_sec = 0;
-       res->tv_nsec = ns;
-
+       if (likely(res)) {
+               res->tv_sec = 0;
+               res->tv_nsec = ns;
+       }
        return 0;
 }
 
@@ -245,7 +246,7 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
                ret = clock_getres_fallback(clock, &ts);
 #endif
 
-       if (likely(!ret)) {
+       if (likely(!ret && res)) {
                res->tv_sec = ts.tv_sec;
                res->tv_nsec = ts.tv_nsec;
        }
index 446b956..1237c21 100644 (file)
@@ -994,6 +994,8 @@ void *__xas_prev(struct xa_state *xas)
 
        if (!xas_frozen(xas->xa_node))
                xas->xa_index--;
+       if (!xas->xa_node)
+               return set_bounds(xas);
        if (xas_not_node(xas->xa_node))
                return xas_load(xas);
 
@@ -1031,6 +1033,8 @@ void *__xas_next(struct xa_state *xas)
 
        if (!xas_frozen(xas->xa_node))
                xas->xa_index++;
+       if (!xas->xa_node)
+               return set_bounds(xas);
        if (xas_not_node(xas->xa_node))
                return xas_load(xas);
 
index 08c3c80..156f26f 100644 (file)
@@ -1146,6 +1146,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
 
                if (DEC_IS_DYNALLOC(s->dict.mode)) {
                        if (s->dict.allocated < s->dict.size) {
+                               s->dict.allocated = s->dict.size;
                                vfree(s->dict.buf);
                                s->dict.buf = vmalloc(s->dict.size);
                                if (s->dict.buf == NULL) {
index 8345bb6..0461df1 100644 (file)
@@ -67,28 +67,31 @@ void __dump_page(struct page *page, const char *reason)
         */
        mapcount = PageSlab(page) ? 0 : page_mapcount(page);
 
-       pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx",
-                 page, page_ref_count(page), mapcount,
-                 page->mapping, page_to_pgoff(page));
        if (PageCompound(page))
-               pr_cont(" compound_mapcount: %d", compound_mapcount(page));
-       pr_cont("\n");
-       if (PageAnon(page))
-               pr_warn("anon ");
-       else if (PageKsm(page))
-               pr_warn("ksm ");
+               pr_warn("page:%px refcount:%d mapcount:%d mapping:%px "
+                       "index:%#lx compound_mapcount: %d\n",
+                       page, page_ref_count(page), mapcount,
+                       page->mapping, page_to_pgoff(page),
+                       compound_mapcount(page));
+       else
+               pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx\n",
+                       page, page_ref_count(page), mapcount,
+                       page->mapping, page_to_pgoff(page));
+       if (PageKsm(page))
+               pr_warn("ksm flags: %#lx(%pGp)\n", page->flags, &page->flags);
+       else if (PageAnon(page))
+               pr_warn("anon flags: %#lx(%pGp)\n", page->flags, &page->flags);
        else if (mapping) {
-               pr_warn("%ps ", mapping->a_ops);
                if (mapping->host && mapping->host->i_dentry.first) {
                        struct dentry *dentry;
                        dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
-                       pr_warn("name:\"%pd\" ", dentry);
-               }
+                       pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
+               } else
+                       pr_warn("%ps\n", mapping->a_ops);
+               pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
        }
        BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
 
-       pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
-
 hex_only:
        print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
index f1930fa..2ac38bd 100644 (file)
@@ -196,7 +196,7 @@ int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 again:
        rcu_read_lock();
        h_cg = hugetlb_cgroup_from_task(current);
-       if (!css_tryget_online(&h_cg->css)) {
+       if (!css_tryget(&h_cg->css)) {
                rcu_read_unlock();
                goto again;
        }
index 0a1b4b4..a8a57be 100644 (file)
@@ -1028,12 +1028,13 @@ static void collapse_huge_page(struct mm_struct *mm,
 
        anon_vma_lock_write(vma->anon_vma);
 
-       pte = pte_offset_map(pmd, address);
-       pte_ptl = pte_lockptr(mm, pmd);
-
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
                                address, address + HPAGE_PMD_SIZE);
        mmu_notifier_invalidate_range_start(&range);
+
+       pte = pte_offset_map(pmd, address);
+       pte_ptl = pte_lockptr(mm, pmd);
+
        pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
        /*
         * After this gup_fast can't run anymore. This also removes
@@ -1601,17 +1602,6 @@ static void collapse_file(struct mm_struct *mm,
                                        result = SCAN_FAIL;
                                        goto xa_unlocked;
                                }
-                       } else if (!PageUptodate(page)) {
-                               xas_unlock_irq(&xas);
-                               wait_on_page_locked(page);
-                               if (!trylock_page(page)) {
-                                       result = SCAN_PAGE_LOCK;
-                                       goto xa_unlocked;
-                               }
-                               get_page(page);
-                       } else if (PageDirty(page)) {
-                               result = SCAN_FAIL;
-                               goto xa_locked;
                        } else if (trylock_page(page)) {
                                get_page(page);
                                xas_unlock_irq(&xas);
@@ -1626,7 +1616,12 @@ static void collapse_file(struct mm_struct *mm,
                 * without racing with truncate.
                 */
                VM_BUG_ON_PAGE(!PageLocked(page), page);
-               VM_BUG_ON_PAGE(!PageUptodate(page), page);
+
+               /* make sure the page is up to date */
+               if (unlikely(!PageUptodate(page))) {
+                       result = SCAN_FAIL;
+                       goto out_unlock;
+               }
 
                /*
                 * If file was truncated then extended, or hole-punched, before
@@ -1642,6 +1637,16 @@ static void collapse_file(struct mm_struct *mm,
                        goto out_unlock;
                }
 
+               if (!is_shmem && PageDirty(page)) {
+                       /*
+                        * khugepaged only works on read-only fd, so this
+                        * page is dirty because it hasn't been flushed
+                        * since first write.
+                        */
+                       result = SCAN_FAIL;
+                       goto out_unlock;
+               }
+
                if (isolate_lru_page(page)) {
                        result = SCAN_DEL_PAGE_LRU;
                        goto out_unlock;
index 2be9f3f..94c343b 100644 (file)
@@ -363,8 +363,12 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
                ClearPageReferenced(page);
                test_and_clear_page_young(page);
                if (pageout) {
-                       if (!isolate_lru_page(page))
-                               list_add(&page->lru, &page_list);
+                       if (!isolate_lru_page(page)) {
+                               if (PageUnevictable(page))
+                                       putback_lru_page(page);
+                               else
+                                       list_add(&page->lru, &page_list);
+                       }
                } else
                        deactivate_page(page);
 huge_unlock:
@@ -441,8 +445,12 @@ regular_page:
                ClearPageReferenced(page);
                test_and_clear_page_young(page);
                if (pageout) {
-                       if (!isolate_lru_page(page))
-                               list_add(&page->lru, &page_list);
+                       if (!isolate_lru_page(page)) {
+                               if (PageUnevictable(page))
+                                       putback_lru_page(page);
+                               else
+                                       list_add(&page->lru, &page_list);
+                       }
                } else
                        deactivate_page(page);
        }
index 3631065..46ad252 100644 (file)
@@ -484,7 +484,7 @@ ino_t page_cgroup_ino(struct page *page)
        unsigned long ino = 0;
 
        rcu_read_lock();
-       if (PageHead(page) && PageSlab(page))
+       if (PageSlab(page) && !PageTail(page))
                memcg = memcg_from_slab_page(page);
        else
                memcg = READ_ONCE(page->mem_cgroup);
@@ -960,7 +960,7 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
                        if (unlikely(!memcg))
                                memcg = root_mem_cgroup;
                }
-       } while (!css_tryget_online(&memcg->css));
+       } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
 }
@@ -2535,6 +2535,15 @@ retry:
        }
 
        /*
+        * Memcg doesn't have a dedicated reserve for atomic
+        * allocations. But like the global atomic pool, we need to
+        * put the burden of reclaim on regular allocation requests
+        * and let these go through as privileged allocations.
+        */
+       if (gfp_mask & __GFP_ATOMIC)
+               goto force;
+
+       /*
         * Unlike in global OOM situations, memcg is not in a physical
         * memory shortage.  Allow dying and OOM-killed tasks to
         * bypass the last charges so that they can exit quickly and
@@ -5014,12 +5023,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
        int node;
 
-       /*
-        * Flush percpu vmstats and vmevents to guarantee the value correctness
-        * on parent's and all ancestor levels.
-        */
-       memcg_flush_percpu_vmstats(memcg, false);
-       memcg_flush_percpu_vmevents(memcg);
        for_each_node(node)
                free_mem_cgroup_per_node_info(memcg, node);
        free_percpu(memcg->vmstats_percpu);
@@ -5030,6 +5033,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 static void mem_cgroup_free(struct mem_cgroup *memcg)
 {
        memcg_wb_domain_exit(memcg);
+       /*
+        * Flush percpu vmstats and vmevents to guarantee the value correctness
+        * on parent's and all ancestor levels.
+        */
+       memcg_flush_percpu_vmstats(memcg, false);
+       memcg_flush_percpu_vmevents(memcg);
        __mem_cgroup_free(memcg);
 }
 
index df570e5..3b62a9f 100644 (file)
@@ -447,6 +447,14 @@ static void update_pgdat_span(struct pglist_data *pgdat)
                                             zone->spanned_pages;
 
                /* No need to lock the zones, they can't change. */
+               if (!zone->spanned_pages)
+                       continue;
+               if (!node_end_pfn) {
+                       node_start_pfn = zone->zone_start_pfn;
+                       node_end_pfn = zone_end_pfn;
+                       continue;
+               }
+
                if (zone_end_pfn > node_end_pfn)
                        node_end_pfn = zone_end_pfn;
                if (zone->zone_start_pfn < node_start_pfn)
@@ -1638,6 +1646,18 @@ static int check_cpu_on_node(pg_data_t *pgdat)
        return 0;
 }
 
+static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
+{
+       int nid = *(int *)arg;
+
+       /*
+        * If a memory block belongs to multiple nodes, the stored nid is not
+        * reliable. However, such blocks are always online (e.g., cannot get
+        * offlined) and, therefore, are still spanned by the node.
+        */
+       return mem->nid == nid ? -EEXIST : 0;
+}
+
 /**
  * try_offline_node
  * @nid: the node ID
@@ -1650,25 +1670,24 @@ static int check_cpu_on_node(pg_data_t *pgdat)
 void try_offline_node(int nid)
 {
        pg_data_t *pgdat = NODE_DATA(nid);
-       unsigned long start_pfn = pgdat->node_start_pfn;
-       unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
-       unsigned long pfn;
-
-       for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
-               unsigned long section_nr = pfn_to_section_nr(pfn);
-
-               if (!present_section_nr(section_nr))
-                       continue;
+       int rc;
 
-               if (pfn_to_nid(pfn) != nid)
-                       continue;
+       /*
+        * If the node still spans pages (especially ZONE_DEVICE), don't
+        * offline it. A node spans memory after move_pfn_range_to_zone(),
+        * e.g., after the memory block was onlined.
+        */
+       if (pgdat->node_spanned_pages)
+               return;
 
-               /*
-                * some memory sections of this node are not removed, and we
-                * can't offline node now.
-                */
+       /*
+        * Especially offline memory blocks might not be spanned by the
+        * node. They will get spanned by the node once they get onlined.
+        * However, they link to the node in sysfs and can get onlined later.
+        */
+       rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
+       if (rc)
                return;
-       }
 
        if (check_cpu_on_node(pgdat))
                return;
index 4ae967b..e08c941 100644 (file)
@@ -672,7 +672,9 @@ static const struct mm_walk_ops queue_pages_walk_ops = {
  * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
  *     specified.
  * 0 - queue pages successfully or no misplaced page.
- * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
+ * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
+ *         memory range specified by nodemask and maxnode points outside
+ *         your accessible address space (-EFAULT)
  */
 static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -1286,7 +1288,7 @@ static long do_mbind(unsigned long start, unsigned long len,
                          flags | MPOL_MF_INVERT, &pagelist);
 
        if (ret < 0) {
-               err = -EIO;
+               err = ret;
                goto up_out;
        }
 
@@ -1305,10 +1307,12 @@ static long do_mbind(unsigned long start, unsigned long len,
 
                if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
                        err = -EIO;
-       } else
-               putback_movable_pages(&pagelist);
-
+       } else {
 up_out:
+               if (!list_empty(&pagelist))
+                       putback_movable_pages(&pagelist);
+       }
+
        up_write(&mm->mmap_sem);
 mpol_out:
        mpol_put(new);
index 7fde886..9a889e4 100644 (file)
@@ -180,7 +180,7 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
                                        mn->ops->invalidate_range_start, _ret,
                                        !mmu_notifier_range_blockable(range) ? "non-" : "");
                                WARN_ON(mmu_notifier_range_blockable(range) ||
-                                       ret != -EAGAIN);
+                                       _ret != -EAGAIN);
                                ret = _ret;
                        }
                }
index ecc3dba..f391c0c 100644 (file)
@@ -1948,6 +1948,14 @@ void __init page_alloc_init_late(void)
        wait_for_completion(&pgdat_init_all_done_comp);
 
        /*
+        * The number of managed pages has changed due to the initialisation
+        * so the pcpu batch and high limits needs to be updated or the limits
+        * will be artificially small.
+        */
+       for_each_populated_zone(zone)
+               zone_pcp_update(zone);
+
+       /*
         * We initialized the rest of the deferred pages.  Permanently disable
         * on-demand struct page initialization.
         */
@@ -3720,10 +3728,6 @@ try_this_zone:
 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
 {
        unsigned int filter = SHOW_MEM_FILTER_NODES;
-       static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
-
-       if (!__ratelimit(&show_mem_rs))
-               return;
 
        /*
         * This documents exceptions given to allocations in certain
@@ -3744,8 +3748,7 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
 {
        struct va_format vaf;
        va_list args;
-       static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
-                                     DEFAULT_RATELIMIT_BURST);
+       static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
 
        if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
                return;
@@ -8514,7 +8517,6 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages)
        WARN(count != 0, "%d pages are still in use!\n", count);
 }
 
-#ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * The zone indicated has a new number of managed_pages; batch sizes and percpu
  * page high values need to be recalulated.
@@ -8528,7 +8530,6 @@ void __meminit zone_pcp_update(struct zone *zone)
                                per_cpu_ptr(zone->pageset, cpu));
        mutex_unlock(&pcp_batch_high_lock);
 }
-#endif
 
 void zone_pcp_reset(struct zone *zone)
 {
index 24ee600..60a66a5 100644 (file)
@@ -73,6 +73,7 @@ static void swap_slot_free_notify(struct page *page)
 {
        struct swap_info_struct *sis;
        struct gendisk *disk;
+       swp_entry_t entry;
 
        /*
         * There is no guarantee that the page is in swap cache - the software
@@ -104,11 +105,10 @@ static void swap_slot_free_notify(struct page *page)
         * we again wish to reclaim it.
         */
        disk = sis->bdev->bd_disk;
-       if (disk->fops->swap_slot_free_notify) {
-               swp_entry_t entry;
+       entry.val = page_private(page);
+       if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
                unsigned long offset;
 
-               entry.val = page_private(page);
                offset = swp_offset(entry);
 
                SetPageDirty(page);
index 68e455f..b2b0169 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -323,8 +323,8 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  * Expects a pointer to a slab page. Please note, that PageSlab() check
  * isn't sufficient, as it returns true also for tail compound slab pages,
  * which do not have slab_cache pointer set.
- * So this function assumes that the page can pass PageHead() and PageSlab()
- * checks.
+ * So this function assumes that the page can pass PageSlab() && !PageTail()
+ * check.
  *
  * The kmem_cache can be reparented asynchronously. The caller must ensure
  * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
index b25c807..e72e802 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1433,12 +1433,15 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
        void *old_tail = *tail ? *tail : *head;
        int rsize;
 
-       if (slab_want_init_on_free(s)) {
-               void *p = NULL;
+       /* Head and tail of the reconstructed freelist */
+       *head = NULL;
+       *tail = NULL;
 
-               do {
-                       object = next;
-                       next = get_freepointer(s, object);
+       do {
+               object = next;
+               next = get_freepointer(s, object);
+
+               if (slab_want_init_on_free(s)) {
                        /*
                         * Clear the object and the metadata, but don't touch
                         * the redzone.
@@ -1448,29 +1451,8 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
                                                           : 0;
                        memset((char *)object + s->inuse, 0,
                               s->size - s->inuse - rsize);
-                       set_freepointer(s, object, p);
-                       p = object;
-               } while (object != old_tail);
-       }
-
-/*
- * Compiler cannot detect this function can be removed if slab_free_hook()
- * evaluates to nothing.  Thus, catch all relevant config debug options here.
- */
-#if defined(CONFIG_LOCKDEP)    ||              \
-       defined(CONFIG_DEBUG_KMEMLEAK) ||       \
-       defined(CONFIG_DEBUG_OBJECTS_FREE) ||   \
-       defined(CONFIG_KASAN)
 
-       next = *head;
-
-       /* Head and tail of the reconstructed freelist */
-       *head = NULL;
-       *tail = NULL;
-
-       do {
-               object = next;
-               next = get_freepointer(s, object);
+               }
                /* If object's reuse doesn't have to be delayed */
                if (!slab_free_hook(s, object)) {
                        /* Move object to the new freelist */
@@ -1485,9 +1467,6 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
                *tail = NULL;
 
        return *head != NULL;
-#else
-       return true;
-#endif
 }
 
 static void *setup_object(struct kmem_cache *s, struct page *page,
index 6afc892..a822204 100644 (file)
@@ -1383,12 +1383,29 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
                        unsigned long freecount = 0;
                        struct free_area *area;
                        struct list_head *curr;
+                       bool overflow = false;
 
                        area = &(zone->free_area[order]);
 
-                       list_for_each(curr, &area->free_list[mtype])
-                               freecount++;
-                       seq_printf(m, "%6lu ", freecount);
+                       list_for_each(curr, &area->free_list[mtype]) {
+                               /*
+                                * Cap the free_list iteration because it might
+                                * be really large and we are under a spinlock
+                                * so a long time spent here could trigger a
+                                * hard lockup detector. Anyway this is a
+                                * debugging tool so knowing there is a handful
+                                * of pages of this order should be more than
+                                * sufficient.
+                                */
+                               if (++freecount >= 100000) {
+                                       overflow = true;
+                                       break;
+                               }
+                       }
+                       seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
+                       spin_unlock_irq(&zone->lock);
+                       cond_resched();
+                       spin_lock_irq(&zone->lock);
                }
                seq_putc(m, '\n');
        }
@@ -1972,7 +1989,7 @@ void __init init_mm_internals(void)
 #endif
 #ifdef CONFIG_PROC_FS
        proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
-       proc_create_seq("pagetypeinfo", 0444, NULL, &pagetypeinfo_op);
+       proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
        proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
        proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
 #endif
index 54728d2..d4bcfd8 100644 (file)
@@ -172,7 +172,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
        if (err < 0)
                goto out_uninit_mvrp;
 
-       vlan->nest_level = dev_get_nest_level(real_dev) + 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;
index 93eadf1..e5bff5c 100644 (file)
@@ -489,36 +489,6 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
        dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
-/*
- * vlan network devices have devices nesting below it, and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key vlan_netdev_xmit_lock_key;
-static struct lock_class_key vlan_netdev_addr_lock_key;
-
-static void vlan_dev_set_lockdep_one(struct net_device *dev,
-                                    struct netdev_queue *txq,
-                                    void *_subclass)
-{
-       lockdep_set_class_and_subclass(&txq->_xmit_lock,
-                                      &vlan_netdev_xmit_lock_key,
-                                      *(int *)_subclass);
-}
-
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
-{
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &vlan_netdev_addr_lock_key,
-                                      subclass);
-       netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
-}
-
-static int vlan_dev_get_lock_subclass(struct net_device *dev)
-{
-       return vlan_dev_priv(dev)->nest_level;
-}
-
 static const struct header_ops vlan_header_ops = {
        .create  = vlan_dev_hard_header,
        .parse   = eth_header_parse,
@@ -609,8 +579,6 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
-
        vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan->vlan_pcpu_stats)
                return -ENOMEM;
@@ -812,7 +780,6 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
-       .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
        .ndo_get_iflink         = vlan_dev_get_iflink,
 };
 
index b7528e7..0ce530a 100644 (file)
@@ -668,7 +668,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
                mask |= EPOLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* writable? */
index d78938e..5b0b20e 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/pkt_sched.h>
@@ -193,14 +195,18 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        unsigned char *ogm_buff;
        u32 random_seqno;
 
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
        /* randomize initial seqno to avoid collision */
        get_random_bytes(&random_seqno, sizeof(random_seqno));
        atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
 
        hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
        ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
-       if (!ogm_buff)
+       if (!ogm_buff) {
+               mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
                return -ENOMEM;
+       }
 
        hard_iface->bat_iv.ogm_buff = ogm_buff;
 
@@ -212,35 +218,59 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        batadv_ogm_packet->reserved = 0;
        batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
 
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+
        return 0;
 }
 
 static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
 {
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
        kfree(hard_iface->bat_iv.ogm_buff);
        hard_iface->bat_iv.ogm_buff = NULL;
+
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_ogm_packet *batadv_ogm_packet;
-       unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+       void *ogm_buff;
 
-       batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+       ogm_buff = hard_iface->bat_iv.ogm_buff;
+       if (!ogm_buff)
+               goto unlock;
+
+       batadv_ogm_packet = ogm_buff;
        ether_addr_copy(batadv_ogm_packet->orig,
                        hard_iface->net_dev->dev_addr);
        ether_addr_copy(batadv_ogm_packet->prev_sender,
                        hard_iface->net_dev->dev_addr);
+
+unlock:
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 static void
 batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_ogm_packet *batadv_ogm_packet;
-       unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+       void *ogm_buff;
 
-       batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+       ogm_buff = hard_iface->bat_iv.ogm_buff;
+       if (!ogm_buff)
+               goto unlock;
+
+       batadv_ogm_packet = ogm_buff;
        batadv_ogm_packet->ttl = BATADV_TTL;
+
+unlock:
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 /* when do we schedule our own ogm to be sent */
@@ -742,7 +772,11 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
        }
 }
 
-static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+/**
+ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
+ * @hard_iface: interface whose ogm buffer should be transmitted
+ */
+static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
@@ -753,9 +787,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        u16 tvlv_len = 0;
        unsigned long send_time;
 
-       if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
-           hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
-               return;
+       lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
 
        /* the interface gets activated here to avoid race conditions between
         * the moment of activating the interface in
@@ -823,6 +855,17 @@ out:
                batadv_hardif_put(primary_if);
 }
 
+static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+{
+       if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
+           hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
+               return;
+
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+       batadv_iv_ogm_schedule_buff(hard_iface);
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
 /**
  * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over iterface
  * @orig_node: originator which reproadcasted the OGMs directly
index dc4f743..8033f24 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/random.h>
 #include <linux/rculist.h>
@@ -256,14 +257,12 @@ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
 }
 
 /**
- * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
- * @work: work queue item
+ * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
+ * @bat_priv: the bat priv with all the soft interface information
  */
-static void batadv_v_ogm_send(struct work_struct *work)
+static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
 {
        struct batadv_hard_iface *hard_iface;
-       struct batadv_priv_bat_v *bat_v;
-       struct batadv_priv *bat_priv;
        struct batadv_ogm2_packet *ogm_packet;
        struct sk_buff *skb, *skb_tmp;
        unsigned char *ogm_buff;
@@ -271,8 +270,7 @@ static void batadv_v_ogm_send(struct work_struct *work)
        u16 tvlv_len = 0;
        int ret;
 
-       bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
-       bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+       lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
 
        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;
@@ -364,6 +362,23 @@ out:
 }
 
 /**
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
+ * @work: work queue item
+ */
+static void batadv_v_ogm_send(struct work_struct *work)
+{
+       struct batadv_priv_bat_v *bat_v;
+       struct batadv_priv *bat_priv;
+
+       bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
+       bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+
+       mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+       batadv_v_ogm_send_softif(bat_priv);
+       mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
+}
+
+/**
  * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface
  * @work: work queue item
  *
@@ -424,11 +439,15 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
        struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
        struct batadv_ogm2_packet *ogm_packet;
 
+       mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
        if (!bat_priv->bat_v.ogm_buff)
-               return;
+               goto unlock;
 
        ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
        ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
+
+unlock:
+       mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
 }
 
 /**
@@ -1050,6 +1069,8 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
        atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
        INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
 
+       mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
+
        return 0;
 }
 
@@ -1061,7 +1082,11 @@ void batadv_v_ogm_free(struct batadv_priv *bat_priv)
 {
        cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
 
+       mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+
        kfree(bat_priv->bat_v.ogm_buff);
        bat_priv->bat_v.ogm_buff = NULL;
        bat_priv->bat_v.ogm_buff_len = 0;
+
+       mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
 }
index c90e473..afb5228 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/limits.h>
 #include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
 #include <linux/rculist.h>
@@ -929,6 +930,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
        INIT_LIST_HEAD(&hard_iface->list);
        INIT_HLIST_HEAD(&hard_iface->neigh_list);
 
+       mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
        spin_lock_init(&hard_iface->neigh_list_lock);
        kref_init(&hard_iface->refcount);
 
index 9cbed6f..5ee8e9a 100644 (file)
@@ -740,36 +740,6 @@ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
        return 0;
 }
 
-/* batman-adv network devices have devices nesting below it and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key batadv_netdev_xmit_lock_key;
-static struct lock_class_key batadv_netdev_addr_lock_key;
-
-/**
- * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
- * @dev: device which owns the tx queue
- * @txq: tx queue to modify
- * @_unused: always NULL
- */
-static void batadv_set_lockdep_class_one(struct net_device *dev,
-                                        struct netdev_queue *txq,
-                                        void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
-}
-
-/**
- * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
- * @dev: network device to modify
- */
-static void batadv_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
-}
-
 /**
  * batadv_softif_init_late() - late stage initialization of soft interface
  * @dev: registered network device to modify
@@ -783,8 +753,6 @@ static int batadv_softif_init_late(struct net_device *dev)
        int ret;
        size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
 
-       batadv_set_lockdep_class(dev);
-
        bat_priv = netdev_priv(dev);
        bat_priv->soft_iface = dev;
 
index be7c02a..4d7f1ba 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/kref.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/sched.h> /* for linux/wait.h */
@@ -81,6 +82,9 @@ struct batadv_hard_iface_bat_iv {
 
        /** @ogm_seqno: OGM sequence number - used to identify each OGM */
        atomic_t ogm_seqno;
+
+       /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+       struct mutex ogm_buff_mutex;
 };
 
 /**
@@ -1539,6 +1543,9 @@ struct batadv_priv_bat_v {
        /** @ogm_seqno: OGM sequence number - used to identify each OGM */
        atomic_t ogm_seqno;
 
+       /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+       struct mutex ogm_buff_mutex;
+
        /** @ogm_wq: workqueue used to schedule OGM transmissions */
        struct delayed_work ogm_wq;
 };
index bb55d92..4febc82 100644 (file)
@@ -571,15 +571,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
        return err < 0 ? NET_XMIT_DROP : err;
 }
 
-static int bt_dev_init(struct net_device *dev)
-{
-       netdev_lockdep_set_classes(dev);
-
-       return 0;
-}
-
 static const struct net_device_ops netdev_ops = {
-       .ndo_init               = bt_dev_init,
        .ndo_start_xmit         = bt_xmit,
 };
 
index 94ddf19..5f508c5 100644 (file)
@@ -460,7 +460,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_state == BT_LISTEN)
                return bt_accept_poll(sk);
 
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -470,7 +470,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;
 
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_state == BT_CLOSED)
index 681b728..e804a30 100644 (file)
@@ -24,8 +24,6 @@
 const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_br_ops);
 
-static struct lock_class_key bridge_netdev_addr_lock_key;
-
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -108,11 +106,6 @@ out:
        return NETDEV_TX_OK;
 }
 
-static void br_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
-}
-
 static int br_dev_init(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -150,7 +143,6 @@ static int br_dev_init(struct net_device *dev)
                br_mdb_hash_fini(br);
                br_fdb_hash_fini(br);
        }
-       br_set_lockdep_class(dev);
 
        return err;
 }
index ed91ea3..12a4f4d 100644 (file)
@@ -20,7 +20,6 @@ static unsigned int
 ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct ebt_nat_info *info = par->targinfo;
-       struct net_device *dev;
 
        if (skb_ensure_writable(skb, ETH_ALEN))
                return EBT_DROP;
@@ -33,10 +32,22 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
                else
                        skb->pkt_type = PACKET_MULTICAST;
        } else {
-               if (xt_hooknum(par) != NF_BR_BROUTING)
-                       dev = br_port_get_rcu(xt_in(par))->br->dev;
-               else
+               const struct net_device *dev;
+
+               switch (xt_hooknum(par)) {
+               case NF_BR_BROUTING:
                        dev = xt_in(par);
+                       break;
+               case NF_BR_PRE_ROUTING:
+                       dev = br_port_get_rcu(xt_in(par))->br->dev;
+                       break;
+               default:
+                       dev = NULL;
+                       break;
+               }
+
+               if (!dev) /* NF_BR_LOCAL_OUT */
+                       return info->target;
 
                if (ether_addr_equal(info->mac, dev->dev_addr))
                        skb->pkt_type = PACKET_HOST;
index 506d614..8096732 100644 (file)
@@ -95,7 +95,7 @@ slow_path:
         * This may also be a clone skbuff, we could preserve the geometry for
         * the copies but probably not worth the effort.
         */
-       ip_frag_init(skb, hlen, ll_rs, frag_max_size, &state);
+       ip_frag_init(skb, hlen, ll_rs, frag_max_size, false, &state);
 
        while (state.left > 0) {
                struct sk_buff *skb2;
index 13ea920..ef14da5 100644 (file)
@@ -953,7 +953,7 @@ static __poll_t caif_poll(struct file *file,
                mask |= EPOLLRDHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue) ||
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
                (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= EPOLLIN | EPOLLRDNORM;
 
index 5518a7d..128d37a 100644 (file)
@@ -86,11 +86,12 @@ static atomic_t skbcounter = ATOMIC_INIT(0);
 
 /* af_can socket functions */
 
-static void can_sock_destruct(struct sock *sk)
+void can_sock_destruct(struct sock *sk)
 {
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_error_queue);
 }
+EXPORT_SYMBOL(can_sock_destruct);
 
 static const struct can_proto *can_get_proto(int protocol)
 {
index def2f81..137054b 100644 (file)
@@ -51,6 +51,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
        if (!skb)
                return;
 
+       j1939_priv_get(priv);
        can_skb_set_owner(skb, iskb->sk);
 
        /* get a pointer to the header of the skb
@@ -104,6 +105,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
        j1939_simple_recv(priv, skb);
        j1939_sk_recv(priv, skb);
  done:
+       j1939_priv_put(priv);
        kfree_skb(skb);
 }
 
@@ -150,6 +152,10 @@ static void __j1939_priv_release(struct kref *kref)
 
        netdev_dbg(priv->ndev, "%s: 0x%p\n", __func__, priv);
 
+       WARN_ON_ONCE(!list_empty(&priv->active_session_list));
+       WARN_ON_ONCE(!list_empty(&priv->ecus));
+       WARN_ON_ONCE(!list_empty(&priv->j1939_socks));
+
        dev_put(ndev);
        kfree(priv);
 }
@@ -207,6 +213,9 @@ static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
 {
        struct can_ml_priv *can_ml_priv = ndev->ml_priv;
 
+       if (!can_ml_priv)
+               return NULL;
+
        return can_ml_priv->j1939_priv;
 }
 
index 37c1040..de09b0a 100644 (file)
@@ -78,7 +78,6 @@ static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
 {
        jsk->state |= J1939_SOCK_BOUND;
        j1939_priv_get(priv);
-       jsk->priv = priv;
 
        spin_lock_bh(&priv->j1939_socks_lock);
        list_add_tail(&jsk->list, &priv->j1939_socks);
@@ -91,7 +90,6 @@ static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
        list_del_init(&jsk->list);
        spin_unlock_bh(&priv->j1939_socks_lock);
 
-       jsk->priv = NULL;
        j1939_priv_put(priv);
        jsk->state &= ~J1939_SOCK_BOUND;
 }
@@ -349,6 +347,34 @@ void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
        spin_unlock_bh(&priv->j1939_socks_lock);
 }
 
+static void j1939_sk_sock_destruct(struct sock *sk)
+{
+       struct j1939_sock *jsk = j1939_sk(sk);
+
+       /* This function will be call by the generic networking code, when then
+        * the socket is ultimately closed (sk->sk_destruct).
+        *
+        * The race between
+        * - processing a received CAN frame
+        *   (can_receive -> j1939_can_recv)
+        *   and accessing j1939_priv
+        * ... and ...
+        * - closing a socket
+        *   (j1939_can_rx_unregister -> can_rx_unregister)
+        *   and calling the final j1939_priv_put()
+        *
+        * is avoided by calling the final j1939_priv_put() from this
+        * RCU deferred cleanup call.
+        */
+       if (jsk->priv) {
+               j1939_priv_put(jsk->priv);
+               jsk->priv = NULL;
+       }
+
+       /* call generic CAN sock destruct */
+       can_sock_destruct(sk);
+}
+
 static int j1939_sk_init(struct sock *sk)
 {
        struct j1939_sock *jsk = j1939_sk(sk);
@@ -371,6 +397,7 @@ static int j1939_sk_init(struct sock *sk)
        atomic_set(&jsk->skb_pending, 0);
        spin_lock_init(&jsk->sk_session_queue_lock);
        INIT_LIST_HEAD(&jsk->sk_session_queue);
+       sk->sk_destruct = j1939_sk_sock_destruct;
 
        return 0;
 }
@@ -443,6 +470,12 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
                }
 
                jsk->ifindex = addr->can_ifindex;
+
+               /* the corresponding j1939_priv_put() is called via
+                * sk->sk_destruct, which points to j1939_sk_sock_destruct()
+                */
+               j1939_priv_get(priv);
+               jsk->priv = priv;
        }
 
        /* set default transmit pgn */
@@ -560,8 +593,8 @@ static int j1939_sk_release(struct socket *sock)
        if (!sk)
                return 0;
 
-       jsk = j1939_sk(sk);
        lock_sock(sk);
+       jsk = j1939_sk(sk);
 
        if (jsk->state & J1939_SOCK_BOUND) {
                struct j1939_priv *priv = jsk->priv;
@@ -580,6 +613,7 @@ static int j1939_sk_release(struct socket *sock)
                j1939_netdev_stop(priv);
        }
 
+       kfree(jsk->filters);
        sock_orphan(sk);
        sock->sk = NULL;
 
@@ -909,8 +943,10 @@ void j1939_sk_errqueue(struct j1939_session *session,
        memset(serr, 0, sizeof(*serr));
        switch (type) {
        case J1939_ERRQUEUE_ACK:
-               if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))
+               if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) {
+                       kfree_skb(skb);
                        return;
+               }
 
                serr->ee.ee_errno = ENOMSG;
                serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
@@ -918,8 +954,10 @@ void j1939_sk_errqueue(struct j1939_session *session,
                state = "ACK";
                break;
        case J1939_ERRQUEUE_SCHED:
-               if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED))
+               if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) {
+                       kfree_skb(skb);
                        return;
+               }
 
                serr->ee.ee_errno = ENOMSG;
                serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
@@ -1054,51 +1092,72 @@ static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
        struct j1939_sock *jsk = j1939_sk(sk);
-       struct j1939_priv *priv = jsk->priv;
+       struct j1939_priv *priv;
        int ifindex;
        int ret;
 
+       lock_sock(sock->sk);
        /* various socket state tests */
-       if (!(jsk->state & J1939_SOCK_BOUND))
-               return -EBADFD;
+       if (!(jsk->state & J1939_SOCK_BOUND)) {
+               ret = -EBADFD;
+               goto sendmsg_done;
+       }
 
+       priv = jsk->priv;
        ifindex = jsk->ifindex;
 
-       if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR)
+       if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
                /* no source address assigned yet */
-               return -EBADFD;
+               ret = -EBADFD;
+               goto sendmsg_done;
+       }
 
        /* deal with provided destination address info */
        if (msg->msg_name) {
                struct sockaddr_can *addr = msg->msg_name;
 
-               if (msg->msg_namelen < J1939_MIN_NAMELEN)
-                       return -EINVAL;
+               if (msg->msg_namelen < J1939_MIN_NAMELEN) {
+                       ret = -EINVAL;
+                       goto sendmsg_done;
+               }
 
-               if (addr->can_family != AF_CAN)
-                       return -EINVAL;
+               if (addr->can_family != AF_CAN) {
+                       ret = -EINVAL;
+                       goto sendmsg_done;
+               }
 
-               if (addr->can_ifindex && addr->can_ifindex != ifindex)
-                       return -EBADFD;
+               if (addr->can_ifindex && addr->can_ifindex != ifindex) {
+                       ret = -EBADFD;
+                       goto sendmsg_done;
+               }
 
                if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
-                   !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
-                       return -EINVAL;
+                   !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
+                       ret = -EINVAL;
+                       goto sendmsg_done;
+               }
 
                if (!addr->can_addr.j1939.name &&
                    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
-                   !sock_flag(sk, SOCK_BROADCAST))
+                   !sock_flag(sk, SOCK_BROADCAST)) {
                        /* broadcast, but SO_BROADCAST not set */
-                       return -EACCES;
+                       ret = -EACCES;
+                       goto sendmsg_done;
+               }
        } else {
                if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
-                   !sock_flag(sk, SOCK_BROADCAST))
+                   !sock_flag(sk, SOCK_BROADCAST)) {
                        /* broadcast, but SO_BROADCAST not set */
-                       return -EACCES;
+                       ret = -EACCES;
+                       goto sendmsg_done;
+               }
        }
 
        ret = j1939_sk_send_loop(priv, sk, msg, size);
 
+sendmsg_done:
+       release_sock(sock->sk);
+
        return ret;
 }
 
index fe000ea..9f99af5 100644 (file)
@@ -255,6 +255,7 @@ static void __j1939_session_drop(struct j1939_session *session)
                return;
 
        j1939_sock_pending_del(session->sk);
+       sock_put(session->sk);
 }
 
 static void j1939_session_destroy(struct j1939_session *session)
@@ -266,6 +267,9 @@ static void j1939_session_destroy(struct j1939_session *session)
 
        netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
 
+       WARN_ON_ONCE(!list_empty(&session->sk_session_queue_entry));
+       WARN_ON_ONCE(!list_empty(&session->active_session_list_entry));
+
        skb_queue_purge(&session->skb_queue);
        __j1939_session_drop(session);
        j1939_priv_put(session->priv);
@@ -1042,12 +1046,13 @@ j1939_session_deactivate_activate_next(struct j1939_session *session)
                j1939_sk_queue_activate_next(session);
 }
 
-static void j1939_session_cancel(struct j1939_session *session,
+static void __j1939_session_cancel(struct j1939_session *session,
                                 enum j1939_xtp_abort err)
 {
        struct j1939_priv *priv = session->priv;
 
        WARN_ON_ONCE(!err);
+       lockdep_assert_held(&session->priv->active_session_list_lock);
 
        session->err = j1939_xtp_abort_to_errno(priv, err);
        /* do not send aborts on incoming broadcasts */
@@ -1062,6 +1067,20 @@ static void j1939_session_cancel(struct j1939_session *session,
                j1939_sk_send_loop_abort(session->sk, session->err);
 }
 
+static void j1939_session_cancel(struct j1939_session *session,
+                                enum j1939_xtp_abort err)
+{
+       j1939_session_list_lock(session->priv);
+
+       if (session->state >= J1939_SESSION_ACTIVE &&
+           session->state < J1939_SESSION_WAITING_ABORT) {
+               j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
+               __j1939_session_cancel(session, err);
+       }
+
+       j1939_session_list_unlock(session->priv);
+}
+
 static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
 {
        struct j1939_session *session =
@@ -1108,8 +1127,6 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
                netdev_alert(priv->ndev, "%s: 0x%p: tx aborted with unknown reason: %i\n",
                             __func__, session, ret);
                if (session->skcb.addr.type != J1939_SIMPLE) {
-                       j1939_tp_set_rxtimeout(session,
-                                              J1939_XTP_ABORT_TIMEOUT_MS);
                        j1939_session_cancel(session, J1939_XTP_ABORT_OTHER);
                } else {
                        session->err = ret;
@@ -1169,7 +1186,7 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
                        hrtimer_start(&session->rxtimer,
                                      ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
                                      HRTIMER_MODE_REL_SOFT);
-                       j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
+                       __j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
                }
                j1939_session_list_unlock(session->priv);
        }
@@ -1273,9 +1290,27 @@ j1939_xtp_rx_abort(struct j1939_priv *priv, struct sk_buff *skb,
 static void
 j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb)
 {
+       struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
+       const u8 *dat;
+       int len;
+
        if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
                return;
 
+       dat = skb->data;
+
+       if (skcb->addr.type == J1939_ETP)
+               len = j1939_etp_ctl_to_size(dat);
+       else
+               len = j1939_tp_ctl_to_size(dat);
+
+       if (session->total_message_size != len) {
+               netdev_warn_once(session->priv->ndev,
+                                "%s: 0x%p: Incorrect size. Expected: %i; got: %i.\n",
+                                __func__, session, session->total_message_size,
+                                len);
+       }
+
        netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
 
        session->pkt.tx_acked = session->pkt.total;
@@ -1357,7 +1392,6 @@ j1939_xtp_rx_cts_one(struct j1939_session *session, struct sk_buff *skb)
 
  out_session_cancel:
        j1939_session_timers_cancel(session);
-       j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
        j1939_session_cancel(session, err);
 }
 
@@ -1432,7 +1466,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
        skcb = j1939_skb_to_cb(skb);
        memcpy(skcb, rel_skcb, sizeof(*skcb));
 
-       session = j1939_session_new(priv, skb, skb->len);
+       session = j1939_session_new(priv, skb, size);
        if (!session) {
                kfree_skb(skb);
                return NULL;
@@ -1554,7 +1588,6 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
 
                /* RTS on active session */
                j1939_session_timers_cancel(session);
-               j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
                j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
        }
 
@@ -1565,7 +1598,6 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
                             session->last_cmd);
 
                j1939_session_timers_cancel(session);
-               j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
                j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
 
                return -EBUSY;
@@ -1767,7 +1799,6 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
 
  out_session_cancel:
        j1939_session_timers_cancel(session);
-       j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
        j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
        j1939_session_put(session);
 }
@@ -1848,6 +1879,7 @@ struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
                return ERR_PTR(-ENOMEM);
 
        /* skb is recounted in j1939_session_new() */
+       sock_hold(skb->sk);
        session->sk = skb->sk;
        session->transmission = true;
        session->pkt.total = (size + 6) / 7;
@@ -2010,7 +2042,11 @@ int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk)
                                 &priv->active_session_list,
                                 active_session_list_entry) {
                if (!sk || sk == session->sk) {
-                       j1939_session_timers_cancel(session);
+                       if (hrtimer_try_to_cancel(&session->txtimer) == 1)
+                               j1939_session_put(session);
+                       if (hrtimer_try_to_cancel(&session->rxtimer) == 1)
+                               j1939_session_put(session);
+
                        session->err = ESHUTDOWN;
                        j1939_session_deactivate_locked(session);
                }
index c210fc1..da3c24e 100644 (file)
@@ -97,7 +97,7 @@ int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
        if (error)
                goto out_err;
 
-       if (sk->sk_receive_queue.prev != skb)
+       if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
                goto out;
 
        /* Socket shut down? */
@@ -278,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                        break;
 
                sk_busy_loop(sk, flags & MSG_DONTWAIT);
-       } while (sk->sk_receive_queue.prev != *last);
+       } while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
 
        error = -EAGAIN;
 
@@ -767,7 +767,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
        mask = 0;
 
        /* exceptional events? */
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -777,7 +777,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
                mask |= EPOLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
index bf3ed41..99ac84f 100644 (file)
 #include "net-sysfs.h"
 
 #define MAX_GRO_SKBS 8
+#define MAX_NEST_DEV 8
 
 /* This should be increased if a protocol with a bigger head is added. */
 #define GRO_MAX_HEAD (MAX_HEADER + 128)
@@ -276,88 +277,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 EXPORT_PER_CPU_SYMBOL(softnet_data);
 
-#ifdef CONFIG_LOCKDEP
-/*
- * register_netdevice() inits txq->_xmit_lock and sets lockdep class
- * according to dev->type
- */
-static const unsigned short netdev_lock_type[] = {
-        ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
-        ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
-        ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
-        ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
-        ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
-        ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
-        ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
-        ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
-        ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
-        ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
-        ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
-        ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
-        ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
-        ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
-        ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
-
-static const char *const netdev_lock_name[] = {
-       "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
-       "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
-       "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
-       "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
-       "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
-       "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
-       "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
-       "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
-       "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
-       "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
-       "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
-       "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
-       "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
-       "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
-       "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
-
-static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
-static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
-
-static inline unsigned short netdev_lock_pos(unsigned short dev_type)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
-               if (netdev_lock_type[i] == dev_type)
-                       return i;
-       /* the last key is used by default */
-       return ARRAY_SIZE(netdev_lock_type) - 1;
-}
-
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-                                                unsigned short dev_type)
-{
-       int i;
-
-       i = netdev_lock_pos(dev_type);
-       lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
-                                  netdev_lock_name[i]);
-}
-
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-       int i;
-
-       i = netdev_lock_pos(dev->type);
-       lockdep_set_class_and_name(&dev->addr_list_lock,
-                                  &netdev_addr_lock_key[i],
-                                  netdev_lock_name[i]);
-}
-#else
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-                                                unsigned short dev_type)
-{
-}
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-}
-#endif
-
 /*******************************************************************************
  *
  *             Protocol management and registration routines
@@ -6489,6 +6408,9 @@ struct netdev_adjacent {
        /* upper master flag, there can only be one master device per list */
        bool master;
 
+       /* lookup ignore flag */
+       bool ignore;
+
        /* counter for the number of times this device was added to us */
        u16 ref_nr;
 
@@ -6511,7 +6433,7 @@ static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
        return NULL;
 }
 
-static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
+static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
 {
        struct net_device *dev = data;
 
@@ -6532,7 +6454,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
 {
        ASSERT_RTNL();
 
-       return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+       return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
                                             upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
@@ -6550,7 +6472,7 @@ EXPORT_SYMBOL(netdev_has_upper_dev);
 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
                                  struct net_device *upper_dev)
 {
-       return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+       return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
                                               upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
@@ -6594,6 +6516,22 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get);
 
+static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
+{
+       struct netdev_adjacent *upper;
+
+       ASSERT_RTNL();
+
+       if (list_empty(&dev->adj_list.upper))
+               return NULL;
+
+       upper = list_first_entry(&dev->adj_list.upper,
+                                struct netdev_adjacent, list);
+       if (likely(upper->master) && !upper->ignore)
+               return upper->dev;
+       return NULL;
+}
+
 /**
  * netdev_has_any_lower_dev - Check if device is linked to some device
  * @dev: device
@@ -6644,6 +6582,23 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
 
+static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
+                                                 struct list_head **iter,
+                                                 bool *ignore)
+{
+       struct netdev_adjacent *upper;
+
+       upper = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&upper->list == &dev->adj_list.upper)
+               return NULL;
+
+       *iter = &upper->list;
+       *ignore = upper->ignore;
+
+       return upper->dev;
+}
+
 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
                                                    struct list_head **iter)
 {
@@ -6661,34 +6616,111 @@ static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
        return upper->dev;
 }
 
+static int __netdev_walk_all_upper_dev(struct net_device *dev,
+                                      int (*fn)(struct net_device *dev,
+                                                void *data),
+                                      void *data)
+{
+       struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+       bool ignore;
+
+       now = dev;
+       iter = &dev->adj_list.upper;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       udev = __netdev_next_upper_dev(now, &iter, &ignore);
+                       if (!udev)
+                               break;
+                       if (ignore)
+                               continue;
+
+                       next = udev;
+                       niter = &udev->adj_list.upper;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
+       }
+
+       return 0;
+}
+
 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
                                  int (*fn)(struct net_device *dev,
                                            void *data),
                                  void *data)
 {
-       struct net_device *udev;
-       struct list_head *iter;
-       int ret;
+       struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
 
-       for (iter = &dev->adj_list.upper,
-            udev = netdev_next_upper_dev_rcu(dev, &iter);
-            udev;
-            udev = netdev_next_upper_dev_rcu(dev, &iter)) {
-               /* first is the upper device itself */
-               ret = fn(udev, data);
-               if (ret)
-                       return ret;
+       now = dev;
+       iter = &dev->adj_list.upper;
 
-               /* then look at all of its upper devices */
-               ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
-               if (ret)
-                       return ret;
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       udev = netdev_next_upper_dev_rcu(now, &iter);
+                       if (!udev)
+                               break;
+
+                       next = udev;
+                       niter = &udev->adj_list.upper;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
 
+static bool __netdev_has_upper_dev(struct net_device *dev,
+                                  struct net_device *upper_dev)
+{
+       ASSERT_RTNL();
+
+       return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
+                                          upper_dev);
+}
+
 /**
  * netdev_lower_get_next_private - Get the next ->private from the
  *                                lower neighbour list
@@ -6785,34 +6817,119 @@ static struct net_device *netdev_next_lower_dev(struct net_device *dev,
        return lower->dev;
 }
 
+static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
+                                                 struct list_head **iter,
+                                                 bool *ignore)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       *iter = &lower->list;
+       *ignore = lower->ignore;
+
+       return lower->dev;
+}
+
 int netdev_walk_all_lower_dev(struct net_device *dev,
                              int (*fn)(struct net_device *dev,
                                        void *data),
                              void *data)
 {
-       struct net_device *ldev;
-       struct list_head *iter;
-       int ret;
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
 
-       for (iter = &dev->adj_list.lower,
-            ldev = netdev_next_lower_dev(dev, &iter);
-            ldev;
-            ldev = netdev_next_lower_dev(dev, &iter)) {
-               /* first is the lower device itself */
-               ret = fn(ldev, data);
-               if (ret)
-                       return ret;
+       now = dev;
+       iter = &dev->adj_list.lower;
 
-               /* then look at all of its lower devices */
-               ret = netdev_walk_all_lower_dev(ldev, fn, data);
-               if (ret)
-                       return ret;
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = netdev_next_lower_dev(now, &iter);
+                       if (!ldev)
+                               break;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
 
+static int __netdev_walk_all_lower_dev(struct net_device *dev,
+                                      int (*fn)(struct net_device *dev,
+                                                void *data),
+                                      void *data)
+{
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+       bool ignore;
+
+       now = dev;
+       iter = &dev->adj_list.lower;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = __netdev_next_lower_dev(now, &iter, &ignore);
+                       if (!ldev)
+                               break;
+                       if (ignore)
+                               continue;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
+       }
+
+       return 0;
+}
+
 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
                                                    struct list_head **iter)
 {
@@ -6827,28 +6944,99 @@ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
        return lower->dev;
 }
 
-int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
-                                 int (*fn)(struct net_device *dev,
-                                           void *data),
-                                 void *data)
+static u8 __netdev_upper_depth(struct net_device *dev)
+{
+       struct net_device *udev;
+       struct list_head *iter;
+       u8 max_depth = 0;
+       bool ignore;
+
+       for (iter = &dev->adj_list.upper,
+            udev = __netdev_next_upper_dev(dev, &iter, &ignore);
+            udev;
+            udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
+               if (ignore)
+                       continue;
+               if (max_depth < udev->upper_level)
+                       max_depth = udev->upper_level;
+       }
+
+       return max_depth;
+}
+
+static u8 __netdev_lower_depth(struct net_device *dev)
 {
        struct net_device *ldev;
        struct list_head *iter;
-       int ret;
+       u8 max_depth = 0;
+       bool ignore;
 
        for (iter = &dev->adj_list.lower,
-            ldev = netdev_next_lower_dev_rcu(dev, &iter);
+            ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
             ldev;
-            ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
-               /* first is the lower device itself */
-               ret = fn(ldev, data);
-               if (ret)
-                       return ret;
+            ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
+               if (ignore)
+                       continue;
+               if (max_depth < ldev->lower_level)
+                       max_depth = ldev->lower_level;
+       }
 
-               /* then look at all of its lower devices */
-               ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
-               if (ret)
-                       return ret;
+       return max_depth;
+}
+
+static int __netdev_update_upper_level(struct net_device *dev, void *data)
+{
+       dev->upper_level = __netdev_upper_depth(dev) + 1;
+       return 0;
+}
+
+static int __netdev_update_lower_level(struct net_device *dev, void *data)
+{
+       dev->lower_level = __netdev_lower_depth(dev) + 1;
+       return 0;
+}
+
+int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
+                                 int (*fn)(struct net_device *dev,
+                                           void *data),
+                                 void *data)
+{
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+
+       now = dev;
+       iter = &dev->adj_list.lower;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = netdev_next_lower_dev_rcu(now, &iter);
+                       if (!ldev)
+                               break;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
@@ -6952,6 +7140,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        adj->master = master;
        adj->ref_nr = 1;
        adj->private = private;
+       adj->ignore = false;
        dev_hold(adj_dev);
 
        pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
@@ -7102,14 +7291,17 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                return -EBUSY;
 
        /* To prevent loops, check if dev is not upper device to upper_dev. */
-       if (netdev_has_upper_dev(upper_dev, dev))
+       if (__netdev_has_upper_dev(upper_dev, dev))
                return -EBUSY;
 
+       if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
+               return -EMLINK;
+
        if (!master) {
-               if (netdev_has_upper_dev(dev, upper_dev))
+               if (__netdev_has_upper_dev(dev, upper_dev))
                        return -EEXIST;
        } else {
-               master_dev = netdev_master_upper_dev_get(dev);
+               master_dev = __netdev_master_upper_dev_get(dev);
                if (master_dev)
                        return master_dev == upper_dev ? -EEXIST : -EBUSY;
        }
@@ -7131,6 +7323,13 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        if (ret)
                goto rollback;
 
+       __netdev_update_upper_level(dev, NULL);
+       __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+       __netdev_update_lower_level(upper_dev, NULL);
+       __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+                                   NULL);
+
        return 0;
 
 rollback:
@@ -7213,9 +7412,96 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 
        call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
                                      &changeupper_info.info);
+
+       __netdev_update_upper_level(dev, NULL);
+       __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+       __netdev_update_lower_level(upper_dev, NULL);
+       __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+                                   NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
+                                     struct net_device *lower_dev,
+                                     bool val)
+{
+       struct netdev_adjacent *adj;
+
+       adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
+       if (adj)
+               adj->ignore = val;
+
+       adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
+       if (adj)
+               adj->ignore = val;
+}
+
+static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
+                                       struct net_device *lower_dev)
+{
+       __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
+}
+
+static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
+                                      struct net_device *lower_dev)
+{
+       __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
+}
+
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev,
+                                  struct netlink_ext_ack *extack)
+{
+       int err;
+
+       if (!new_dev)
+               return 0;
+
+       if (old_dev && new_dev != old_dev)
+               netdev_adjacent_dev_disable(dev, old_dev);
+
+       err = netdev_upper_dev_link(new_dev, dev, extack);
+       if (err) {
+               if (old_dev && new_dev != old_dev)
+                       netdev_adjacent_dev_enable(dev, old_dev);
+               return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(netdev_adjacent_change_prepare);
+
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev)
+{
+       if (!new_dev || !old_dev)
+               return;
+
+       if (new_dev == old_dev)
+               return;
+
+       netdev_adjacent_dev_enable(dev, old_dev);
+       netdev_upper_dev_unlink(old_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_commit);
+
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+                                 struct net_device *new_dev,
+                                 struct net_device *dev)
+{
+       if (!new_dev)
+               return;
+
+       if (old_dev && new_dev != old_dev)
+               netdev_adjacent_dev_enable(dev, old_dev);
+
+       netdev_upper_dev_unlink(new_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_abort);
+
 /**
  * netdev_bonding_info_change - Dispatch event about slave change
  * @dev: device
@@ -7329,25 +7615,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
 
-int dev_get_nest_level(struct net_device *dev)
-{
-       struct net_device *lower = NULL;
-       struct list_head *iter;
-       int max_nest = -1;
-       int nest;
-
-       ASSERT_RTNL();
-
-       netdev_for_each_lower_dev(dev, lower, iter) {
-               nest = dev_get_nest_level(lower);
-               if (max_nest < nest)
-                       max_nest = nest;
-       }
-
-       return max_nest + 1;
-}
-EXPORT_SYMBOL(dev_get_nest_level);
-
 /**
  * netdev_lower_change - Dispatch event about lower device state change
  * @lower_dev: device
@@ -8154,7 +8421,8 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                        return -EINVAL;
                }
 
-               if (prog->aux->id == prog_id) {
+               /* prog->aux->id may be 0 for orphaned device-bound progs */
+               if (prog->aux->id && prog->aux->id == prog_id) {
                        bpf_prog_put(prog);
                        return 0;
                }
@@ -8619,7 +8887,7 @@ static void netdev_init_one_queue(struct net_device *dev,
 {
        /* Initialize queue lock */
        spin_lock_init(&queue->_xmit_lock);
-       netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+       lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
@@ -8666,6 +8934,43 @@ void netif_tx_stop_all_queues(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_tx_stop_all_queues);
 
+static void netdev_register_lockdep_key(struct net_device *dev)
+{
+       lockdep_register_key(&dev->qdisc_tx_busylock_key);
+       lockdep_register_key(&dev->qdisc_running_key);
+       lockdep_register_key(&dev->qdisc_xmit_lock_key);
+       lockdep_register_key(&dev->addr_list_lock_key);
+}
+
+static void netdev_unregister_lockdep_key(struct net_device *dev)
+{
+       lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
+       lockdep_unregister_key(&dev->qdisc_running_key);
+       lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+       lockdep_unregister_key(&dev->addr_list_lock_key);
+}
+
+void netdev_update_lockdep_key(struct net_device *dev)
+{
+       struct netdev_queue *queue;
+       int i;
+
+       lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+       lockdep_unregister_key(&dev->addr_list_lock_key);
+
+       lockdep_register_key(&dev->qdisc_xmit_lock_key);
+       lockdep_register_key(&dev->addr_list_lock_key);
+
+       lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               queue = netdev_get_tx_queue(dev, i);
+
+               lockdep_set_class(&queue->_xmit_lock,
+                                 &dev->qdisc_xmit_lock_key);
+       }
+}
+EXPORT_SYMBOL(netdev_update_lockdep_key);
+
 /**
  *     register_netdevice      - register a network device
  *     @dev: device to register
@@ -8700,7 +9005,7 @@ int register_netdevice(struct net_device *dev)
        BUG_ON(!net);
 
        spin_lock_init(&dev->addr_list_lock);
-       netdev_set_addr_lockdep_class(dev);
+       lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
 
        ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
@@ -9210,8 +9515,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        dev_net_set(dev, &init_net);
 
+       netdev_register_lockdep_key(dev);
+
        dev->gso_max_size = GSO_MAX_SIZE;
        dev->gso_max_segs = GSO_MAX_SEGS;
+       dev->upper_level = 1;
+       dev->lower_level = 1;
 
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
@@ -9292,6 +9601,8 @@ void free_netdev(struct net_device *dev)
        free_percpu(dev->pcpu_refcnt);
        dev->pcpu_refcnt = NULL;
 
+       netdev_unregister_lockdep_key(dev);
+
        /*  Compatibility with error handling in drivers */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                netdev_freemem(dev);
@@ -9460,7 +9771,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        rcu_barrier();
 
-       new_nsid = peernet2id_alloc(dev_net(dev), net);
+       new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
        /* If there is an ifindex conflict assign a new one */
        if (__dev_get_by_index(net, dev->ifindex))
                new_ifindex = dev_new_index(net);
index 6393ba9..2f949b5 100644 (file)
@@ -637,7 +637,7 @@ int dev_uc_sync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -667,7 +667,7 @@ int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -691,7 +691,7 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
                return;
 
        netif_addr_lock_bh(from);
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
        __dev_set_rx_mode(to);
        netif_addr_unlock(to);
@@ -858,7 +858,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -888,7 +888,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -912,7 +912,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
                return;
 
        netif_addr_lock_bh(from);
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
        __dev_set_rx_mode(to);
        netif_addr_unlock(to);
index f80151e..93905dc 100644 (file)
@@ -2699,7 +2699,7 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
        struct devlink *devlink = info->user_ptr[0];
        int err;
 
-       if (!devlink_reload_supported(devlink))
+       if (!devlink_reload_supported(devlink) || !devlink->reload_enabled)
                return -EOPNOTSUPP;
 
        err = devlink_resources_validate(devlink, NULL, info);
@@ -4618,6 +4618,7 @@ struct devlink_health_reporter {
        bool auto_recover;
        u8 health_state;
        u64 dump_ts;
+       u64 dump_real_ts;
        u64 error_count;
        u64 recovery_count;
        u64 last_recovery_ts;
@@ -4790,6 +4791,7 @@ static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
                goto dump_err;
 
        reporter->dump_ts = jiffies;
+       reporter->dump_real_ts = ktime_get_real_ns();
 
        return 0;
 
@@ -4952,6 +4954,10 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg,
                              jiffies_to_msecs(reporter->dump_ts),
                              DEVLINK_ATTR_PAD))
                goto reporter_nest_cancel;
+       if (reporter->dump_fmsg &&
+           nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
+                             reporter->dump_real_ts, DEVLINK_ATTR_PAD))
+               goto reporter_nest_cancel;
 
        nla_nest_end(msg, reporter_attr);
        genlmsg_end(msg, hdr);
@@ -6196,6 +6202,8 @@ EXPORT_SYMBOL_GPL(devlink_register);
 void devlink_unregister(struct devlink *devlink)
 {
        mutex_lock(&devlink_mutex);
+       WARN_ON(devlink_reload_supported(devlink) &&
+               devlink->reload_enabled);
        devlink_notify(devlink, DEVLINK_CMD_DEL);
        list_del(&devlink->list);
        mutex_unlock(&devlink_mutex);
@@ -6203,6 +6211,41 @@ void devlink_unregister(struct devlink *devlink)
 EXPORT_SYMBOL_GPL(devlink_unregister);
 
 /**
+ *     devlink_reload_enable - Enable reload of devlink instance
+ *
+ *     @devlink: devlink
+ *
+ *     Should be called at end of device initialization
+ *     process when reload operation is supported.
+ */
+void devlink_reload_enable(struct devlink *devlink)
+{
+       mutex_lock(&devlink_mutex);
+       devlink->reload_enabled = true;
+       mutex_unlock(&devlink_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_reload_enable);
+
+/**
+ *     devlink_reload_disable - Disable reload of devlink instance
+ *
+ *     @devlink: devlink
+ *
+ *     Should be called at the beginning of device cleanup
+ *     process when reload operation is supported.
+ */
+void devlink_reload_disable(struct devlink *devlink)
+{
+       mutex_lock(&devlink_mutex);
+       /* Mutex is taken which ensures that no reload operation is in
+        * progress while setting up forbidded flag.
+        */
+       devlink->reload_enabled = false;
+       mutex_unlock(&devlink_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_reload_disable);
+
+/**
  *     devlink_free - Free devlink instance resources
  *
  *     @devlink: devlink
index c763106..cd9bc67 100644 (file)
@@ -1396,11 +1396,13 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
 
 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 {
-       struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+       struct ethtool_wolinfo wol;
 
        if (!dev->ethtool_ops->get_wol)
                return -EOPNOTSUPP;
 
+       memset(&wol, 0, sizeof(struct ethtool_wolinfo));
+       wol.cmd = ETHTOOL_GWOL;
        dev->ethtool_ops->get_wol(dev, &wol);
 
        if (copy_to_user(useraddr, &wol, sizeof(wol)))
index 7c09d87..68eda10 100644 (file)
@@ -1350,30 +1350,21 @@ out_bad:
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
 
-static u32 hashrnd __read_mostly;
+static siphash_key_t hashrnd __read_mostly;
 static __always_inline void __flow_hash_secret_init(void)
 {
        net_get_random_once(&hashrnd, sizeof(hashrnd));
 }
 
-static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
-                                            u32 keyval)
+static const void *flow_keys_hash_start(const struct flow_keys *flow)
 {
-       return jhash2(words, length, keyval);
-}
-
-static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
-{
-       const void *p = flow;
-
-       BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
-       return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
+       BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
+       return &flow->FLOW_KEYS_HASH_START_FIELD;
 }
 
 static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
 {
        size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
-       BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
        BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
                     sizeof(*flow) - sizeof(flow->addrs));
 
@@ -1388,7 +1379,7 @@ static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
                diff -= sizeof(flow->addrs.tipckey);
                break;
        }
-       return (sizeof(*flow) - diff) / sizeof(u32);
+       return sizeof(*flow) - diff;
 }
 
 __be32 flow_get_u32_src(const struct flow_keys *flow)
@@ -1454,14 +1445,15 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
        }
 }
 
-static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
+                                       const siphash_key_t *keyval)
 {
        u32 hash;
 
        __flow_hash_consistentify(keys);
 
-       hash = __flow_hash_words(flow_keys_hash_start(keys),
-                                flow_keys_hash_length(keys), keyval);
+       hash = siphash(flow_keys_hash_start(keys),
+                      flow_keys_hash_length(keys), keyval);
        if (!hash)
                hash = 1;
 
@@ -1471,12 +1463,13 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
 u32 flow_hash_from_keys(struct flow_keys *keys)
 {
        __flow_hash_secret_init();
-       return __flow_hash_from_keys(keys, hashrnd);
+       return __flow_hash_from_keys(keys, &hashrnd);
 }
 EXPORT_SYMBOL(flow_hash_from_keys);
 
 static inline u32 ___skb_get_hash(const struct sk_buff *skb,
-                                 struct flow_keys *keys, u32 keyval)
+                                 struct flow_keys *keys,
+                                 const siphash_key_t *keyval)
 {
        skb_flow_dissect_flow_keys(skb, keys,
                                   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
@@ -1524,7 +1517,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
                           &keys, NULL, 0, 0, 0,
                           FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
-       return __flow_hash_from_keys(&keys, hashrnd);
+       return __flow_hash_from_keys(&keys, &hashrnd);
 }
 EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
 
@@ -1544,13 +1537,14 @@ void __skb_get_hash(struct sk_buff *skb)
 
        __flow_hash_secret_init();
 
-       hash = ___skb_get_hash(skb, &keys, hashrnd);
+       hash = ___skb_get_hash(skb, &keys, &hashrnd);
 
        __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+                          const siphash_key_t *perturb)
 {
        struct flow_keys keys;
 
index f93785e..74cfb8b 100644 (file)
@@ -88,11 +88,16 @@ static int bpf_lwt_input_reroute(struct sk_buff *skb)
        int err = -EINVAL;
 
        if (skb->protocol == htons(ETH_P_IP)) {
+               struct net_device *dev = skb_dst(skb)->dev;
                struct iphdr *iph = ip_hdr(skb);
 
+               dev_hold(dev);
+               skb_dst_drop(skb);
                err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
-                                          iph->tos, skb_dst(skb)->dev);
+                                          iph->tos, dev);
+               dev_put(dev);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               skb_dst_drop(skb);
                err = ipv6_stub->ipv6_route_input(skb);
        } else {
                err = -EAFNOSUPPORT;
index 6d3e482..3940284 100644 (file)
@@ -246,11 +246,11 @@ static int __peernet2id(struct net *net, struct net *peer)
 }
 
 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
-                             struct nlmsghdr *nlh);
+                             struct nlmsghdr *nlh, gfp_t gfp);
 /* This function returns the id of a peer netns. If no id is assigned, one will
  * be allocated and returned.
  */
-int peernet2id_alloc(struct net *net, struct net *peer)
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 {
        bool alloc = false, alive = false;
        int id;
@@ -269,7 +269,7 @@ int peernet2id_alloc(struct net *net, struct net *peer)
        id = __peernet2id_alloc(net, peer, &alloc);
        spin_unlock_bh(&net->nsid_lock);
        if (alloc && id >= 0)
-               rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL);
+               rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
        if (alive)
                put_net(peer);
        return id;
@@ -479,6 +479,7 @@ struct net *copy_net_ns(unsigned long flags,
 
        if (rv < 0) {
 put_userns:
+               key_remove_domain(net->key_domain);
                put_user_ns(user_ns);
                net_drop_ns(net);
 dec_ucounts:
@@ -533,7 +534,8 @@ static void unhash_nsid(struct net *net, struct net *last)
                        idr_remove(&tmp->netns_ids, id);
                spin_unlock_bh(&tmp->nsid_lock);
                if (id >= 0)
-                       rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL);
+                       rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
+                                         GFP_KERNEL);
                if (tmp == last)
                        break;
        }
@@ -766,7 +768,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
        spin_unlock_bh(&net->nsid_lock);
        if (err >= 0) {
                rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
-                                 nlh);
+                                 nlh, GFP_KERNEL);
                err = 0;
        } else if (err == -ENOSPC && nsid >= 0) {
                err = -EEXIST;
@@ -1054,7 +1056,7 @@ end:
 }
 
 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
-                             struct nlmsghdr *nlh)
+                             struct nlmsghdr *nlh, gfp_t gfp)
 {
        struct net_fill_args fillargs = {
                .portid = portid,
@@ -1065,7 +1067,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
        struct sk_buff *msg;
        int err = -ENOMEM;
 
-       msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
+       msg = nlmsg_new(rtnl_net_get_size(), gfp);
        if (!msg)
                goto out;
 
@@ -1073,7 +1075,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
        if (err < 0)
                goto err_out;
 
-       rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0);
+       rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
        return;
 
 err_out:
index 1ee6460..c81cd80 100644 (file)
@@ -1523,7 +1523,7 @@ static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
 
 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
                                  const struct net_device *dev,
-                                 struct net *src_net)
+                                 struct net *src_net, gfp_t gfp)
 {
        bool put_iflink = false;
 
@@ -1531,7 +1531,7 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
                struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
 
                if (!net_eq(dev_net(dev), link_net)) {
-                       int id = peernet2id_alloc(src_net, link_net);
+                       int id = peernet2id_alloc(src_net, link_net, gfp);
 
                        if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
                                return -EMSGSIZE;
@@ -1589,7 +1589,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
                            int type, u32 pid, u32 seq, u32 change,
                            unsigned int flags, u32 ext_filter_mask,
                            u32 event, int *new_nsid, int new_ifindex,
-                           int tgt_netnsid)
+                           int tgt_netnsid, gfp_t gfp)
 {
        struct ifinfomsg *ifm;
        struct nlmsghdr *nlh;
@@ -1681,7 +1681,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
                        goto nla_put_failure;
        }
 
-       if (rtnl_fill_link_netnsid(skb, dev, src_net))
+       if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
                goto nla_put_failure;
 
        if (new_nsid &&
@@ -2001,7 +2001,7 @@ walk_entries:
                                               NETLINK_CB(cb->skb).portid,
                                               nlh->nlmsg_seq, 0, flags,
                                               ext_filter_mask, 0, NULL, 0,
-                                              netnsid);
+                                              netnsid, GFP_KERNEL);
 
                        if (err < 0) {
                                if (likely(skb->len))
@@ -2355,6 +2355,7 @@ static int do_set_master(struct net_device *dev, int ifindex,
                        err = ops->ndo_del_slave(upper_dev, dev);
                        if (err)
                                return err;
+                       netdev_update_lockdep_key(dev);
                } else {
                        return -EOPNOTSUPP;
                }
@@ -3359,7 +3360,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
        err = rtnl_fill_ifinfo(nskb, dev, net,
                               RTM_NEWLINK, NETLINK_CB(skb).portid,
                               nlh->nlmsg_seq, 0, 0, ext_filter_mask,
-                              0, NULL, 0, netnsid);
+                              0, NULL, 0, netnsid, GFP_KERNEL);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in if_nlmsg_size */
                WARN_ON(err == -EMSGSIZE);
@@ -3471,7 +3472,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
 
        err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
                               type, 0, 0, change, 0, 0, event,
-                              new_nsid, new_ifindex, -1);
+                              new_nsid, new_ifindex, -1, flags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in if_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -3916,7 +3917,7 @@ static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
        ndm = nlmsg_data(nlh);
        if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
            ndm->ndm_flags || ndm->ndm_type) {
-               NL_SET_ERR_MSG(extack, "Invalid values in header for fbd dump request");
+               NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
                return -EINVAL;
        }
 
index cf390e0..ad31e4e 100644 (file)
@@ -270,18 +270,28 @@ void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
 
        msg->sg.data[i].length -= trim;
        sk_mem_uncharge(sk, trim);
+       /* Adjust copybreak if it falls into the trimmed part of last buf */
+       if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
+               msg->sg.copybreak = msg->sg.data[i].length;
 out:
-       /* If we trim data before curr pointer update copybreak and current
-        * so that any future copy operations start at new copy location.
+       sk_msg_iter_var_next(i);
+       msg->sg.end = i;
+
+       /* If we trim data a full sg elem before curr pointer update
+        * copybreak and current so that any future copy operations
+        * start at new copy location.
         * However trimed data that has not yet been used in a copy op
         * does not require an update.
         */
-       if (msg->sg.curr >= i) {
+       if (!msg->sg.size) {
+               msg->sg.curr = msg->sg.start;
+               msg->sg.copybreak = 0;
+       } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
+                  sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
+               sk_msg_iter_var_prev(i);
                msg->sg.curr = i;
                msg->sg.copybreak = msg->sg.data[i].length;
        }
-       sk_msg_iter_var_next(i);
-       msg->sg.end = i;
 }
 EXPORT_SYMBOL_GPL(sk_msg_trim);
 
index a515392..ac78a57 100644 (file)
@@ -1127,7 +1127,7 @@ set_rcvbuf:
                break;
                }
        case SO_INCOMING_CPU:
-               sk->sk_incoming_cpu = val;
+               WRITE_ONCE(sk->sk_incoming_cpu, val);
                break;
 
        case SO_CNX_ADVICE:
@@ -1476,7 +1476,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 
        case SO_INCOMING_CPU:
-               v.val = sk->sk_incoming_cpu;
+               v.val = READ_ONCE(sk->sk_incoming_cpu);
                break;
 
        case SO_MEMINFO:
@@ -3600,7 +3600,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
        struct sock *sk = p;
 
-       return !skb_queue_empty(&sk->sk_receive_queue) ||
+       return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
               sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
index d9b4200..d19557c 100644 (file)
@@ -117,7 +117,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                                                    inet->inet_daddr,
                                                    inet->inet_sport,
                                                    inet->inet_dport);
-       inet->inet_id = dp->dccps_iss ^ jiffies;
+       inet->inet_id = prandom_u32();
 
        err = dccp_connect(sk);
        rt = NULL;
@@ -416,7 +416,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
        RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
        newinet->mc_index  = inet_iif(skb);
        newinet->mc_ttl    = ip_hdr(skb)->ttl;
-       newinet->inet_id   = jiffies;
+       newinet->inet_id   = prandom_u32();
 
        if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
                goto put_and_exit;
index 0ea7528..3349ea8 100644 (file)
@@ -1205,7 +1205,7 @@ static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table  *wai
        struct dn_scp *scp = DN_SK(sk);
        __poll_t mask = datagram_poll(file, sock, wait);
 
-       if (!skb_queue_empty(&scp->other_receive_queue))
+       if (!skb_queue_empty_lockless(&scp->other_receive_queue))
                mask |= EPOLLRDBAND;
 
        return mask;
index a8e52c9..3255dfc 100644 (file)
@@ -310,8 +310,6 @@ static void dsa_master_reset_mtu(struct net_device *dev)
        rtnl_unlock();
 }
 
-static struct lock_class_key dsa_master_addr_list_lock_key;
-
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
        int ret;
@@ -325,9 +323,6 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
        wmb();
 
        dev->dsa_ptr = cpu_dp;
-       lockdep_set_class(&dev->addr_list_lock,
-                         &dsa_master_addr_list_lock_key);
-
        ret = dsa_master_ethtool_setup(dev);
        if (ret)
                return ret;
index 75d5822..028e65f 100644 (file)
@@ -1341,15 +1341,6 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
        return ret;
 }
 
-static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
-static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
-                                           struct netdev_queue *txq,
-                                           void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock,
-                         &dsa_slave_netdev_xmit_lock_key);
-}
-
 int dsa_slave_suspend(struct net_device *slave_dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(slave_dev);
@@ -1433,9 +1424,6 @@ int dsa_slave_create(struct dsa_port *port)
        slave_dev->max_mtu = ETH_MAX_MTU;
        SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
 
-       netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
-                                NULL);
-
        SET_NETDEV_DEV(slave_dev, port->ds->dev);
        slave_dev->dev.of_node = port->dn;
        slave_dev->vlan_features = master->vlan_features;
index 9c1cc24..9e5a883 100644 (file)
@@ -106,7 +106,7 @@ static int dsa_8021q_restore_pvid(struct dsa_switch *ds, int port)
        slave = ds->ports[port].slave;
 
        err = br_vlan_get_pvid(slave, &pvid);
-       if (err < 0)
+       if (!pvid || err < 0)
                /* There is no pvid on the bridge for this port, which is
                 * perfectly valid. Nothing to restore, bye-bye!
                 */
index 3297e7f..c0b107c 100644 (file)
@@ -58,13 +58,6 @@ static const struct header_ops lowpan_header_ops = {
        .create = lowpan_header_create,
 };
 
-static int lowpan_dev_init(struct net_device *ldev)
-{
-       netdev_lockdep_set_classes(ldev);
-
-       return 0;
-}
-
 static int lowpan_open(struct net_device *dev)
 {
        if (!open_count)
@@ -96,7 +89,6 @@ static int lowpan_get_iflink(const struct net_device *dev)
 }
 
 static const struct net_device_ops lowpan_netdev_ops = {
-       .ndo_init               = lowpan_dev_init,
        .ndo_start_xmit         = lowpan_xmit,
        .ndo_open               = lowpan_open,
        .ndo_stop               = lowpan_stop,
index 9a0fe0c..4a8550c 100644 (file)
@@ -73,7 +73,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        reuseport_has_conns(sk, true);
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
-       inet->inet_id = jiffies;
+       inet->inet_id = prandom_u32();
 
        sk_dst_set(sk, &rt->dst);
        err = 0;
index dde77f7..71c78d2 100644 (file)
@@ -1148,7 +1148,7 @@ void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric)
        if (!(dev->flags & IFF_UP) ||
            ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) ||
            ipv4_is_zeronet(prefix) ||
-           prefix == ifa->ifa_local || ifa->ifa_prefixlen == 32)
+           (prefix == ifa->ifa_local && ifa->ifa_prefixlen == 32))
                return;
 
        /* add the new */
index 0913a09..f1888c6 100644 (file)
@@ -1814,8 +1814,8 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
        int ret = 0;
        unsigned int hash = fib_laddr_hashfn(local);
        struct hlist_head *head = &fib_info_laddrhash[hash];
+       int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
        struct net *net = dev_net(dev);
-       int tb_id = l3mdev_fib_table(dev);
        struct fib_info *fi;
 
        if (!fib_info_laddrhash || local == 0)
index 9782486..83fb001 100644 (file)
@@ -240,7 +240,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        return -1;
 
                score = sk->sk_family == PF_INET ? 2 : 1;
-               if (sk->sk_incoming_cpu == raw_smp_processor_id())
+               if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                        score++;
        }
        return score;
index 52690bb..10636fb 100644 (file)
@@ -509,9 +509,9 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
        key = &tun_info->key;
        if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
                goto err_free_skb;
-       md = ip_tunnel_info_opts(tun_info);
-       if (!md)
+       if (tun_info->options_len < sizeof(*md))
                goto err_free_skb;
+       md = ip_tunnel_info_opts(tun_info);
 
        /* ERSPAN has fixed 8 byte GRE header */
        version = md->version;
index 814b9b8..3d8baaa 100644 (file)
@@ -645,11 +645,12 @@ void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
 EXPORT_SYMBOL(ip_fraglist_prepare);
 
 void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
-                 unsigned int ll_rs, unsigned int mtu,
+                 unsigned int ll_rs, unsigned int mtu, bool DF,
                  struct ip_frag_state *state)
 {
        struct iphdr *iph = ip_hdr(skb);
 
+       state->DF = DF;
        state->hlen = hlen;
        state->ll_rs = ll_rs;
        state->mtu = mtu;
@@ -668,9 +669,6 @@ static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;
 
-       if (IPCB(from)->flags & IPSKB_FRAG_PMTU)
-               state->iph->frag_off |= htons(IP_DF);
-
        /* ANK: dirty, but effective trick. Upgrade options only if
         * the segment to be fragmented was THE FIRST (otherwise,
         * options are already fixed) and make it ONCE
@@ -738,6 +736,8 @@ struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
         */
        iph = ip_hdr(skb2);
        iph->frag_off = htons((state->offset >> 3));
+       if (state->DF)
+               iph->frag_off |= htons(IP_DF);
 
        /*
         *      Added AC : If we are fragmenting a fragment that's not the
@@ -883,7 +883,8 @@ slow_path:
         *      Fragment the datagram.
         */
 
-       ip_frag_init(skb, hlen, ll_rs, mtu, &state);
+       ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
+                    &state);
 
        /*
         *      Keep copying data until we run out.
index 716d547..5800743 100644 (file)
@@ -2289,7 +2289,8 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
                        rcu_read_unlock();
                        return -ENODEV;
                }
-               skb2 = skb_clone(skb, GFP_ATOMIC);
+
+               skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
                if (!skb2) {
                        read_unlock(&mrt_lock);
                        rcu_read_unlock();
index 42187a3..d8876f0 100644 (file)
@@ -584,7 +584,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
        }
        /* This barrier is coupled with smp_wmb() in tcp_reset() */
        smp_rmb();
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR;
 
        return mask;
@@ -1964,7 +1964,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
 
-       if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
+       if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
            (sk->sk_state == TCP_ESTABLISHED))
                sk_busy_loop(sk, nonblock);
 
index 6be5683..67b2dc7 100644 (file)
@@ -303,7 +303,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                                                 inet->inet_daddr);
        }
 
-       inet->inet_id = tp->write_seq ^ jiffies;
+       inet->inet_id = prandom_u32();
 
        if (tcp_fastopen_defer_connect(sk, &err))
                return err;
@@ -1450,7 +1450,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
-       newinet->inet_id = newtp->write_seq ^ jiffies;
+       newinet->inet_id = prandom_u32();
 
        if (!dst) {
                dst = inet_csk_route_child_sock(sk, newsk, req);
@@ -2681,7 +2681,7 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
        net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
 
-       net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
+       net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
        net->ipv4.sysctl_tcp_sack = 1;
        net->ipv4.sysctl_tcp_window_scaling = 1;
        net->ipv4.sysctl_tcp_timestamps = 1;
index 14bc654..1d58ce8 100644 (file)
@@ -388,7 +388,7 @@ static int compute_score(struct sock *sk, struct net *net,
                return -1;
        score += 4;
 
-       if (sk->sk_incoming_cpu == raw_smp_processor_id())
+       if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
        return score;
 }
@@ -1316,6 +1316,20 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
                scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
 }
 
+static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
+{
+       /* We come here after udp_lib_checksum_complete() returned 0.
+        * This means that __skb_checksum_complete() might have
+        * set skb->csum_valid to 1.
+        * On 64bit platforms, we can set csum_unnecessary
+        * to true, but only if the skb is not shared.
+        */
+#if BITS_PER_LONG == 64
+       if (!skb_shared(skb))
+               udp_skb_scratch(skb)->csum_unnecessary = true;
+#endif
+}
+
 static int udp_skb_truesize(struct sk_buff *skb)
 {
        return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
@@ -1550,10 +1564,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
                        *total += skb->truesize;
                        kfree_skb(skb);
                } else {
-                       /* the csum related bits could be changed, refresh
-                        * the scratch area
-                        */
-                       udp_set_dev_scratch(skb);
+                       udp_skb_csum_unnecessary_set(skb);
                        break;
                }
        }
@@ -1577,7 +1588,7 @@ static int first_packet_length(struct sock *sk)
 
        spin_lock_bh(&rcvq->lock);
        skb = __first_packet_length(sk, rcvq, &total);
-       if (!skb && !skb_queue_empty(sk_queue)) {
+       if (!skb && !skb_queue_empty_lockless(sk_queue)) {
                spin_lock(&sk_queue->lock);
                skb_queue_splice_tail_init(sk_queue, rcvq);
                spin_unlock(&sk_queue->lock);
@@ -1650,7 +1661,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
                                return skb;
                        }
 
-                       if (skb_queue_empty(sk_queue)) {
+                       if (skb_queue_empty_lockless(sk_queue)) {
                                spin_unlock_bh(&queue->lock);
                                goto busy_check;
                        }
@@ -1676,7 +1687,7 @@ busy_check:
                                break;
 
                        sk_busy_loop(sk, flags & MSG_DONTWAIT);
-               } while (!skb_queue_empty(sk_queue));
+               } while (!skb_queue_empty_lockless(sk_queue));
 
                /* sk_queue is empty, reader_queue may contain peeked packets */
        } while (timeo &&
@@ -2712,7 +2723,7 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
        __poll_t mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
 
-       if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
+       if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Check for false positives due to checksum errors */
index 783f3c1..2fc0792 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/export.h>
 #include <net/ipv6.h>
 #include <net/ipv6_stubs.h>
+#include <net/addrconf.h>
 #include <net/ip.h>
 
 /* if ipv6 module registers this function is used by xfrm to force all
index cf60fae..fbe9d42 100644 (file)
@@ -105,7 +105,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        return -1;
 
                score = 1;
-               if (sk->sk_incoming_cpu == raw_smp_processor_id())
+               if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                        score++;
        }
        return score;
index 787d9f2..923034c 100644 (file)
@@ -980,9 +980,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                dsfield = key->tos;
                if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
                        goto tx_err;
-               md = ip_tunnel_info_opts(tun_info);
-               if (!md)
+               if (tun_info->options_len < sizeof(*md))
                        goto tx_err;
+               md = ip_tunnel_info_opts(tun_info);
 
                tun_id = tunnel_id_to_key32(key->tun_id);
                if (md->version == 1) {
index a63ff85..e60bf8e 100644 (file)
@@ -621,6 +621,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
 {
        struct __rt6_probe_work *work = NULL;
        const struct in6_addr *nh_gw;
+       unsigned long last_probe;
        struct neighbour *neigh;
        struct net_device *dev;
        struct inet6_dev *idev;
@@ -639,6 +640,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
        nh_gw = &fib6_nh->fib_nh_gw6;
        dev = fib6_nh->fib_nh_dev;
        rcu_read_lock_bh();
+       last_probe = READ_ONCE(fib6_nh->last_probe);
        idev = __in6_dev_get(dev);
        neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
        if (neigh) {
@@ -654,13 +656,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
                                __neigh_set_probe_once(neigh);
                }
                write_unlock(&neigh->lock);
-       } else if (time_after(jiffies, fib6_nh->last_probe +
+       } else if (time_after(jiffies, last_probe +
                                       idev->cnf.rtr_probe_interval)) {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
        }
 
-       if (work) {
-               fib6_nh->last_probe = jiffies;
+       if (!work || cmpxchg(&fib6_nh->last_probe,
+                            last_probe, jiffies) != last_probe) {
+               kfree(work);
+       } else {
                INIT_WORK(&work->work, rt6_probe_deferred);
                work->target = *nh_gw;
                dev_hold(dev);
@@ -3383,6 +3387,9 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
        int err;
 
        fib6_nh->fib_nh_family = AF_INET6;
+#ifdef CONFIG_IPV6_ROUTER_PREF
+       fib6_nh->last_probe = jiffies;
+#endif
 
        err = -ENODEV;
        if (cfg->fc_ifindex) {
index 9d4f75e..e705674 100644 (file)
@@ -81,6 +81,11 @@ static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
        if (!pskb_may_pull(skb, srhoff + len))
                return NULL;
 
+       /* note that pskb_may_pull may change pointers in header;
+        * for this reason it is necessary to reload them when needed.
+        */
+       srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
        if (!seg6_validate_srh(srh, len))
                return NULL;
 
@@ -336,6 +341,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
        if (!ipv6_addr_any(&slwt->nh6))
                nhaddr = &slwt->nh6;
 
+       skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
        seg6_lookup_nexthop(skb, nhaddr, 0);
 
        return dst_input(skb);
@@ -365,6 +372,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
 
        skb_dst_drop(skb);
 
+       skb_set_transport_header(skb, sizeof(struct iphdr));
+
        err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
        if (err)
                goto drop;
@@ -385,6 +394,8 @@ static int input_action_end_dt6(struct sk_buff *skb,
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto drop;
 
+       skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
        seg6_lookup_nexthop(skb, NULL, slwt->table);
 
        return dst_input(skb);
index 6324d3a..9fec580 100644 (file)
@@ -135,7 +135,7 @@ static int compute_score(struct sock *sk, struct net *net,
                return -1;
        score++;
 
-       if (sk->sk_incoming_cpu == raw_smp_processor_id())
+       if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
 
        return score;
index fd5ac27..d3b520b 100644 (file)
@@ -56,7 +56,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)
 {
        eth_hw_addr_random(dev);
        eth_broadcast_addr(dev->broadcast);
-       netdev_lockdep_set_classes(dev);
 
        return 0;
 }
index aba094b..2d05c4c 100644 (file)
@@ -1292,8 +1292,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        ieee80211_remove_interfaces(local);
  fail_rate:
        rtnl_unlock();
-       ieee80211_led_exit(local);
  fail_flows:
+       ieee80211_led_exit(local);
        destroy_workqueue(local->workqueue);
  fail_workqueue:
        wiphy_unregister(local->hw.wiphy);
index bd11fef..8d3a238 100644 (file)
@@ -2457,7 +2457,8 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta)
 {
        struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
 
-       if (time_after(stats->last_rx, sta->status_stats.last_ack))
+       if (!sta->status_stats.last_ack ||
+           time_after(stats->last_rx, sta->status_stats.last_ack))
                return stats->last_rx;
        return sta->status_stats.last_ack;
 }
index e64d5f9..d73d182 100644 (file)
@@ -296,7 +296,8 @@ ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr)
 
        if (unlikely(!flag_nested(nla)))
                return -IPSET_ERR_PROTOCOL;
-       if (nla_parse_nested_deprecated(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL))
+       if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
+                            ipaddr_policy, NULL))
                return -IPSET_ERR_PROTOCOL;
        if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
                return -IPSET_ERR_PROTOCOL;
@@ -314,7 +315,8 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
        if (unlikely(!flag_nested(nla)))
                return -IPSET_ERR_PROTOCOL;
 
-       if (nla_parse_nested_deprecated(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL))
+       if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
+                            ipaddr_policy, NULL))
                return -IPSET_ERR_PROTOCOL;
        if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
                return -IPSET_ERR_PROTOCOL;
@@ -934,7 +936,8 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
 
        /* Without holding any locks, create private part. */
        if (attr[IPSET_ATTR_DATA] &&
-           nla_parse_nested_deprecated(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA], set->type->create_policy, NULL)) {
+           nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
+                            set->type->create_policy, NULL)) {
                ret = -IPSET_ERR_PROTOCOL;
                goto put_out;
        }
@@ -1281,6 +1284,14 @@ dump_attrs(struct nlmsghdr *nlh)
        }
 }
 
+static const struct nla_policy
+ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_SETNAME]    = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+       [IPSET_ATTR_FLAGS]      = { .type = NLA_U32 },
+};
+
 static int
 dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
 {
@@ -1292,9 +1303,9 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
        ip_set_id_t index;
        int ret;
 
-       ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, attr,
-                                  nlh->nlmsg_len - min_len,
-                                  ip_set_setname_policy, NULL);
+       ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
+                       nlh->nlmsg_len - min_len,
+                       ip_set_dump_policy, NULL);
        if (ret)
                return ret;
 
@@ -1543,9 +1554,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
                memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
                cmdattr = (void *)&errmsg->msg + min_len;
 
-               ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, cmdattr,
-                                          nlh->nlmsg_len - min_len,
-                                          ip_set_adt_policy, NULL);
+               ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr,
+                               nlh->nlmsg_len - min_len, ip_set_adt_policy,
+                               NULL);
 
                if (ret) {
                        nlmsg_free(skb2);
@@ -1596,7 +1607,9 @@ static int ip_set_ad(struct net *net, struct sock *ctnl,
 
        use_lineno = !!attr[IPSET_ATTR_LINENO];
        if (attr[IPSET_ATTR_DATA]) {
-               if (nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL))
+               if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+                                    attr[IPSET_ATTR_DATA],
+                                    set->type->adt_policy, NULL))
                        return -IPSET_ERR_PROTOCOL;
                ret = call_ad(ctnl, skb, set, tb, adt, flags,
                              use_lineno);
@@ -1606,7 +1619,8 @@ static int ip_set_ad(struct net *net, struct sock *ctnl,
                nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
                        if (nla_type(nla) != IPSET_ATTR_DATA ||
                            !flag_nested(nla) ||
-                           nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, nla, set->type->adt_policy, NULL))
+                           nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+                                            set->type->adt_policy, NULL))
                                return -IPSET_ERR_PROTOCOL;
                        ret = call_ad(ctnl, skb, set, tb, adt,
                                      flags, use_lineno);
@@ -1655,7 +1669,8 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
        if (!set)
                return -ENOENT;
 
-       if (nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL))
+       if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
+                            set->type->adt_policy, NULL))
                return -IPSET_ERR_PROTOCOL;
 
        rcu_read_lock_bh();
@@ -1961,7 +1976,7 @@ static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
        [IPSET_CMD_LIST]        = {
                .call           = ip_set_dump,
                .attr_count     = IPSET_ATTR_CMD_MAX,
-               .policy         = ip_set_setname_policy,
+               .policy         = ip_set_dump_policy,
        },
        [IPSET_CMD_SAVE]        = {
                .call           = ip_set_dump,
@@ -2069,8 +2084,9 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
                }
 
                req_version->version = IPSET_PROTOCOL;
-               ret = copy_to_user(user, req_version,
-                                  sizeof(struct ip_set_req_version));
+               if (copy_to_user(user, req_version,
+                                sizeof(struct ip_set_req_version)))
+                       ret = -EFAULT;
                goto done;
        }
        case IP_SET_OP_GET_BYNAME: {
@@ -2129,7 +2145,8 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
        }       /* end of switch(op) */
 
 copy:
-       ret = copy_to_user(user, data, copylen);
+       if (copy_to_user(user, data, copylen))
+               ret = -EFAULT;
 
 done:
        vfree(data);
index 24d8f4d..4ce563e 100644 (file)
@@ -209,7 +209,7 @@ hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb,
            (skb_mac_header(skb) + ETH_HLEN) > skb->data)
                return -EINVAL;
 
-       if (opt->flags & IPSET_DIM_ONE_SRC)
+       if (opt->flags & IPSET_DIM_TWO_SRC)
                ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
        else
                ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
index c259cbc..3d932de 100644 (file)
@@ -368,6 +368,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
                [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
                [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
                [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
index a3ae69b..4398322 100644 (file)
@@ -476,6 +476,7 @@ static struct ip_set_type hash_netnet_type __read_mostly = {
                [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
                [IPSET_ATTR_CIDR2]      = { .type = NLA_U8 },
                [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
index 4515056..f9b16f2 100644 (file)
@@ -193,21 +193,29 @@ struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *
 
        mutex_lock(&__ip_vs_app_mutex);
 
+       /* increase the module use count */
+       if (!ip_vs_use_count_inc()) {
+               err = -ENOENT;
+               goto out_unlock;
+       }
+
        list_for_each_entry(a, &ipvs->app_list, a_list) {
                if (!strcmp(app->name, a->name)) {
                        err = -EEXIST;
+                       /* decrease the module use count */
+                       ip_vs_use_count_dec();
                        goto out_unlock;
                }
        }
        a = kmemdup(app, sizeof(*app), GFP_KERNEL);
        if (!a) {
                err = -ENOMEM;
+               /* decrease the module use count */
+               ip_vs_use_count_dec();
                goto out_unlock;
        }
        INIT_LIST_HEAD(&a->incs_list);
        list_add(&a->a_list, &ipvs->app_list);
-       /* increase the module use count */
-       ip_vs_use_count_inc();
 
 out_unlock:
        mutex_unlock(&__ip_vs_app_mutex);
index 8b48e7c..3cccc88 100644 (file)
@@ -93,7 +93,6 @@ static bool __ip_vs_addr_is_local_v6(struct net *net,
 static void update_defense_level(struct netns_ipvs *ipvs)
 {
        struct sysinfo i;
-       static int old_secure_tcp = 0;
        int availmem;
        int nomem;
        int to_change = -1;
@@ -174,35 +173,35 @@ static void update_defense_level(struct netns_ipvs *ipvs)
        spin_lock(&ipvs->securetcp_lock);
        switch (ipvs->sysctl_secure_tcp) {
        case 0:
-               if (old_secure_tcp >= 2)
+               if (ipvs->old_secure_tcp >= 2)
                        to_change = 0;
                break;
        case 1:
                if (nomem) {
-                       if (old_secure_tcp < 2)
+                       if (ipvs->old_secure_tcp < 2)
                                to_change = 1;
                        ipvs->sysctl_secure_tcp = 2;
                } else {
-                       if (old_secure_tcp >= 2)
+                       if (ipvs->old_secure_tcp >= 2)
                                to_change = 0;
                }
                break;
        case 2:
                if (nomem) {
-                       if (old_secure_tcp < 2)
+                       if (ipvs->old_secure_tcp < 2)
                                to_change = 1;
                } else {
-                       if (old_secure_tcp >= 2)
+                       if (ipvs->old_secure_tcp >= 2)
                                to_change = 0;
                        ipvs->sysctl_secure_tcp = 1;
                }
                break;
        case 3:
-               if (old_secure_tcp < 2)
+               if (ipvs->old_secure_tcp < 2)
                        to_change = 1;
                break;
        }
-       old_secure_tcp = ipvs->sysctl_secure_tcp;
+       ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
        if (to_change >= 0)
                ip_vs_protocol_timeout_change(ipvs,
                                              ipvs->sysctl_secure_tcp > 1);
@@ -1275,7 +1274,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
        struct ip_vs_service *svc = NULL;
 
        /* increase the module use count */
-       ip_vs_use_count_inc();
+       if (!ip_vs_use_count_inc())
+               return -ENOPROTOOPT;
 
        /* Lookup the scheduler by 'u->sched_name' */
        if (strcmp(u->sched_name, "none")) {
@@ -2435,9 +2435,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
        if (copy_from_user(arg, user, len) != 0)
                return -EFAULT;
 
-       /* increase the module use count */
-       ip_vs_use_count_inc();
-
        /* Handle daemons since they have another lock */
        if (cmd == IP_VS_SO_SET_STARTDAEMON ||
            cmd == IP_VS_SO_SET_STOPDAEMON) {
@@ -2450,13 +2447,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                        ret = -EINVAL;
                        if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
                                    sizeof(cfg.mcast_ifn)) <= 0)
-                               goto out_dec;
+                               return ret;
                        cfg.syncid = dm->syncid;
                        ret = start_sync_thread(ipvs, &cfg, dm->state);
                } else {
                        ret = stop_sync_thread(ipvs, dm->state);
                }
-               goto out_dec;
+               return ret;
        }
 
        mutex_lock(&__ip_vs_mutex);
@@ -2551,10 +2548,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
   out_unlock:
        mutex_unlock(&__ip_vs_mutex);
-  out_dec:
-       /* decrease the module use count */
-       ip_vs_use_count_dec();
-
        return ret;
 }
 
index 8e104df..166c669 100644 (file)
@@ -68,7 +68,8 @@ int register_ip_vs_pe(struct ip_vs_pe *pe)
        struct ip_vs_pe *tmp;
 
        /* increase the module use count */
-       ip_vs_use_count_inc();
+       if (!ip_vs_use_count_inc())
+               return -ENOENT;
 
        mutex_lock(&ip_vs_pe_mutex);
        /* Make sure that the pe with this name doesn't exist
index 2f9d5cd..d490372 100644 (file)
@@ -179,7 +179,8 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
        }
 
        /* increase the module use count */
-       ip_vs_use_count_inc();
+       if (!ip_vs_use_count_inc())
+               return -ENOENT;
 
        mutex_lock(&ip_vs_sched_mutex);
 
index a4a78c4..8dc892a 100644 (file)
@@ -1762,6 +1762,10 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
        IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
                  sizeof(struct ip_vs_sync_conn_v0));
 
+       /* increase the module use count */
+       if (!ip_vs_use_count_inc())
+               return -ENOPROTOOPT;
+
        /* Do not hold one mutex and then to block on another */
        for (;;) {
                rtnl_lock();
@@ -1892,9 +1896,6 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
        mutex_unlock(&ipvs->sync_mutex);
        rtnl_unlock();
 
-       /* increase the module use count */
-       ip_vs_use_count_inc();
-
        return 0;
 
 out:
@@ -1924,11 +1925,17 @@ out:
                }
                kfree(ti);
        }
+
+       /* decrease the module use count */
+       ip_vs_use_count_dec();
        return result;
 
 out_early:
        mutex_unlock(&ipvs->sync_mutex);
        rtnl_unlock();
+
+       /* decrease the module use count */
+       ip_vs_use_count_dec();
        return result;
 }
 
index 132f522..128245e 100644 (file)
@@ -202,6 +202,8 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
        int err;
 
+       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+
        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[0].node,
                                     nf_flow_offload_rhash_params);
@@ -218,7 +220,6 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
                return err;
        }
 
-       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
        return 0;
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
index d481f9b..712a428 100644 (file)
@@ -1922,6 +1922,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
                if (nlh->nlmsg_flags & NLM_F_REPLACE)
                        return -EOPNOTSUPP;
 
+               flags |= chain->flags & NFT_BASE_CHAIN;
                return nf_tables_updchain(&ctx, genmask, policy, flags);
        }
 
@@ -5143,9 +5144,6 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
        struct nft_trans *trans;
        int err;
 
-       if (!obj->ops->update)
-               return -EOPNOTSUPP;
-
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
                                sizeof(struct nft_trans_obj));
        if (!trans)
@@ -6499,7 +6497,8 @@ static void nft_obj_commit_update(struct nft_trans *trans)
        obj = nft_trans_obj(trans);
        newobj = nft_trans_obj_newobj(trans);
 
-       obj->ops->update(obj, newobj);
+       if (obj->ops->update)
+               obj->ops->update(obj, newobj);
 
        kfree(newobj);
 }
index e546f75..e25dab8 100644 (file)
@@ -334,7 +334,8 @@ int nft_flow_rule_offload_commit(struct net *net)
 
                switch (trans->msg_type) {
                case NFT_MSG_NEWCHAIN:
-                       if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+                       if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
+                           nft_trans_chain_update(trans))
                                continue;
 
                        policy = nft_trans_chain_policy(trans);
@@ -347,7 +348,7 @@ int nft_flow_rule_offload_commit(struct net *net)
 
                        policy = nft_trans_chain_policy(trans);
                        err = nft_flow_offload_chain(trans->ctx.chain, &policy,
-                                                    FLOW_BLOCK_BIND);
+                                                    FLOW_BLOCK_UNBIND);
                        break;
                case NFT_MSG_NEWRULE:
                        if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
index 9743001..02afa75 100644 (file)
@@ -134,12 +134,13 @@ static int nft_bitwise_offload(struct nft_offload_ctx *ctx,
                                const struct nft_expr *expr)
 {
        const struct nft_bitwise *priv = nft_expr_priv(expr);
+       struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
 
        if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) ||
-           priv->sreg != priv->dreg)
+           priv->sreg != priv->dreg || priv->len != reg->len)
                return -EOPNOTSUPP;
 
-       memcpy(&ctx->regs[priv->dreg].mask, &priv->mask, sizeof(priv->mask));
+       memcpy(&reg->mask, &priv->mask, sizeof(priv->mask));
 
        return 0;
 }
index bd173b1..0744b2b 100644 (file)
@@ -116,7 +116,7 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
        u8 *mask = (u8 *)&flow->match.mask;
        u8 *key = (u8 *)&flow->match.key;
 
-       if (priv->op != NFT_CMP_EQ)
+       if (priv->op != NFT_CMP_EQ || reg->len != priv->len)
                return -EOPNOTSUPP;
 
        memcpy(key + reg->offset, &priv->data, priv->len);
index 22a80eb..5cb2d89 100644 (file)
@@ -161,13 +161,21 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct ethhdr, h_source):
+               if (priv->len != ETH_ALEN)
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  src, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_dest):
+               if (priv->len != ETH_ALEN)
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  dst, ETH_ALEN, reg);
                break;
+       default:
+               return -EOPNOTSUPP;
        }
 
        return 0;
@@ -181,14 +189,23 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct iphdr, saddr):
+               if (priv->len != sizeof(struct in_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
                                  sizeof(struct in_addr), reg);
                break;
        case offsetof(struct iphdr, daddr):
+               if (priv->len != sizeof(struct in_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                                  sizeof(struct in_addr), reg);
                break;
        case offsetof(struct iphdr, protocol):
+               if (priv->len != sizeof(__u8))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
@@ -208,14 +225,23 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct ipv6hdr, saddr):
+               if (priv->len != sizeof(struct in6_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
                                  sizeof(struct in6_addr), reg);
                break;
        case offsetof(struct ipv6hdr, daddr):
+               if (priv->len != sizeof(struct in6_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
                                  sizeof(struct in6_addr), reg);
                break;
        case offsetof(struct ipv6hdr, nexthdr):
+               if (priv->len != sizeof(__u8))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
@@ -255,10 +281,16 @@ static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct tcphdr, source):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct tcphdr, dest):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
@@ -277,10 +309,16 @@ static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct udphdr, source):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct udphdr, dest):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
index c4f54ad..58d5373 100644 (file)
@@ -64,28 +64,6 @@ static DEFINE_SPINLOCK(nr_list_lock);
 static const struct proto_ops nr_proto_ops;
 
 /*
- * NETROM network devices are virtual network devices encapsulating NETROM
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key nr_netdev_xmit_lock_key;
-static struct lock_class_key nr_netdev_addr_lock_key;
-
-static void nr_set_lockdep_one(struct net_device *dev,
-                              struct netdev_queue *txq,
-                              void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
-}
-
-static void nr_set_lockdep_key(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
-}
-
-/*
  *     Socket removal during an interrupt is now safe.
  */
 static void nr_remove_socket(struct sock *sk)
@@ -1414,7 +1392,6 @@ static int __init nr_proto_init(void)
                        free_netdev(dev);
                        goto fail;
                }
-               nr_set_lockdep_key(dev);
                dev_nr[i] = dev;
        }
 
index ccdd790..2860441 100644 (file)
@@ -554,11 +554,11 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_state == LLCP_LISTEN)
                return llcp_accept_poll(sk);
 
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_state == LLCP_CLOSED)
index 17e6ca6..afde0d7 100644 (file)
@@ -1099,7 +1099,6 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
 
        local = nfc_llcp_find_local(dev);
        if (!local) {
-               nfc_put_device(dev);
                rc = -ENODEV;
                goto exit;
        }
@@ -1159,7 +1158,6 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
 
        local = nfc_llcp_find_local(dev);
        if (!local) {
-               nfc_put_device(dev);
                rc = -ENODEV;
                goto exit;
        }
index f30e406..d8c364d 100644 (file)
@@ -1881,7 +1881,7 @@ static struct genl_family dp_datapath_genl_family __ro_after_init = {
 /* Called with ovs_mutex or RCU read lock. */
 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   struct net *net, u32 portid, u32 seq,
-                                  u32 flags, u8 cmd)
+                                  u32 flags, u8 cmd, gfp_t gfp)
 {
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
@@ -1902,7 +1902,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                goto nla_put_failure;
 
        if (!net_eq(net, dev_net(vport->dev))) {
-               int id = peernet2id_alloc(net, dev_net(vport->dev));
+               int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
 
                if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
                        goto nla_put_failure;
@@ -1943,11 +1943,12 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
        struct sk_buff *skb;
        int retval;
 
-       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
+       retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
+                                        GFP_KERNEL);
        BUG_ON(retval < 0);
 
        return skb;
@@ -2089,7 +2090,7 @@ restart:
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_NEW);
+                                     OVS_VPORT_CMD_NEW, GFP_KERNEL);
 
        new_headroom = netdev_get_fwd_headroom(vport->dev);
 
@@ -2150,7 +2151,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_SET);
+                                     OVS_VPORT_CMD_SET, GFP_KERNEL);
        BUG_ON(err < 0);
 
        ovs_unlock();
@@ -2190,7 +2191,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_DEL);
+                                     OVS_VPORT_CMD_DEL, GFP_KERNEL);
        BUG_ON(err < 0);
 
        /* the vport deletion may trigger dp headroom update */
@@ -2237,7 +2238,7 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto exit_unlock_free;
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_GET);
+                                     OVS_VPORT_CMD_GET, GFP_ATOMIC);
        BUG_ON(err < 0);
        rcu_read_unlock();
 
@@ -2273,7 +2274,8 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI,
-                                                   OVS_VPORT_CMD_GET) < 0)
+                                                   OVS_VPORT_CMD_GET,
+                                                   GFP_ATOMIC) < 0)
                                goto out;
 
                        j++;
index 21c90d3..58a7b83 100644 (file)
@@ -137,7 +137,7 @@ static void do_setup(struct net_device *netdev)
        netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
                              IFF_NO_QUEUE;
        netdev->needs_free_netdev = true;
-       netdev->priv_destructor = internal_dev_destructor;
+       netdev->priv_destructor = NULL;
        netdev->ethtool_ops = &internal_dev_ethtool_ops;
        netdev->rtnl_link_ops = &internal_dev_link_ops;
 
@@ -159,7 +159,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
        struct internal_dev *internal_dev;
        struct net_device *dev;
        int err;
-       bool free_vport = true;
 
        vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
        if (IS_ERR(vport)) {
@@ -190,10 +189,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
        rtnl_lock();
        err = register_netdevice(vport->dev);
-       if (err) {
-               free_vport = false;
+       if (err)
                goto error_unlock;
-       }
+       vport->dev->priv_destructor = internal_dev_destructor;
 
        dev_set_promiscuity(vport->dev, 1);
        rtnl_unlock();
@@ -207,8 +205,7 @@ error_unlock:
 error_free_netdev:
        free_netdev(dev);
 error_free_vport:
-       if (free_vport)
-               ovs_vport_free(vport);
+       ovs_vport_free(vport);
 error:
        return ERR_PTR(err);
 }
index 96ea9f2..76d499f 100644 (file)
@@ -338,9 +338,9 @@ static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
 
        if (sk->sk_state == TCP_CLOSE)
                return EPOLLERR;
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
-       if (!skb_queue_empty(&pn->ctrlreq_queue))
+       if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
                mask |= EPOLLPRI;
        if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
                return EPOLLHUP;
index 233f136..18c6fac 100644 (file)
@@ -450,6 +450,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        struct ib_qp_init_attr attr;
        struct ib_cq_init_attr cq_attr = {};
        struct rds_ib_device *rds_ibdev;
+       unsigned long max_wrs;
        int ret, fr_queue_space;
 
        /*
@@ -469,10 +470,15 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        /* add the conn now so that connection establishment has the dev */
        rds_ib_add_conn(rds_ibdev, conn);
 
-       if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
-               rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
-       if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
-               rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);
+       max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_send_wr + 1 ?
+               rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_send_wr;
+       if (ic->i_send_ring.w_nr != max_wrs)
+               rds_ib_ring_resize(&ic->i_send_ring, max_wrs);
+
+       max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_recv_wr + 1 ?
+               rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_recv_wr;
+       if (ic->i_recv_ring.w_nr != max_wrs)
+               rds_ib_ring_resize(&ic->i_recv_ring, max_wrs);
 
        /* Protection domain and memory range */
        ic->i_pd = rds_ibdev->pd;
@@ -1099,8 +1105,9 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
        ic->i_flowctl = 0;
        atomic_set(&ic->i_credits, 0);
 
-       rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
-       rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
+       /* Re-init rings, but retain sizes. */
+       rds_ib_ring_init(&ic->i_send_ring, ic->i_send_ring.w_nr);
+       rds_ib_ring_init(&ic->i_recv_ring, ic->i_recv_ring.w_nr);
 
        if (ic->i_ibinc) {
                rds_inc_put(&ic->i_ibinc->ii_inc);
@@ -1147,8 +1154,8 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
         * rds_ib_conn_shutdown() waits for these to be emptied so they
         * must be initialized before it can be called.
         */
-       rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
-       rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
+       rds_ib_ring_init(&ic->i_send_ring, 0);
+       rds_ib_ring_init(&ic->i_recv_ring, 0);
 
        ic->conn = conn;
        conn->c_transport_data = ic;
index f0e9ccf..6a0df7c 100644 (file)
@@ -65,28 +65,6 @@ static const struct proto_ops rose_proto_ops;
 ax25_address rose_callsign;
 
 /*
- * ROSE network devices are virtual network devices encapsulating ROSE
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key rose_netdev_xmit_lock_key;
-static struct lock_class_key rose_netdev_addr_lock_key;
-
-static void rose_set_lockdep_one(struct net_device *dev,
-                                struct netdev_queue *txq,
-                                void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
-}
-
-static void rose_set_lockdep_key(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
-}
-
-/*
  *     Convert a ROSE address into text.
  */
 char *rose2asc(char *buf, const rose_address *addr)
@@ -1533,7 +1511,6 @@ static int __init rose_proto_init(void)
                        free_netdev(dev);
                        goto fail;
                }
-               rose_set_lockdep_key(dev);
                dev_rose[i] = dev;
        }
 
index ecc17da..7c7d10f 100644 (file)
@@ -601,6 +601,7 @@ struct rxrpc_call {
        int                     debug_id;       /* debug ID for printks */
        unsigned short          rx_pkt_offset;  /* Current recvmsg packet offset */
        unsigned short          rx_pkt_len;     /* Current recvmsg packet len */
+       bool                    rx_pkt_last;    /* Current recvmsg packet is last */
 
        /* Rx/Tx circular buffer, depending on phase.
         *
index a409079..8578c39 100644 (file)
@@ -267,11 +267,13 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
  */
 static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
                             u8 *_annotation,
-                            unsigned int *_offset, unsigned int *_len)
+                            unsigned int *_offset, unsigned int *_len,
+                            bool *_last)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned int offset = sizeof(struct rxrpc_wire_header);
        unsigned int len;
+       bool last = false;
        int ret;
        u8 annotation = *_annotation;
        u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
@@ -281,6 +283,8 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
        len = skb->len - offset;
        if (subpacket < sp->nr_subpackets - 1)
                len = RXRPC_JUMBO_DATALEN;
+       else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
+               last = true;
 
        if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
                ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
@@ -291,6 +295,7 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 
        *_offset = offset;
        *_len = len;
+       *_last = last;
        call->security->locate_data(call, skb, _offset, _len);
        return 0;
 }
@@ -309,7 +314,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top, seq;
        size_t remain;
-       bool last;
+       bool rx_pkt_last;
        unsigned int rx_pkt_offset, rx_pkt_len;
        int ix, copy, ret = -EAGAIN, ret2;
 
@@ -319,6 +324,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 
        rx_pkt_offset = call->rx_pkt_offset;
        rx_pkt_len = call->rx_pkt_len;
+       rx_pkt_last = call->rx_pkt_last;
 
        if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
                seq = call->rx_hard_ack;
@@ -329,6 +335,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
        /* Barriers against rxrpc_input_data(). */
        hard_ack = call->rx_hard_ack;
        seq = hard_ack + 1;
+
        while (top = smp_load_acquire(&call->rx_top),
               before_eq(seq, top)
               ) {
@@ -356,7 +363,8 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                if (rx_pkt_offset == 0) {
                        ret2 = rxrpc_locate_data(call, skb,
                                                 &call->rxtx_annotations[ix],
-                                                &rx_pkt_offset, &rx_pkt_len);
+                                                &rx_pkt_offset, &rx_pkt_len,
+                                                &rx_pkt_last);
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
                                            rx_pkt_offset, rx_pkt_len, ret2);
                        if (ret2 < 0) {
@@ -396,13 +404,12 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                }
 
                /* The whole packet has been transferred. */
-               last = sp->hdr.flags & RXRPC_LAST_PACKET;
                if (!(flags & MSG_PEEK))
                        rxrpc_rotate_rx_window(call);
                rx_pkt_offset = 0;
                rx_pkt_len = 0;
 
-               if (last) {
+               if (rx_pkt_last) {
                        ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
                        ret = 1;
                        goto out;
@@ -415,6 +422,7 @@ out:
        if (!(flags & MSG_PEEK)) {
                call->rx_pkt_offset = rx_pkt_offset;
                call->rx_pkt_len = rx_pkt_len;
+               call->rx_pkt_last = rx_pkt_last;
        }
 done:
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
index 8717c0b..20d60b8 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/idr.h>
 #include <linux/rhashtable.h>
+#include <linux/jhash.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/netlink.h>
@@ -47,6 +48,62 @@ static LIST_HEAD(tcf_proto_base);
 /* Protects list of registered TC modules. It is pure SMP lock. */
 static DEFINE_RWLOCK(cls_mod_lock);
 
+static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
+{
+       return jhash_3words(tp->chain->index, tp->prio,
+                           (__force __u32)tp->protocol, 0);
+}
+
+static void tcf_proto_signal_destroying(struct tcf_chain *chain,
+                                       struct tcf_proto *tp)
+{
+       struct tcf_block *block = chain->block;
+
+       mutex_lock(&block->proto_destroy_lock);
+       hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
+                    destroy_obj_hashfn(tp));
+       mutex_unlock(&block->proto_destroy_lock);
+}
+
+static bool tcf_proto_cmp(const struct tcf_proto *tp1,
+                         const struct tcf_proto *tp2)
+{
+       return tp1->chain->index == tp2->chain->index &&
+              tp1->prio == tp2->prio &&
+              tp1->protocol == tp2->protocol;
+}
+
+static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
+                                       struct tcf_proto *tp)
+{
+       u32 hash = destroy_obj_hashfn(tp);
+       struct tcf_proto *iter;
+       bool found = false;
+
+       rcu_read_lock();
+       hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
+                                  destroy_ht_node, hash) {
+               if (tcf_proto_cmp(tp, iter)) {
+                       found = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return found;
+}
+
+static void
+tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
+{
+       struct tcf_block *block = chain->block;
+
+       mutex_lock(&block->proto_destroy_lock);
+       if (hash_hashed(&tp->destroy_ht_node))
+               hash_del_rcu(&tp->destroy_ht_node);
+       mutex_unlock(&block->proto_destroy_lock);
+}
+
 /* Find classifier type by string name */
 
 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
@@ -234,9 +291,11 @@ static void tcf_proto_get(struct tcf_proto *tp)
 static void tcf_chain_put(struct tcf_chain *chain);
 
 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
-                             struct netlink_ext_ack *extack)
+                             bool sig_destroy, struct netlink_ext_ack *extack)
 {
        tp->ops->destroy(tp, rtnl_held, extack);
+       if (sig_destroy)
+               tcf_proto_signal_destroyed(tp->chain, tp);
        tcf_chain_put(tp->chain);
        module_put(tp->ops->owner);
        kfree_rcu(tp, rcu);
@@ -246,7 +305,7 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                          struct netlink_ext_ack *extack)
 {
        if (refcount_dec_and_test(&tp->refcnt))
-               tcf_proto_destroy(tp, rtnl_held, extack);
+               tcf_proto_destroy(tp, rtnl_held, true, extack);
 }
 
 static int walker_check_empty(struct tcf_proto *tp, void *fh,
@@ -370,6 +429,7 @@ static bool tcf_chain_detach(struct tcf_chain *chain)
 static void tcf_block_destroy(struct tcf_block *block)
 {
        mutex_destroy(&block->lock);
+       mutex_destroy(&block->proto_destroy_lock);
        kfree_rcu(block, rcu);
 }
 
@@ -545,6 +605,12 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
 
        mutex_lock(&chain->filter_chain_lock);
        tp = tcf_chain_dereference(chain->filter_chain, chain);
+       while (tp) {
+               tp_next = rcu_dereference_protected(tp->next, 1);
+               tcf_proto_signal_destroying(chain, tp);
+               tp = tp_next;
+       }
+       tp = tcf_chain_dereference(chain->filter_chain, chain);
        RCU_INIT_POINTER(chain->filter_chain, NULL);
        tcf_chain0_head_change(chain, NULL);
        chain->flushing = true;
@@ -844,6 +910,7 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&block->lock);
+       mutex_init(&block->proto_destroy_lock);
        init_rwsem(&block->cb_lock);
        flow_block_init(&block->flow_block);
        INIT_LIST_HEAD(&block->chain_list);
@@ -1621,6 +1688,12 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
 
        mutex_lock(&chain->filter_chain_lock);
 
+       if (tcf_proto_exists_destroying(chain, tp_new)) {
+               mutex_unlock(&chain->filter_chain_lock);
+               tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
+               return ERR_PTR(-EAGAIN);
+       }
+
        tp = tcf_chain_tp_find(chain, &chain_info,
                               protocol, prio, false);
        if (!tp)
@@ -1628,10 +1701,10 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
        mutex_unlock(&chain->filter_chain_lock);
 
        if (tp) {
-               tcf_proto_destroy(tp_new, rtnl_held, NULL);
+               tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
                tp_new = tp;
        } else if (err) {
-               tcf_proto_destroy(tp_new, rtnl_held, NULL);
+               tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
                tp_new = ERR_PTR(err);
        }
 
@@ -1669,6 +1742,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
                return;
        }
 
+       tcf_proto_signal_destroying(chain, tp);
        next = tcf_chain_dereference(chain_info.next, chain);
        if (tp == chain->filter_chain)
                tcf_chain0_head_change(chain, next);
@@ -2188,6 +2262,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
                err = -EINVAL;
                goto errout_locked;
        } else if (t->tcm_handle == 0) {
+               tcf_proto_signal_destroying(chain, tp);
                tcf_chain_tp_remove(chain, &chain_info, tp);
                mutex_unlock(&chain->filter_chain_lock);
 
index bf10bda..8229ed4 100644 (file)
@@ -162,16 +162,20 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
        cls_bpf.name = obj->bpf_name;
        cls_bpf.exts_integrated = obj->exts_integrated;
 
-       if (oldprog)
+       if (oldprog && prog)
                err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
                                          skip_sw, &oldprog->gen_flags,
                                          &oldprog->in_hw_count,
                                          &prog->gen_flags, &prog->in_hw_count,
                                          true);
-       else
+       else if (prog)
                err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
                                      skip_sw, &prog->gen_flags,
                                      &prog->in_hw_count, true);
+       else
+               err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
+                                         skip_sw, &oldprog->gen_flags,
+                                         &oldprog->in_hw_count, true);
 
        if (prog && err) {
                cls_bpf_offload_cmd(tp, oldprog, prog, extack);
index 17bd8f5..8769b4b 100644 (file)
@@ -799,9 +799,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
 
-static struct lock_class_key qdisc_tx_busylock;
-static struct lock_class_key qdisc_running_key;
-
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops,
                          struct netlink_ext_ack *extack)
@@ -854,17 +851,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        }
 
        spin_lock_init(&sch->busylock);
-       lockdep_set_class(&sch->busylock,
-                         dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
        /* seqlock has the same scope of busylock, for NOLOCK qdisc */
        spin_lock_init(&sch->seqlock);
-       lockdep_set_class(&sch->busylock,
-                         dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
        seqcount_init(&sch->running);
-       lockdep_set_class(&sch->running,
-                         dev->qdisc_running_key ?: &qdisc_running_key);
 
        sch->ops = ops;
        sch->flags = ops->static_flags;
@@ -875,6 +864,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        dev_hold(dev);
        refcount_set(&sch->refcnt, 1);
 
+       if (sch != &noop_qdisc) {
+               lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key);
+               lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key);
+               lockdep_set_class(&sch->running, &dev->qdisc_running_key);
+       }
+
        return sch;
 errout1:
        kfree(p);
@@ -1043,6 +1038,8 @@ static void attach_one_default_qdisc(struct net_device *dev,
 
        if (dev->priv_flags & IFF_NO_QUEUE)
                ops = &noqueue_qdisc_ops;
+       else if(dev->type == ARPHRD_CAN)
+               ops = &pfifo_fast_ops;
 
        qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
        if (!qdisc) {
index 23cd1c8..be35f03 100644 (file)
@@ -5,11 +5,11 @@
  * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
  */
 
-#include <linux/jhash.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
+#include <linux/siphash.h>
 #include <net/pkt_sched.h>
 #include <net/sock.h>
 
@@ -126,7 +126,7 @@ struct wdrr_bucket {
 
 struct hhf_sched_data {
        struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
-       u32                perturbation;   /* hash perturbation */
+       siphash_key_t      perturbation;   /* hash perturbation */
        u32                quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32                drop_overlimit; /* number of times max qdisc packet
                                            * limit was hit
@@ -264,7 +264,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
        }
 
        /* Get hashed flow-id of the skb. */
-       hash = skb_get_hash_perturb(skb, q->perturbation);
+       hash = skb_get_hash_perturb(skb, &q->perturbation);
 
        /* Check if this packet belongs to an already established HH flow. */
        flow_pos = hash & HHF_BIT_MASK;
@@ -582,7 +582,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
 
        sch->limit = 1000;
        q->quantum = psched_mtu(qdisc_dev(sch));
-       q->perturbation = prandom_u32();
+       get_random_bytes(&q->perturbation, sizeof(q->perturbation));
        INIT_LIST_HEAD(&q->new_buckets);
        INIT_LIST_HEAD(&q->old_buckets);
 
index d448fe3..4074c50 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/random.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <net/ip.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -45,7 +45,7 @@ struct sfb_bucket {
  * (Section 4.4 of SFB reference : moving hash functions)
  */
 struct sfb_bins {
-       u32               perturbation; /* jhash perturbation */
+       siphash_key_t     perturbation; /* siphash key */
        struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
 };
 
@@ -217,7 +217,8 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_da
 
 static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
 {
-       q->bins[slot].perturbation = prandom_u32();
+       get_random_bytes(&q->bins[slot].perturbation,
+                        sizeof(q->bins[slot].perturbation));
 }
 
 static void sfb_swap_slot(struct sfb_sched_data *q)
@@ -314,9 +315,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                /* If using external classifiers, get result and record it. */
                if (!sfb_classify(skb, fl, &ret, &salt))
                        goto other_drop;
-               sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+               sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
        } else {
-               sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
+               sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
        }
 
 
@@ -352,7 +353,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                /* Inelastic flow */
                if (q->double_buffering) {
                        sfbhash = skb_get_hash_perturb(skb,
-                           q->bins[slot].perturbation);
+                           &q->bins[slot].perturbation);
                        if (!sfbhash)
                                sfbhash = 1;
                        sfb_skb_cb(skb)->hashes[slot] = sfbhash;
index 68404a9..c787d4d 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/skbuff.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
@@ -117,7 +117,7 @@ struct sfq_sched_data {
        u8              headdrop;
        u8              maxdepth;       /* limit of packets per flow */
 
-       u32             perturbation;
+       siphash_key_t   perturbation;
        u8              cur_depth;      /* depth of longest slot */
        u8              flags;
        unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
@@ -157,7 +157,7 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
                             const struct sk_buff *skb)
 {
-       return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
+       return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
 }
 
 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -607,9 +607,11 @@ static void sfq_perturbation(struct timer_list *t)
        struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
        struct Qdisc *sch = q->sch;
        spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+       siphash_key_t nkey;
 
+       get_random_bytes(&nkey, sizeof(nkey));
        spin_lock(root_lock);
-       q->perturbation = prandom_u32();
+       q->perturbation = nkey;
        if (!q->filter_list && q->tail)
                sfq_rehash(sch);
        spin_unlock(root_lock);
@@ -688,7 +690,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        del_timer(&q->perturb_timer);
        if (q->perturb_period) {
                mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
-               q->perturbation = prandom_u32();
+               get_random_bytes(&q->perturbation, sizeof(q->perturbation));
        }
        sch_tree_unlock(sch);
        kfree(p);
@@ -745,7 +747,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
        q->quantum = psched_mtu(qdisc_dev(sch));
        q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
        q->perturb_period = 0;
-       q->perturbation = prandom_u32();
+       get_random_bytes(&q->perturbation, sizeof(q->perturbation));
 
        if (opt) {
                int err = sfq_change(sch, opt);
index 6719a65..7cd6862 100644 (file)
@@ -1152,7 +1152,7 @@ EXPORT_SYMBOL_GPL(taprio_offload_free);
  * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
  * This is left as TODO.
  */
-void taprio_offload_config_changed(struct taprio_sched *q)
+static void taprio_offload_config_changed(struct taprio_sched *q)
 {
        struct sched_gate_list *oper, *admin;
 
@@ -1224,8 +1224,6 @@ static int taprio_enable_offload(struct net_device *dev,
                goto done;
        }
 
-       taprio_offload_config_changed(q);
-
 done:
        taprio_offload_free(offload);
 
@@ -1505,6 +1503,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
                        call_rcu(&admin->rcu, taprio_free_sched_cb);
 
                spin_unlock_irqrestore(&q->current_entry_lock, flags);
+
+               if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
+                       taprio_offload_config_changed(q);
        }
 
        new_admin = NULL;
index 5ca0ec0..ffd3262 100644 (file)
@@ -8476,7 +8476,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
        mask = 0;
 
        /* Is there any exceptional events?  */
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
        if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -8485,7 +8485,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
                mask |= EPOLLHUP;
 
        /* Is it readable?  Reconsider this code with TCP-style support.  */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* The association is either gone or not ready.  */
@@ -8871,7 +8871,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
                if (sk_can_busy_loop(sk)) {
                        sk_busy_loop(sk, noblock);
 
-                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                                continue;
                }
 
@@ -9306,7 +9306,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
        newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
        newinet->inet_dport = htons(asoc->peer.port);
        newinet->pmtudisc = inet->pmtudisc;
-       newinet->inet_id = asoc->next_tsn ^ jiffies;
+       newinet->inet_id = prandom_u32();
 
        newinet->uc_ttl = inet->uc_ttl;
        newinet->mc_loop = 1;
index 5b93258..737b499 100644 (file)
@@ -123,6 +123,12 @@ struct proto smc_proto6 = {
 };
 EXPORT_SYMBOL_GPL(smc_proto6);
 
+static void smc_restore_fallback_changes(struct smc_sock *smc)
+{
+       smc->clcsock->file->private_data = smc->sk.sk_socket;
+       smc->clcsock->file = NULL;
+}
+
 static int __smc_release(struct smc_sock *smc)
 {
        struct sock *sk = &smc->sk;
@@ -141,6 +147,7 @@ static int __smc_release(struct smc_sock *smc)
                }
                sk->sk_state = SMC_CLOSED;
                sk->sk_state_change(sk);
+               smc_restore_fallback_changes(smc);
        }
 
        sk->sk_prot->unhash(sk);
@@ -700,8 +707,6 @@ static int __smc_connect(struct smc_sock *smc)
        int smc_type;
        int rc = 0;
 
-       sock_hold(&smc->sk); /* sock put in passive closing */
-
        if (smc->use_fallback)
                return smc_connect_fallback(smc, smc->fallback_rsn);
 
@@ -791,6 +796,7 @@ static void smc_connect_work(struct work_struct *work)
                        smc->sk.sk_err = EPIPE;
                else if (signal_pending(current))
                        smc->sk.sk_err = -sock_intr_errno(timeo);
+               sock_put(&smc->sk); /* passive closing */
                goto out;
        }
 
@@ -846,6 +852,8 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
        rc = kernel_connect(smc->clcsock, addr, alen, flags);
        if (rc && rc != -EINPROGRESS)
                goto out;
+
+       sock_hold(&smc->sk); /* sock put in passive closing */
        if (flags & O_NONBLOCK) {
                if (schedule_work(&smc->connect_work))
                        smc->connect_nonblock = 1;
@@ -1291,8 +1299,8 @@ static void smc_listen_work(struct work_struct *work)
        /* check if RDMA is available */
        if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
                /* prepare RDMA check */
-               memset(&ini, 0, sizeof(ini));
                ini.is_smcd = false;
+               ini.ism_dev = NULL;
                ini.ib_lcl = &pclc->lcl;
                rc = smc_find_rdma_device(new_smc, &ini);
                if (rc) {
@@ -1724,7 +1732,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
        case TCP_FASTOPEN_KEY:
        case TCP_FASTOPEN_NO_COOKIE:
                /* option not supported by SMC */
-               if (sk->sk_state == SMC_INIT) {
+               if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
                        smc_switch_to_fallback(smc);
                        smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
                } else {
index 88556f0..2ba97ff 100644 (file)
@@ -561,7 +561,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
        }
 
        rtnl_lock();
-       nest_lvl = dev_get_nest_level(ndev);
+       nest_lvl = ndev->lower_level;
        for (i = 0; i < nest_lvl; i++) {
                struct list_head *lower = &ndev->adj_list.lower;
 
index bab2da8..571e6d8 100644 (file)
@@ -376,8 +376,6 @@ static int smc_pnet_fill_entry(struct net *net,
        return 0;
 
 error:
-       if (pnetelem->ndev)
-               dev_put(pnetelem->ndev);
        return rc;
 }
 
@@ -718,7 +716,7 @@ static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
        int i, nest_lvl;
 
        rtnl_lock();
-       nest_lvl = dev_get_nest_level(ndev);
+       nest_lvl = ndev->lower_level;
        for (i = 0; i < nest_lvl; i++) {
                struct list_head *lower = &ndev->adj_list.lower;
 
index 339e8c0..195b40c 100644 (file)
@@ -220,7 +220,7 @@ void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
                goto out;
 
        spin_lock_bh(&xprt->bc_pa_lock);
-       xprt->bc_alloc_max -= max_reqs;
+       xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
        list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                dprintk("RPC:        req=%p\n", req);
                list_del(&req->rq_bc_pa_list);
@@ -307,8 +307,8 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
                 */
                dprintk("RPC:       Last session removed req=%p\n", req);
                xprt_free_allocation(req);
-               return;
        }
+       xprt_put(xprt);
 }
 
 /*
@@ -339,7 +339,7 @@ found:
                spin_unlock(&xprt->bc_pa_lock);
                if (new) {
                        if (req != new)
-                               xprt_free_bc_rqst(new);
+                               xprt_free_allocation(new);
                        break;
                } else if (req)
                        break;
@@ -368,6 +368,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
        dprintk("RPC:       add callback request to list\n");
+       xprt_get(xprt);
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        wake_up(&bc_serv->sv_cb_waitq);
index 8a45b3c..41df4c5 100644 (file)
@@ -1943,6 +1943,11 @@ static void xprt_destroy_cb(struct work_struct *work)
        rpc_destroy_wait_queue(&xprt->backlog);
        kfree(xprt->servername);
        /*
+        * Destroy any existing back channel
+        */
+       xprt_destroy_backchannel(xprt, UINT_MAX);
+
+       /*
         * Tear down transport state and free the rpc_xprt
         */
        xprt->ops->destroy(xprt);
index 50e075f..b458bf5 100644 (file)
@@ -163,6 +163,7 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
        spin_lock(&xprt->bc_pa_lock);
        list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
+       xprt_put(xprt);
 }
 
 static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
@@ -259,6 +260,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
 
        /* Queue rqst for ULP's callback service */
        bc_serv = xprt->bc_serv;
+       xprt_get(xprt);
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);
index 23cb379..8f35060 100644 (file)
@@ -34,8 +34,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include "core.h"
 #include "name_table.h"
 #include "subscr.h"
index 60d8295..3042f65 100644 (file)
 #include <linux/rhashtable.h>
 #include <net/genetlink.h>
 
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 struct tipc_node;
 struct tipc_bearer;
 struct tipc_bc_base;
index f8bbc4a..4b92b19 100644 (file)
@@ -740,7 +740,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
                /* fall through */
        case TIPC_LISTEN:
        case TIPC_CONNECTING:
-               if (!skb_queue_empty(&sk->sk_receive_queue))
+               if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                        revents |= EPOLLIN | EPOLLRDNORM;
                break;
        case TIPC_OPEN:
@@ -748,7 +748,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
                        revents |= EPOLLOUT;
                if (!tipc_sk_type_connectionless(sk))
                        break;
-               if (skb_queue_empty(&sk->sk_receive_queue))
+               if (skb_queue_empty_lockless(&sk->sk_receive_queue))
                        break;
                revents |= EPOLLIN | EPOLLRDNORM;
                break;
index f959487..683d008 100644 (file)
@@ -523,8 +523,10 @@ last_record:
 int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
        int rc;
 
+       mutex_lock(&tls_ctx->tx_lock);
        lock_sock(sk);
 
        if (unlikely(msg->msg_controllen)) {
@@ -538,12 +540,14 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 out:
        release_sock(sk);
+       mutex_unlock(&tls_ctx->tx_lock);
        return rc;
 }
 
 int tls_device_sendpage(struct sock *sk, struct page *page,
                        int offset, size_t size, int flags)
 {
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct iov_iter msg_iter;
        char *kaddr = kmap(page);
        struct kvec iov;
@@ -552,6 +556,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;
 
+       mutex_lock(&tls_ctx->tx_lock);
        lock_sock(sk);
 
        if (flags & MSG_OOB) {
@@ -568,6 +573,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
 
 out:
        release_sock(sk);
+       mutex_unlock(&tls_ctx->tx_lock);
        return rc;
 }
 
@@ -623,9 +629,11 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
 
 void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
 {
-       if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
+       if (tls_is_partially_sent_record(ctx)) {
                gfp_t sk_allocation = sk->sk_allocation;
 
+               WARN_ON_ONCE(sk->sk_write_pending);
+
                sk->sk_allocation = GFP_ATOMIC;
                tls_push_partial_record(sk, ctx,
                                        MSG_DONTWAIT | MSG_NOSIGNAL |
index ac88877..0775ae4 100644 (file)
@@ -267,6 +267,7 @@ void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
 
        memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
        memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
+       mutex_destroy(&ctx->tx_lock);
 
        if (sk)
                kfree_rcu(ctx, rcu);
@@ -612,6 +613,7 @@ static struct tls_context *create_ctx(struct sock *sk)
        if (!ctx)
                return NULL;
 
+       mutex_init(&ctx->tx_lock);
        rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
        ctx->sk_proto = sk->sk_prot;
        return ctx;
index c2b5e0d..446f23c 100644 (file)
@@ -897,15 +897,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
                return -ENOTSUPP;
 
+       mutex_lock(&tls_ctx->tx_lock);
        lock_sock(sk);
 
-       /* Wait till there is any pending write on socket */
-       if (unlikely(sk->sk_write_pending)) {
-               ret = wait_on_pending_writer(sk, &timeo);
-               if (unlikely(ret))
-                       goto send_end;
-       }
-
        if (unlikely(msg->msg_controllen)) {
                ret = tls_proccess_cmsg(sk, msg, &record_type);
                if (ret) {
@@ -1091,6 +1085,7 @@ send_end:
        ret = sk_stream_error(sk, msg->msg_flags, ret);
 
        release_sock(sk);
+       mutex_unlock(&tls_ctx->tx_lock);
        return copied ? copied : ret;
 }
 
@@ -1114,13 +1109,6 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
        eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
-       /* Wait till there is any pending write on socket */
-       if (unlikely(sk->sk_write_pending)) {
-               ret = wait_on_pending_writer(sk, &timeo);
-               if (unlikely(ret))
-                       goto sendpage_end;
-       }
-
        /* Call the sk_stream functions to manage the sndbuf mem. */
        while (size > 0) {
                size_t copy, required_size;
@@ -1219,15 +1207,18 @@ sendpage_end:
 int tls_sw_sendpage(struct sock *sk, struct page *page,
                    int offset, size_t size, int flags)
 {
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
        int ret;
 
        if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
                return -ENOTSUPP;
 
+       mutex_lock(&tls_ctx->tx_lock);
        lock_sock(sk);
        ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
        release_sock(sk);
+       mutex_unlock(&tls_ctx->tx_lock);
        return ret;
 }
 
@@ -2170,9 +2161,11 @@ static void tx_work_handler(struct work_struct *work)
 
        if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
                return;
+       mutex_lock(&tls_ctx->tx_lock);
        lock_sock(sk);
        tls_tx_records(sk, -1);
        release_sock(sk);
+       mutex_unlock(&tls_ctx->tx_lock);
 }
 
 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
@@ -2180,12 +2173,9 @@ void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
        struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
 
        /* Schedule the transmission if tx list is ready */
-       if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
-               /* Schedule the transmission */
-               if (!test_and_set_bit(BIT_TX_SCHEDULED,
-                                     &tx_ctx->tx_bitmask))
-                       schedule_delayed_work(&tx_ctx->tx_work.work, 0);
-       }
+       if (is_tx_ready(tx_ctx) &&
+           !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
+               schedule_delayed_work(&tx_ctx->tx_work.work, 0);
 }
 
 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
index 67e87db..0d8da80 100644 (file)
@@ -2599,7 +2599,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
                mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
@@ -2628,7 +2628,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
        mask = 0;
 
        /* exceptional events? */
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -2638,7 +2638,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
                mask |= EPOLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
index 2ab43b2..582a3e4 100644 (file)
@@ -870,7 +870,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
                 * the queue and write as long as the socket isn't shutdown for
                 * sending.
                 */
-               if (!skb_queue_empty(&sk->sk_receive_queue) ||
+               if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
                    (sk->sk_shutdown & RCV_SHUTDOWN)) {
                        mask |= EPOLLIN | EPOLLRDNORM;
                }
index 481f7f8..fb2060d 100644 (file)
@@ -947,9 +947,11 @@ virtio_transport_recv_connected(struct sock *sk,
                if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
                        vsk->peer_shutdown |= SEND_SHUTDOWN;
                if (vsk->peer_shutdown == SHUTDOWN_MASK &&
-                   vsock_stream_has_data(vsk) <= 0) {
-                       sock_set_flag(sk, SOCK_DONE);
-                       sk->sk_state = TCP_CLOSING;
+                   vsock_stream_has_data(vsk) <= 0 &&
+                   !sock_flag(sk, SOCK_DONE)) {
+                       (void)virtio_transport_reset(vsk, NULL);
+
+                       virtio_transport_do_close(vsk, true);
                }
                if (le32_to_cpu(pkt->hdr.flags))
                        sk->sk_state_change(sk);
index e851caf..fcac5c6 100644 (file)
@@ -204,6 +204,11 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
                return false;
        }
 
+       /* channel 14 is only for IEEE 802.11b */
+       if (chandef->center_freq1 == 2484 &&
+           chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
+               return false;
+
        if (cfg80211_chandef_is_edmg(chandef) &&
            !cfg80211_edmg_chandef_valid(chandef))
                return false;
index 4453dd3..7b72286 100644 (file)
@@ -393,7 +393,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
        [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
                                   .len = IEEE80211_MAX_MESH_ID_LEN },
-       [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
+       [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
 
        [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
        [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
index 419eb12..5b4ed5b 100644 (file)
@@ -1559,7 +1559,8 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
        }
 
        if (freq == 2484) {
-               if (chandef->width > NL80211_CHAN_WIDTH_40)
+               /* channel 14 is only for IEEE 802.11b */
+               if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
                        return false;
 
                *op_class = 82; /* channel 14 */
index 16d5f35..3049af2 100644 (file)
@@ -27,6 +27,9 @@ void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
        unsigned long flags;
 
+       if (!xs->tx)
+               return;
+
        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_add_rcu(&xs->list, &umem->xsk_list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
@@ -36,6 +39,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
        unsigned long flags;
 
+       if (!xs->tx)
+               return;
+
        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_del_rcu(&xs->list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
index 9b599ed..2c86a2f 100644 (file)
@@ -480,6 +480,9 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                        else
                                XFRM_INC_STATS(net,
                                               LINUX_MIB_XFRMINSTATEINVALID);
+
+                       if (encap_type == -1)
+                               dev_put(skb->dev);
                        goto drop;
                }
 
index c6f3c4a..f342356 100644 (file)
@@ -495,6 +495,8 @@ static void ___xfrm_state_destroy(struct xfrm_state *x)
                x->type->destructor(x);
                xfrm_put_type(x->type);
        }
+       if (x->xfrag.page)
+               put_page(x->xfrag.page);
        xfrm_dev_state_free(x);
        security_xfrm_state_free(x);
        xfrm_state_free(x);
index 1d9be26..42b571c 100644 (file)
@@ -176,6 +176,7 @@ KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/bpf/
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf
+KBUILD_HOSTCFLAGS += -DHAVE_ATTR_TEST=0
 
 HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
 
index 7b7c2fa..be984aa 100644 (file)
@@ -99,7 +99,8 @@ lx-symbols command."""
             attrs[n]['name'].string(): attrs[n]['address']
             for n in range(int(sect_attrs['nsections']))}
         args = []
-        for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
+        for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
+                             ".text", ".text.hot", ".text.unlikely"]:
             address = section_name_to_address.get(section_name)
             if address:
                 args.append(" -s {name} {addr}".format(
index 936d3ad..d2a30a7 100644 (file)
@@ -348,26 +348,38 @@ static enum export export_from_sec(struct elf_info *elf, unsigned int sec)
                return export_unknown;
 }
 
-static char *sym_extract_namespace(const char **symname)
+static const char *namespace_from_kstrtabns(struct elf_info *info,
+                                           Elf_Sym *kstrtabns)
 {
-       char *namespace = NULL;
-       char *ns_separator;
+       char *value = info->ksymtab_strings + kstrtabns->st_value;
+       return value[0] ? value : NULL;
+}
+
+static void sym_update_namespace(const char *symname, const char *namespace)
+{
+       struct symbol *s = find_symbol(symname);
 
-       ns_separator = strchr(*symname, '.');
-       if (ns_separator) {
-               namespace = NOFAIL(strndup(*symname, ns_separator - *symname));
-               *symname = ns_separator + 1;
+       /*
+        * That symbol should have been created earlier and thus this is
+        * actually an assertion.
+        */
+       if (!s) {
+               merror("Could not update namespace(%s) for symbol %s\n",
+                      namespace, symname);
+               return;
        }
 
-       return namespace;
+       free(s->namespace);
+       s->namespace =
+               namespace && namespace[0] ? NOFAIL(strdup(namespace)) : NULL;
 }
 
 /**
  * Add an exported symbol - it may have already been added without a
  * CRC, in this case just update the CRC
  **/
-static struct symbol *sym_add_exported(const char *name, const char *namespace,
-                                      struct module *mod, enum export export)
+static struct symbol *sym_add_exported(const char *name, struct module *mod,
+                                      enum export export)
 {
        struct symbol *s = find_symbol(name);
 
@@ -383,8 +395,6 @@ static struct symbol *sym_add_exported(const char *name, const char *namespace,
                        s->module = mod;
                }
        }
-       free(s->namespace);
-       s->namespace = namespace ? strdup(namespace) : NULL;
        s->preloaded = 0;
        s->vmlinux   = is_vmlinux(mod->name);
        s->kernel    = 0;
@@ -583,6 +593,10 @@ static int parse_elf(struct elf_info *info, const char *filename)
                        info->export_unused_gpl_sec = i;
                else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
                        info->export_gpl_future_sec = i;
+               else if (strcmp(secname, "__ksymtab_strings") == 0)
+                       info->ksymtab_strings = (void *)hdr +
+                                               sechdrs[i].sh_offset -
+                                               sechdrs[i].sh_addr;
 
                if (sechdrs[i].sh_type == SHT_SYMTAB) {
                        unsigned int sh_link_idx;
@@ -672,7 +686,6 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
        enum export export;
        bool is_crc = false;
        const char *name;
-       char *namespace;
 
        if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
            strstarts(symname, "__ksymtab"))
@@ -745,9 +758,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
                /* All exported symbols */
                if (strstarts(symname, "__ksymtab_")) {
                        name = symname + strlen("__ksymtab_");
-                       namespace = sym_extract_namespace(&name);
-                       sym_add_exported(name, namespace, mod, export);
-                       free(namespace);
+                       sym_add_exported(name, mod, export);
                }
                if (strcmp(symname, "init_module") == 0)
                        mod->has_init = 1;
@@ -2043,6 +2054,16 @@ static void read_symbols(const char *modname)
                handle_moddevtable(mod, &info, sym, symname);
        }
 
+       /* Apply symbol namespaces from __kstrtabns_<symbol> entries. */
+       for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
+               symname = remove_dot(info.strtab + sym->st_name);
+
+               if (strstarts(symname, "__kstrtabns_"))
+                       sym_update_namespace(symname + strlen("__kstrtabns_"),
+                                            namespace_from_kstrtabns(&info,
+                                                                     sym));
+       }
+
        // check for static EXPORT_SYMBOL_* functions && global vars
        for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
                unsigned char bind = ELF_ST_BIND(sym->st_info);
@@ -2196,7 +2217,7 @@ static int check_exports(struct module *mod)
                else
                        basename = mod->name;
 
-               if (exp->namespace && exp->namespace[0]) {
+               if (exp->namespace) {
                        add_namespace(&mod->required_namespaces,
                                      exp->namespace);
 
@@ -2454,12 +2475,12 @@ static void read_dump(const char *fname, unsigned int kernel)
                        mod = new_module(modname);
                        mod->skip = 1;
                }
-               s = sym_add_exported(symname, namespace, mod,
-                                    export_no(export));
+               s = sym_add_exported(symname, mod, export_no(export));
                s->kernel    = kernel;
                s->preloaded = 1;
                s->is_static = 0;
                sym_update_crc(symname, mod, crc, export_no(export));
+               sym_update_namespace(symname, namespace);
        }
        release_file(file, size);
        return;
index 92a926d..ad271bc 100644 (file)
@@ -143,6 +143,7 @@ struct elf_info {
        Elf_Section  export_gpl_sec;
        Elf_Section  export_unused_gpl_sec;
        Elf_Section  export_gpl_future_sec;
+       char         *ksymtab_strings;
        char         *strtab;
        char         *modinfo;
        unsigned int modinfo_len;
index 3754dac..04cea09 100644 (file)
@@ -31,12 +31,12 @@ generate_deps() {
        local mod_file=`echo $@ | sed -e 's/\.ko/\.mod/'`
        local ns_deps_file=`echo $@ | sed -e 's/\.ko/\.ns_deps/'`
        if [ ! -f "$ns_deps_file" ]; then return; fi
-       local mod_source_files=`cat $mod_file | sed -n 1p                      \
+       local mod_source_files="`cat $mod_file | sed -n 1p                      \
                                              | sed -e 's/\.o/\.c/g'           \
-                                             | sed "s/[^ ]* */${srctree}\/&/g"`
+                                             | sed "s|[^ ]* *|${srctree}/&|g"`"
        for ns in `cat $ns_deps_file`; do
                echo "Adding namespace $ns to module $mod_name (if needed)."
-               generate_deps_for_ns $ns $mod_source_files
+               generate_deps_for_ns $ns "$mod_source_files"
                # sort the imports
                for source_file in $mod_source_files; do
                        sed '/MODULE_IMPORT_NS/Q' $source_file > ${source_file}.tmp
index 97a2c84..45e8aa3 100755 (executable)
@@ -4,13 +4,13 @@
 tmp_file=$(mktemp)
 trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
 
-cat << "END" | "$CC" -c -x c - -o $tmp_file.o >/dev/null 2>&1
+cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
 void *p = &p;
 END
-"$LD" $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
+$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
 
 # Despite printing an error message, GNU nm still exits with exit code 0 if it
 # sees a relr section. So we need to check that nothing is printed to stderr.
-test -z "$("$NM" $tmp_file 2>&1 >/dev/null)"
+test -z "$($NM $tmp_file 2>&1 >/dev/null)"
 
-"$OBJCOPY" -O binary $tmp_file $tmp_file.bin
+$OBJCOPY -O binary $tmp_file $tmp_file.bin
index 8a10b43..40b7905 100644 (file)
@@ -20,6 +20,7 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
        [LOCKDOWN_NONE] = "none",
        [LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
        [LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
+       [LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
        [LOCKDOWN_KEXEC] = "kexec of unsigned images",
        [LOCKDOWN_HIBERNATION] = "hibernation",
        [LOCKDOWN_PCI_ACCESS] = "direct PCI access",
index 41905af..f34ce56 100644 (file)
@@ -528,7 +528,7 @@ static int snd_compress_check_input(struct snd_compr_params *params)
 {
        /* first let's check the buffer parameter's */
        if (params->buffer.fragment_size == 0 ||
-           params->buffer.fragments > INT_MAX / params->buffer.fragment_size ||
+           params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
            params->buffer.fragments == 0)
                return -EINVAL;
 
index d80041e..2236b5e 100644 (file)
@@ -1782,11 +1782,14 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
        struct snd_pcm_runtime *runtime;
        unsigned long flags;
 
-       if (PCM_RUNTIME_CHECK(substream))
+       if (snd_BUG_ON(!substream))
                return;
-       runtime = substream->runtime;
 
        snd_pcm_stream_lock_irqsave(substream, flags);
+       if (PCM_RUNTIME_CHECK(substream))
+               goto _unlock;
+       runtime = substream->runtime;
+
        if (!snd_pcm_running(substream) ||
            snd_pcm_update_hw_ptr0(substream, 1) < 0)
                goto _end;
@@ -1797,6 +1800,7 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
 #endif
  _end:
        kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+ _unlock:
        snd_pcm_stream_unlock_irqrestore(substream, flags);
 }
 EXPORT_SYMBOL(snd_pcm_period_elapsed);
index 5c9fbf3..59ae21b 100644 (file)
@@ -226,7 +226,8 @@ static int snd_timer_check_master(struct snd_timer_instance *master)
        return 0;
 }
 
-static int snd_timer_close_locked(struct snd_timer_instance *timeri);
+static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+                                 struct device **card_devp_to_put);
 
 /*
  * open a timer instance
@@ -238,6 +239,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
 {
        struct snd_timer *timer;
        struct snd_timer_instance *timeri = NULL;
+       struct device *card_dev_to_put = NULL;
        int err;
 
        mutex_lock(&register_mutex);
@@ -261,7 +263,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
                list_add_tail(&timeri->open_list, &snd_timer_slave_list);
                err = snd_timer_check_slave(timeri);
                if (err < 0) {
-                       snd_timer_close_locked(timeri);
+                       snd_timer_close_locked(timeri, &card_dev_to_put);
                        timeri = NULL;
                }
                goto unlock;
@@ -282,11 +284,11 @@ int snd_timer_open(struct snd_timer_instance **ti,
                goto unlock;
        }
        if (!list_empty(&timer->open_list_head)) {
-               timeri = list_entry(timer->open_list_head.next,
+               struct snd_timer_instance *t =
+                       list_entry(timer->open_list_head.next,
                                    struct snd_timer_instance, open_list);
-               if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
+               if (t->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
                        err = -EBUSY;
-                       timeri = NULL;
                        goto unlock;
                }
        }
@@ -313,7 +315,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
                        timeri = NULL;
 
                        if (timer->card)
-                               put_device(&timer->card->card_dev);
+                               card_dev_to_put = &timer->card->card_dev;
                        module_put(timer->module);
                        goto unlock;
                }
@@ -323,12 +325,15 @@ int snd_timer_open(struct snd_timer_instance **ti,
        timer->num_instances++;
        err = snd_timer_check_master(timeri);
        if (err < 0) {
-               snd_timer_close_locked(timeri);
+               snd_timer_close_locked(timeri, &card_dev_to_put);
                timeri = NULL;
        }
 
  unlock:
        mutex_unlock(&register_mutex);
+       /* put_device() is called after unlock for avoiding deadlock */
+       if (card_dev_to_put)
+               put_device(card_dev_to_put);
        *ti = timeri;
        return err;
 }
@@ -338,7 +343,8 @@ EXPORT_SYMBOL(snd_timer_open);
  * close a timer instance
  * call this with register_mutex down.
  */
-static int snd_timer_close_locked(struct snd_timer_instance *timeri)
+static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+                                 struct device **card_devp_to_put)
 {
        struct snd_timer *timer = timeri->timer;
        struct snd_timer_instance *slave, *tmp;
@@ -395,7 +401,7 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri)
                        timer->hw.close(timer);
                /* release a card refcount for safe disconnection */
                if (timer->card)
-                       put_device(&timer->card->card_dev);
+                       *card_devp_to_put = &timer->card->card_dev;
                module_put(timer->module);
        }
 
@@ -407,14 +413,18 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri)
  */
 int snd_timer_close(struct snd_timer_instance *timeri)
 {
+       struct device *card_dev_to_put = NULL;
        int err;
 
        if (snd_BUG_ON(!timeri))
                return -ENXIO;
 
        mutex_lock(&register_mutex);
-       err = snd_timer_close_locked(timeri);
+       err = snd_timer_close_locked(timeri, &card_dev_to_put);
        mutex_unlock(&register_mutex);
+       /* put_device() is called after unlock for avoiding deadlock */
+       if (card_dev_to_put)
+               put_device(card_dev_to_put);
        return err;
 }
 EXPORT_SYMBOL(snd_timer_close);
index 32b864b..06d6a37 100644 (file)
@@ -27,6 +27,8 @@
 #define SAFFIRE_CLOCK_SOURCE_SPDIF             1
 
 /* clock sources as returned from register of Saffire Pro 10 and 26 */
+#define SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK    0x000000ff
+#define SAFFIREPRO_CLOCK_SOURCE_DETECT_MASK    0x0000ff00
 #define SAFFIREPRO_CLOCK_SOURCE_INTERNAL       0
 #define SAFFIREPRO_CLOCK_SOURCE_SKIP           1 /* never used on hardware */
 #define SAFFIREPRO_CLOCK_SOURCE_SPDIF          2
@@ -189,6 +191,7 @@ saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
                map = saffirepro_clk_maps[1];
 
        /* In a case that this driver cannot handle the value of register. */
+       value &= SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK;
        if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
                err = -EIO;
                goto end;
index 73fee99..6c1497d 100644 (file)
@@ -252,8 +252,7 @@ end:
        return err;
 }
 
-static unsigned int
-map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
+static int map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
 {
        unsigned int sec, sections, ch, channels;
        unsigned int pcm, midi, location;
index d3999e7..7e7be8e 100644 (file)
@@ -447,8 +447,6 @@ static void azx_int_disable(struct hdac_bus *bus)
        list_for_each_entry(azx_dev, &bus->stream_list, list)
                snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
 
-       synchronize_irq(bus->irq);
-
        /* disable SIE for all streams */
        snd_hdac_chip_writeb(bus, INTCTL, 0);
 
index 240f4ca..c524193 100644 (file)
@@ -1348,9 +1348,9 @@ static int azx_free(struct azx *chip)
        }
 
        if (bus->chip_init) {
-               azx_stop_chip(chip);
                azx_clear_irq_pending(chip);
                azx_stop_all_streams(chip);
+               azx_stop_chip(chip);
        }
 
        if (bus->irq >= 0)
@@ -2396,9 +2396,18 @@ static const struct pci_device_id azx_ids[] = {
        /* CometLake-H */
        { PCI_DEVICE(0x8086, 0x06C8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* CometLake-S */
+       { PCI_DEVICE(0x8086, 0xa3f0),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Icelake */
        { PCI_DEVICE(0x8086, 0x34c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Jasperlake */
+       { PCI_DEVICE(0x8086, 0x38c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Tigerlake */
+       { PCI_DEVICE(0x8086, 0xa0c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index 6d1fb7c..b7a1abb 100644 (file)
@@ -7604,7 +7604,7 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
        /* Delay enabling the HP amp, to let the mic-detection
         * state machine run.
         */
-       cancel_delayed_work_sync(&spec->unsol_hp_work);
+       cancel_delayed_work(&spec->unsol_hp_work);
        schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
        tbl = snd_hda_jack_tbl_get(codec, cb->nid);
        if (tbl)
index 795cbda..78bd2e3 100644 (file)
@@ -46,10 +46,12 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
                                ((codec)->core.vendor_id == 0x80862800))
 #define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
 #define is_icelake(codec) ((codec)->core.vendor_id == 0x8086280f)
+#define is_tigerlake(codec) ((codec)->core.vendor_id == 0x80862812)
 #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
                                || is_skylake(codec) || is_broxton(codec) \
                                || is_kabylake(codec) || is_geminilake(codec) \
-                               || is_cannonlake(codec) || is_icelake(codec))
+                               || is_cannonlake(codec) || is_icelake(codec) \
+                               || is_tigerlake(codec))
 #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
 #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
 #define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
@@ -145,6 +147,7 @@ struct hdmi_spec {
        struct snd_array pins; /* struct hdmi_spec_per_pin */
        struct hdmi_pcm pcm_rec[16];
        struct mutex pcm_lock;
+       struct mutex bind_lock; /* for audio component binding */
        /* pcm_bitmap means which pcms have been assigned to pins*/
        unsigned long pcm_bitmap;
        int pcm_used;   /* counter of pcm_rec[] */
@@ -2258,7 +2261,7 @@ static int generic_hdmi_init(struct hda_codec *codec)
        struct hdmi_spec *spec = codec->spec;
        int pin_idx;
 
-       mutex_lock(&spec->pcm_lock);
+       mutex_lock(&spec->bind_lock);
        spec->use_jack_detect = !codec->jackpoll_interval;
        for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
@@ -2275,7 +2278,7 @@ static int generic_hdmi_init(struct hda_codec *codec)
                        snd_hda_jack_detect_enable_callback(codec, pin_nid,
                                                            jack_callback);
        }
-       mutex_unlock(&spec->pcm_lock);
+       mutex_unlock(&spec->bind_lock);
        return 0;
 }
 
@@ -2382,6 +2385,7 @@ static int alloc_generic_hdmi(struct hda_codec *codec)
        spec->ops = generic_standard_hdmi_ops;
        spec->dev_num = 1;      /* initialize to 1 */
        mutex_init(&spec->pcm_lock);
+       mutex_init(&spec->bind_lock);
        snd_hdac_register_chmap_ops(&codec->core, &spec->chmap);
 
        spec->chmap.ops.get_chmap = hdmi_get_chmap;
@@ -2451,7 +2455,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
        int i;
 
        spec = container_of(acomp->audio_ops, struct hdmi_spec, drm_audio_ops);
-       mutex_lock(&spec->pcm_lock);
+       mutex_lock(&spec->bind_lock);
        spec->use_acomp_notifier = use_acomp;
        spec->codec->relaxed_resume = use_acomp;
        /* reprogram each jack detection logic depending on the notifier */
@@ -2461,7 +2465,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
                                              get_pin(spec, i)->pin_nid,
                                              use_acomp);
        }
-       mutex_unlock(&spec->pcm_lock);
+       mutex_unlock(&spec->bind_lock);
 }
 
 /* enable / disable the notifier via master bind / unbind */
@@ -2849,6 +2853,18 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
        return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
 }
 
+static int patch_i915_tgl_hdmi(struct hda_codec *codec)
+{
+       /*
+        * pin to port mapping table where the value indicate the pin number and
+        * the index indicate the port number with 1 base.
+        */
+       static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+
+       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
+}
+
+
 /* Intel Baytrail and Braswell; with eld notifier */
 static int patch_i915_byt_hdmi(struct hda_codec *codec)
 {
@@ -4151,6 +4167,7 @@ HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI",      patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI",    patch_i915_icl_hdmi),
+HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI",  patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",        patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",   patch_i915_byt_hdmi),
index ce4f116..80f66ba 100644 (file)
@@ -393,6 +393,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0700:
        case 0x10ec0701:
        case 0x10ec0703:
+       case 0x10ec0711:
                alc_update_coef_idx(codec, 0x10, 1<<15, 0);
                break;
        case 0x10ec0662:
@@ -408,6 +409,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0672:
                alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
                break;
+       case 0x10ec0623:
+               alc_update_coef_idx(codec, 0x19, 1<<13, 0);
+               break;
        case 0x10ec0668:
                alc_update_coef_idx(codec, 0x7, 3<<13, 0);
                break;
@@ -2919,6 +2923,7 @@ enum {
        ALC269_TYPE_ALC225,
        ALC269_TYPE_ALC294,
        ALC269_TYPE_ALC300,
+       ALC269_TYPE_ALC623,
        ALC269_TYPE_ALC700,
 };
 
@@ -2954,6 +2959,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC225:
        case ALC269_TYPE_ALC294:
        case ALC269_TYPE_ALC300:
+       case ALC269_TYPE_ALC623:
        case ALC269_TYPE_ALC700:
                ssids = alc269_ssids;
                break;
@@ -7215,6 +7221,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8016,9 +8024,13 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC300;
                spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
                break;
+       case 0x10ec0623:
+               spec->codec_variant = ALC269_TYPE_ALC623;
+               break;
        case 0x10ec0700:
        case 0x10ec0701:
        case 0x10ec0703:
+       case 0x10ec0711:
                spec->codec_variant = ALC269_TYPE_ALC700;
                spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
@@ -9216,6 +9228,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0623, "ALC623", patch_alc269),
        HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
        HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
        HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
@@ -9233,6 +9246,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
        HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
index 91242b6..4570f66 100644 (file)
@@ -410,8 +410,8 @@ static void hdac_hda_codec_remove(struct snd_soc_component *component)
                return;
        }
 
-       snd_hdac_ext_bus_link_put(hdev->bus, hlink);
        pm_runtime_disable(&hdev->dev);
+       snd_hdac_ext_bus_link_put(hdev->bus, hlink);
 }
 
 static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = {
index b5fd8f0..f8b5b96 100644 (file)
@@ -274,7 +274,7 @@ struct hdmi_codec_priv {
        uint8_t eld[MAX_ELD_BYTES];
        struct snd_pcm_chmap *chmap_info;
        unsigned int chmap_idx;
-       struct mutex lock;
+       unsigned long busy;
        struct snd_soc_jack *jack;
        unsigned int jack_status;
 };
@@ -390,8 +390,8 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
        struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
        int ret = 0;
 
-       ret = mutex_trylock(&hcp->lock);
-       if (!ret) {
+       ret = test_and_set_bit(0, &hcp->busy);
+       if (ret) {
                dev_err(dai->dev, "Only one simultaneous stream supported!\n");
                return -EINVAL;
        }
@@ -419,7 +419,7 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
 
 err:
        /* Release the exclusive lock on error */
-       mutex_unlock(&hcp->lock);
+       clear_bit(0, &hcp->busy);
        return ret;
 }
 
@@ -431,7 +431,7 @@ static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
        hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
        hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
 
-       mutex_unlock(&hcp->lock);
+       clear_bit(0, &hcp->busy);
 }
 
 static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
@@ -811,8 +811,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        hcp->hcd = *hcd;
-       mutex_init(&hcp->lock);
-
        daidrv = devm_kcalloc(dev, dai_count, sizeof(*daidrv), GFP_KERNEL);
        if (!daidrv)
                return -ENOMEM;
index e609abc..cae1def 100644 (file)
@@ -901,16 +901,20 @@ static void max98373_slot_config(struct i2c_client *i2c,
                max98373->i_slot = value & 0xF;
        else
                max98373->i_slot = 1;
-
-       max98373->reset_gpio = of_get_named_gpio(dev->of_node,
+       if (dev->of_node) {
+               max98373->reset_gpio = of_get_named_gpio(dev->of_node,
                                                "maxim,reset-gpio", 0);
-       if (!gpio_is_valid(max98373->reset_gpio)) {
-               dev_err(dev, "Looking up %s property in node %s failed %d\n",
-                       "maxim,reset-gpio", dev->of_node->full_name,
-                       max98373->reset_gpio);
+               if (!gpio_is_valid(max98373->reset_gpio)) {
+                       dev_err(dev, "Looking up %s property in node %s failed %d\n",
+                               "maxim,reset-gpio", dev->of_node->full_name,
+                               max98373->reset_gpio);
+               } else {
+                       dev_dbg(dev, "maxim,reset-gpio=%d",
+                               max98373->reset_gpio);
+               }
        } else {
-               dev_dbg(dev, "maxim,reset-gpio=%d",
-                       max98373->reset_gpio);
+               /* this makes reset_gpio as invalid */
+               max98373->reset_gpio = -1;
        }
 
        if (!device_property_read_u32(dev, "maxim,spkfb-slot-no", &value))
@@ -956,11 +960,11 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
 
        /* Power on device */
        if (gpio_is_valid(max98373->reset_gpio)) {
-               ret = gpio_request(max98373->reset_gpio, "MAX98373_RESET");
+               ret = devm_gpio_request(&i2c->dev, max98373->reset_gpio,
+                                       "MAX98373_RESET");
                if (ret) {
                        dev_err(&i2c->dev, "%s: Failed to request gpio %d\n",
                                __func__, max98373->reset_gpio);
-                       gpio_free(max98373->reset_gpio);
                        return -EINVAL;
                }
                gpio_direction_output(max98373->reset_gpio, 0);
index 667e9f7..e3d311f 100644 (file)
@@ -306,7 +306,7 @@ struct pm8916_wcd_analog_priv {
 };
 
 static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" };
-static const char *const rdac2_mux_text[] = { "ZERO", "RX2", "RX1" };
+static const char *const rdac2_mux_text[] = { "RX1", "RX2" };
 static const char *const hph_text[] = { "ZERO", "Switch", };
 
 static const struct soc_enum hph_enum = SOC_ENUM_SINGLE_VIRT(
@@ -321,7 +321,7 @@ static const struct soc_enum adc2_enum = SOC_ENUM_SINGLE_VIRT(
 
 /* RDAC2 MUX */
 static const struct soc_enum rdac2_mux_enum = SOC_ENUM_SINGLE(
-                       CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 3, rdac2_mux_text);
+                       CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 2, rdac2_mux_text);
 
 static const struct snd_kcontrol_new spkr_switch[] = {
        SOC_DAPM_SINGLE("Switch", CDC_A_SPKR_DAC_CTL, 7, 1, 0)
index 9fa5d44..58b2468 100644 (file)
@@ -243,6 +243,10 @@ static const char *const rx_mix1_text[] = {
        "ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
 };
 
+static const char * const rx_mix2_text[] = {
+       "ZERO", "IIR1", "IIR2"
+};
+
 static const char *const dec_mux_text[] = {
        "ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
 };
@@ -270,6 +274,16 @@ static const struct soc_enum rx3_mix1_inp_enum[] = {
        SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text),
 };
 
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum =
+       SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B3_CTL,
+               0, 3, rx_mix2_text);
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum =
+       SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B3_CTL,
+               0, 3, rx_mix2_text);
+
 /* DEC */
 static const struct soc_enum dec1_mux_enum = SOC_ENUM_SINGLE(
                                LPASS_CDC_CONN_TX_B1_CTL, 0, 6, dec_mux_text);
@@ -309,6 +323,10 @@ static const struct snd_kcontrol_new rx3_mix1_inp2_mux = SOC_DAPM_ENUM(
                                "RX3 MIX1 INP2 Mux", rx3_mix1_inp_enum[1]);
 static const struct snd_kcontrol_new rx3_mix1_inp3_mux = SOC_DAPM_ENUM(
                                "RX3 MIX1 INP3 Mux", rx3_mix1_inp_enum[2]);
+static const struct snd_kcontrol_new rx1_mix2_inp1_mux = SOC_DAPM_ENUM(
+                               "RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
+static const struct snd_kcontrol_new rx2_mix2_inp1_mux = SOC_DAPM_ENUM(
+                               "RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
 
 /* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */
 static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0);
@@ -740,6 +758,10 @@ static const struct snd_soc_dapm_widget msm8916_wcd_digital_dapm_widgets[] = {
                         &rx3_mix1_inp2_mux),
        SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
                         &rx3_mix1_inp3_mux),
+       SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+                        &rx1_mix2_inp1_mux),
+       SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+                        &rx2_mix2_inp1_mux),
 
        SND_SOC_DAPM_MUX("CIC1 MUX", SND_SOC_NOPM, 0, 0, &cic1_mux),
        SND_SOC_DAPM_MUX("CIC2 MUX", SND_SOC_NOPM, 0, 0, &cic2_mux),
index 762595d..c506c93 100644 (file)
@@ -1770,6 +1770,9 @@ static int rt5651_detect_headset(struct snd_soc_component *component)
 
 static bool rt5651_support_button_press(struct rt5651_priv *rt5651)
 {
+       if (!rt5651->hp_jack)
+               return false;
+
        /* Button press support only works with internal jack-detection */
        return (rt5651->hp_jack->status & SND_JACK_MICROPHONE) &&
                rt5651->gpiod_hp_det == NULL;
index 1ef4707..c50b75c 100644 (file)
@@ -995,6 +995,16 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
 {
        struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
 
+       rt5682->hs_jack = hs_jack;
+
+       if (!hs_jack) {
+               regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+                                  RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
+               regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+                                  RT5682_POW_JDH | RT5682_POW_JDL, 0);
+               return 0;
+       }
+
        switch (rt5682->pdata.jd_src) {
        case RT5682_JD1:
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_2,
@@ -1032,8 +1042,6 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
                break;
        }
 
-       rt5682->hs_jack = hs_jack;
-
        return 0;
 }
 
index c3d06e8..d5fb7f5 100644 (file)
@@ -533,13 +533,10 @@ static SOC_ENUM_SINGLE_DECL(dac_osr,
 static SOC_ENUM_SINGLE_DECL(adc_osr,
                            WM8994_OVERSAMPLING, 1, osr_text);
 
-static const struct snd_kcontrol_new wm8994_snd_controls[] = {
+static const struct snd_kcontrol_new wm8994_common_snd_controls[] = {
 SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
                 WM8994_AIF1_ADC1_RIGHT_VOLUME,
                 1, 119, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
-                WM8994_AIF1_ADC2_RIGHT_VOLUME,
-                1, 119, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8994_AIF2_ADC_LEFT_VOLUME,
                 WM8994_AIF2_ADC_RIGHT_VOLUME,
                 1, 119, 0, digital_tlv),
@@ -556,8 +553,6 @@ SOC_ENUM("AIF2DACR Source", aif2dacr_src),
 
 SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8994_AIF1_DAC1_LEFT_VOLUME,
                 WM8994_AIF1_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8994_AIF1_DAC2_LEFT_VOLUME,
-                WM8994_AIF1_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8994_AIF2_DAC_LEFT_VOLUME,
                 WM8994_AIF2_DAC_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
 
@@ -565,17 +560,12 @@ SOC_SINGLE_TLV("AIF1 Boost Volume", WM8994_AIF1_CONTROL_2, 10, 3, 0, aif_tlv),
 SOC_SINGLE_TLV("AIF2 Boost Volume", WM8994_AIF2_CONTROL_2, 10, 3, 0, aif_tlv),
 
 SOC_SINGLE("AIF1DAC1 EQ Switch", WM8994_AIF1_DAC1_EQ_GAINS_1, 0, 1, 0),
-SOC_SINGLE("AIF1DAC2 EQ Switch", WM8994_AIF1_DAC2_EQ_GAINS_1, 0, 1, 0),
 SOC_SINGLE("AIF2 EQ Switch", WM8994_AIF2_EQ_GAINS_1, 0, 1, 0),
 
 WM8994_DRC_SWITCH("AIF1DAC1 DRC Switch", WM8994_AIF1_DRC1_1, 2),
 WM8994_DRC_SWITCH("AIF1ADC1L DRC Switch", WM8994_AIF1_DRC1_1, 1),
 WM8994_DRC_SWITCH("AIF1ADC1R DRC Switch", WM8994_AIF1_DRC1_1, 0),
 
-WM8994_DRC_SWITCH("AIF1DAC2 DRC Switch", WM8994_AIF1_DRC2_1, 2),
-WM8994_DRC_SWITCH("AIF1ADC2L DRC Switch", WM8994_AIF1_DRC2_1, 1),
-WM8994_DRC_SWITCH("AIF1ADC2R DRC Switch", WM8994_AIF1_DRC2_1, 0),
-
 WM8994_DRC_SWITCH("AIF2DAC DRC Switch", WM8994_AIF2_DRC_1, 2),
 WM8994_DRC_SWITCH("AIF2ADCL DRC Switch", WM8994_AIF2_DRC_1, 1),
 WM8994_DRC_SWITCH("AIF2ADCR DRC Switch", WM8994_AIF2_DRC_1, 0),
@@ -594,9 +584,6 @@ SOC_SINGLE("Sidetone HPF Switch", WM8994_SIDETONE, 6, 1, 0),
 SOC_ENUM("AIF1ADC1 HPF Mode", aif1adc1_hpf),
 SOC_DOUBLE("AIF1ADC1 HPF Switch", WM8994_AIF1_ADC1_FILTERS, 12, 11, 1, 0),
 
-SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
-SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
-
 SOC_ENUM("AIF2ADC HPF Mode", aif2adc_hpf),
 SOC_DOUBLE("AIF2ADC HPF Switch", WM8994_AIF2_ADC_FILTERS, 12, 11, 1, 0),
 
@@ -637,6 +624,24 @@ SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF2_DAC_FILTERS_2,
           8, 1, 0),
 };
 
+/* Controls not available on WM1811 */
+static const struct snd_kcontrol_new wm8994_snd_controls[] = {
+SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
+                WM8994_AIF1_ADC2_RIGHT_VOLUME,
+                1, 119, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8994_AIF1_DAC2_LEFT_VOLUME,
+                WM8994_AIF1_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+
+SOC_SINGLE("AIF1DAC2 EQ Switch", WM8994_AIF1_DAC2_EQ_GAINS_1, 0, 1, 0),
+
+WM8994_DRC_SWITCH("AIF1DAC2 DRC Switch", WM8994_AIF1_DRC2_1, 2),
+WM8994_DRC_SWITCH("AIF1ADC2L DRC Switch", WM8994_AIF1_DRC2_1, 1),
+WM8994_DRC_SWITCH("AIF1ADC2R DRC Switch", WM8994_AIF1_DRC2_1, 0),
+
+SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
+SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
+};
+
 static const struct snd_kcontrol_new wm8994_eq_controls[] = {
 SOC_SINGLE_TLV("AIF1DAC1 EQ1 Volume", WM8994_AIF1_DAC1_EQ_GAINS_1, 11, 31, 0,
               eq_tlv),
@@ -4258,13 +4263,15 @@ static int wm8994_component_probe(struct snd_soc_component *component)
        wm8994_handle_pdata(wm8994);
 
        wm_hubs_add_analogue_controls(component);
-       snd_soc_add_component_controls(component, wm8994_snd_controls,
-                            ARRAY_SIZE(wm8994_snd_controls));
+       snd_soc_add_component_controls(component, wm8994_common_snd_controls,
+                                      ARRAY_SIZE(wm8994_common_snd_controls));
        snd_soc_dapm_new_controls(dapm, wm8994_dapm_widgets,
                                  ARRAY_SIZE(wm8994_dapm_widgets));
 
        switch (control->type) {
        case WM8994:
+               snd_soc_add_component_controls(component, wm8994_snd_controls,
+                                              ARRAY_SIZE(wm8994_snd_controls));
                snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
                                          ARRAY_SIZE(wm8994_specific_dapm_widgets));
                if (control->revision < 4) {
@@ -4284,8 +4291,10 @@ static int wm8994_component_probe(struct snd_soc_component *component)
                }
                break;
        case WM8958:
+               snd_soc_add_component_controls(component, wm8994_snd_controls,
+                                              ARRAY_SIZE(wm8994_snd_controls));
                snd_soc_add_component_controls(component, wm8958_snd_controls,
-                                    ARRAY_SIZE(wm8958_snd_controls));
+                                              ARRAY_SIZE(wm8958_snd_controls));
                snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
                                          ARRAY_SIZE(wm8958_dapm_widgets));
                if (control->revision < 1) {
index ae28d99..9b8bb7b 100644 (file)
@@ -1259,8 +1259,7 @@ static unsigned int wmfw_convert_flags(unsigned int in, unsigned int len)
        }
 
        if (in) {
-               if (in & WMFW_CTL_FLAG_READABLE)
-                       out |= rd;
+               out |= rd;
                if (in & WMFW_CTL_FLAG_WRITEABLE)
                        out |= wr;
                if (in & WMFW_CTL_FLAG_VOLATILE)
@@ -3697,11 +3696,16 @@ static int wm_adsp_buffer_parse_legacy(struct wm_adsp *dsp)
        u32 xmalg, addr, magic;
        int i, ret;
 
+       alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
+       if (!alg_region) {
+               adsp_err(dsp, "No algorithm region found\n");
+               return -EINVAL;
+       }
+
        buf = wm_adsp_buffer_alloc(dsp);
        if (!buf)
                return -ENOMEM;
 
-       alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
        xmalg = dsp->ops->sys_config_size / sizeof(__be32);
 
        addr = alg_region->base + xmalg + ALG_XM_FIELD(magic);
index a437567..4f6e58c 100644 (file)
@@ -308,6 +308,9 @@ static const struct snd_soc_dapm_widget sof_widgets[] = {
        SND_SOC_DAPM_HP("Headphone Jack", NULL),
        SND_SOC_DAPM_MIC("Headset Mic", NULL),
        SND_SOC_DAPM_SPK("Spk", NULL),
+};
+
+static const struct snd_soc_dapm_widget dmic_widgets[] = {
        SND_SOC_DAPM_MIC("SoC DMIC", NULL),
 };
 
@@ -318,10 +321,6 @@ static const struct snd_soc_dapm_route sof_map[] = {
 
        /* other jacks */
        { "IN1P", NULL, "Headset Mic" },
-
-       /* digital mics */
-       {"DMic", NULL, "SoC DMIC"},
-
 };
 
 static const struct snd_soc_dapm_route speaker_map[] = {
@@ -329,6 +328,11 @@ static const struct snd_soc_dapm_route speaker_map[] = {
        { "Spk", NULL, "Speaker" },
 };
 
+static const struct snd_soc_dapm_route dmic_map[] = {
+       /* digital mics */
+       {"DMic", NULL, "SoC DMIC"},
+};
+
 static int speaker_codec_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_card *card = rtd->card;
@@ -342,6 +346,28 @@ static int speaker_codec_init(struct snd_soc_pcm_runtime *rtd)
        return ret;
 }
 
+static int dmic_init(struct snd_soc_pcm_runtime *rtd)
+{
+       struct snd_soc_card *card = rtd->card;
+       int ret;
+
+       ret = snd_soc_dapm_new_controls(&card->dapm, dmic_widgets,
+                                       ARRAY_SIZE(dmic_widgets));
+       if (ret) {
+               dev_err(card->dev, "DMic widget addition failed: %d\n", ret);
+               /* Don't need to add routes if widget addition failed */
+               return ret;
+       }
+
+       ret = snd_soc_dapm_add_routes(&card->dapm, dmic_map,
+                                     ARRAY_SIZE(dmic_map));
+
+       if (ret)
+               dev_err(card->dev, "DMic map addition failed: %d\n", ret);
+
+       return ret;
+}
+
 /* sof audio machine driver for rt5682 codec */
 static struct snd_soc_card sof_audio_card_rt5682 = {
        .name = "sof_rt5682",
@@ -445,6 +471,7 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
                links[id].name = "dmic01";
                links[id].cpus = &cpus[id];
                links[id].cpus->dai_name = "DMIC01 Pin";
+               links[id].init = dmic_init;
                if (dmic_be_num > 1) {
                        /* set up 2 BE links at most */
                        links[id + 1].name = "dmic16k";
@@ -576,6 +603,15 @@ static int sof_audio_probe(struct platform_device *pdev)
        /* need to get main clock from pmc */
        if (sof_rt5682_quirk & SOF_RT5682_MCLK_BYTCHT_EN) {
                ctx->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+               if (IS_ERR(ctx->mclk)) {
+                       ret = PTR_ERR(ctx->mclk);
+
+                       dev_err(&pdev->dev,
+                               "Failed to get MCLK from pmc_plt_clk_3: %d\n",
+                               ret);
+                       return ret;
+               }
+
                ret = clk_prepare_enable(ctx->mclk);
                if (ret < 0) {
                        dev_err(&pdev->dev,
@@ -621,8 +657,24 @@ static int sof_audio_probe(struct platform_device *pdev)
                                          &sof_audio_card_rt5682);
 }
 
+static int sof_rt5682_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+       struct snd_soc_component *component = NULL;
+
+       for_each_card_components(card, component) {
+               if (!strcmp(component->name, rt5682_component[0].name)) {
+                       snd_soc_component_set_jack(component, NULL, NULL);
+                       break;
+               }
+       }
+
+       return 0;
+}
+
 static struct platform_driver sof_audio = {
        .probe = sof_audio_probe,
+       .remove = sof_rt5682_remove,
        .driver = {
                .name = "sof_rt5682",
                .pm = &snd_soc_pm_ops,
index 61226fe..2a4ffe9 100644 (file)
@@ -555,10 +555,6 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
                return PTR_ERR(priv->clk);
        }
 
-       err = clk_prepare_enable(priv->clk);
-       if (err < 0)
-               return err;
-
        priv->extclk = devm_clk_get(&pdev->dev, "extclk");
        if (IS_ERR(priv->extclk)) {
                if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
@@ -574,6 +570,10 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
                }
        }
 
+       err = clk_prepare_enable(priv->clk);
+       if (err < 0)
+               return err;
+
        /* Some sensible defaults - this reflects the powerup values */
        priv->ctl_play = KIRKWOOD_PLAYCTL_SIZE_24;
        priv->ctl_rec = KIRKWOOD_RECCTL_SIZE_24;
@@ -587,7 +587,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
                priv->ctl_rec |= KIRKWOOD_RECCTL_BURST_128;
        }
 
-       err = devm_snd_soc_register_component(&pdev->dev, &kirkwood_soc_component,
+       err = snd_soc_register_component(&pdev->dev, &kirkwood_soc_component,
                                         soc_dai, 2);
        if (err) {
                dev_err(&pdev->dev, "snd_soc_register_component failed\n");
@@ -610,6 +610,7 @@ static int kirkwood_i2s_dev_remove(struct platform_device *pdev)
 {
        struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
 
+       snd_soc_unregister_component(&pdev->dev);
        if (!IS_ERR(priv->extclk))
                clk_disable_unprepare(priv->extclk);
        clk_disable_unprepare(priv->clk);
index af2d5a6..61c984f 100644 (file)
@@ -677,7 +677,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
        ret = rockchip_pcm_platform_register(&pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "Could not register PCM\n");
-               return ret;
+               goto err_suspend;
        }
 
        return 0;
index 0097df1..e80b091 100644 (file)
@@ -66,10 +66,13 @@ static int rk_jack_event(struct notifier_block *nb, unsigned long event,
        struct snd_soc_jack *jack = (struct snd_soc_jack *)data;
        struct snd_soc_dapm_context *dapm = &jack->card->dapm;
 
-       if (event & SND_JACK_MICROPHONE)
+       if (event & SND_JACK_MICROPHONE) {
                snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
-       else
+               snd_soc_dapm_force_enable_pin(dapm, "SHDN");
+       } else {
                snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+               snd_soc_dapm_disable_pin(dapm, "SHDN");
+       }
 
        snd_soc_dapm_sync(dapm);
 
index c213913..fd8c664 100644 (file)
@@ -5,6 +5,7 @@
 //  Author: Claude <claude@insginal.co.kr>
 
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 
@@ -74,6 +75,17 @@ static struct snd_soc_card arndale_rt5631 = {
        .num_links = ARRAY_SIZE(arndale_rt5631_dai),
 };
 
+static void arndale_put_of_nodes(struct snd_soc_card *card)
+{
+       struct snd_soc_dai_link *dai_link;
+       int i;
+
+       for_each_card_prelinks(card, i, dai_link) {
+               of_node_put(dai_link->cpus->of_node);
+               of_node_put(dai_link->codecs->of_node);
+       }
+}
+
 static int arndale_audio_probe(struct platform_device *pdev)
 {
        int n, ret;
@@ -103,18 +115,31 @@ static int arndale_audio_probe(struct platform_device *pdev)
                if (!arndale_rt5631_dai[0].codecs->of_node) {
                        dev_err(&pdev->dev,
                        "Property 'samsung,audio-codec' missing or invalid\n");
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err_put_of_nodes;
                }
        }
 
        ret = devm_snd_soc_register_card(card->dev, card);
+       if (ret) {
+               dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
+               goto err_put_of_nodes;
+       }
+       return 0;
 
-       if (ret)
-               dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret);
-
+err_put_of_nodes:
+       arndale_put_of_nodes(card);
        return ret;
 }
 
+static int arndale_audio_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+       arndale_put_of_nodes(card);
+       return 0;
+}
+
 static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
        { .compatible = "samsung,arndale-rt5631", },
        { .compatible = "samsung,arndale-alc5631", },
@@ -129,6 +154,7 @@ static struct platform_driver arndale_audio_driver = {
                .of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
        },
        .probe = arndale_audio_probe,
+       .remove = arndale_audio_remove,
 };
 
 module_platform_driver(arndale_audio_driver);
index bda5b95..e9596c2 100644 (file)
@@ -761,6 +761,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        }
 
        /* set format */
+       rdai->bit_clk_inv = 0;
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
                rdai->sys_delay = 0;
index 0324a5c..28f65eb 100644 (file)
@@ -508,10 +508,10 @@ static struct rsnd_mod_ops rsnd_dmapp_ops = {
 #define RDMA_SSI_I_N(addr, i)  (addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
 #define RDMA_SSI_O_N(addr, i)  (addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)
 
-#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400))
+#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
 #define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)
 
-#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400))
+#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
 #define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)
 
 #define RDMA_SRC_I_N(addr, i)  (addr ##_reg - 0x00500000 + (0x400 * i))
index e163dde..b600d3e 100644 (file)
@@ -1070,7 +1070,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                        return ret;
        }
 
-       snd_soc_dai_trigger(cpu_dai, substream, cmd);
+       ret = snd_soc_dai_trigger(cpu_dai, substream, cmd);
        if (ret < 0)
                return ret;
 
@@ -1097,7 +1097,7 @@ static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
                        return ret;
        }
 
-       snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
+       ret = snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
        if (ret < 0)
                return ret;
 
@@ -1146,6 +1146,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
 {
        struct snd_soc_dpcm *dpcm;
        unsigned long flags;
+       char *name;
 
        /* only add new dpcms */
        for_each_dpcm_be(fe, stream, dpcm) {
@@ -1171,9 +1172,15 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
                        stream ? "<-" : "->", be->dai_link->name);
 
 #ifdef CONFIG_DEBUG_FS
-       dpcm->debugfs_state = debugfs_create_dir(be->dai_link->name,
-                                                fe->debugfs_dpcm_root);
-       debugfs_create_u32("state", 0644, dpcm->debugfs_state, &dpcm->state);
+       name = kasprintf(GFP_KERNEL, "%s:%s", be->dai_link->name,
+                        stream ? "capture" : "playback");
+       if (name) {
+               dpcm->debugfs_state = debugfs_create_dir(name,
+                                                        fe->debugfs_dpcm_root);
+               debugfs_create_u32("state", 0644, dpcm->debugfs_state,
+                                  &dpcm->state);
+               kfree(name);
+       }
 #endif
        return 1;
 }
index aa9a1fc..0fd0329 100644 (file)
@@ -1582,7 +1582,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 
        /* map user to kernel widget ID */
        template.id = get_widget_id(le32_to_cpu(w->id));
-       if (template.id < 0)
+       if ((int)template.id < 0)
                return template.id;
 
        /* strings are allocated here, but used and freed by the widget */
index a4983f9..2b8711e 100644 (file)
@@ -60,13 +60,16 @@ int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
        struct snd_sof_dev *sdev = scontrol->sdev;
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        unsigned int i, channels = scontrol->num_channels;
+       bool change = false;
+       u32 value;
 
        /* update each channel */
        for (i = 0; i < channels; i++) {
-               cdata->chanv[i].value =
-                       mixer_to_ipc(ucontrol->value.integer.value[i],
+               value = mixer_to_ipc(ucontrol->value.integer.value[i],
                                     scontrol->volume_table, sm->max + 1);
+               change = change || (value != cdata->chanv[i].value);
                cdata->chanv[i].channel = i;
+               cdata->chanv[i].value = value;
        }
 
        /* notify DSP of mixer updates */
@@ -76,8 +79,7 @@ int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
                                              SOF_CTRL_TYPE_VALUE_CHAN_GET,
                                              SOF_CTRL_CMD_VOLUME,
                                              true);
-
-       return 0;
+       return change;
 }
 
 int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
@@ -105,11 +107,15 @@ int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
        struct snd_sof_dev *sdev = scontrol->sdev;
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        unsigned int i, channels = scontrol->num_channels;
+       bool change = false;
+       u32 value;
 
        /* update each channel */
        for (i = 0; i < channels; i++) {
-               cdata->chanv[i].value = ucontrol->value.integer.value[i];
+               value = ucontrol->value.integer.value[i];
+               change = change || (value != cdata->chanv[i].value);
                cdata->chanv[i].channel = i;
+               cdata->chanv[i].value = value;
        }
 
        /* notify DSP of mixer updates */
@@ -120,7 +126,7 @@ int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
                                              SOF_CTRL_CMD_SWITCH,
                                              true);
 
-       return 0;
+       return change;
 }
 
 int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
@@ -148,11 +154,15 @@ int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
        struct snd_sof_dev *sdev = scontrol->sdev;
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        unsigned int i, channels = scontrol->num_channels;
+       bool change = false;
+       u32 value;
 
        /* update each channel */
        for (i = 0; i < channels; i++) {
-               cdata->chanv[i].value = ucontrol->value.enumerated.item[i];
+               value = ucontrol->value.enumerated.item[i];
+               change = change || (value != cdata->chanv[i].value);
                cdata->chanv[i].channel = i;
+               cdata->chanv[i].value = value;
        }
 
        /* notify DSP of enum updates */
@@ -163,7 +173,7 @@ int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
                                              SOF_CTRL_CMD_ENUM,
                                              true);
 
-       return 0;
+       return change;
 }
 
 int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
index 54cd431..5529e8e 100644 (file)
@@ -152,8 +152,10 @@ static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
         */
        dentry = file->f_path.dentry;
        if (strcmp(dentry->d_name.name, "ipc_flood_count") &&
-           strcmp(dentry->d_name.name, "ipc_flood_duration_ms"))
-               return -EINVAL;
+           strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        if (!strcmp(dentry->d_name.name, "ipc_flood_duration_ms"))
                flood_duration_test = true;
index 479ba24..d62f51d 100644 (file)
@@ -273,6 +273,16 @@ config SND_SOC_SOF_HDA_AUDIO_CODEC
          Say Y if you want to enable HDAudio codecs with SOF.
          If unsure select "N".
 
+config SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1
+       bool "SOF enable DMI Link L1"
+       help
+         This option enables DMI L1 for both playback and capture
+         and disables known workarounds for specific HDaudio platforms.
+         Only use to look into power optimizations on platforms not
+         affected by DMI L1 issues. This option is not recommended.
+         Say Y if you want to enable DMI Link L1
+         If unsure, select "N".
+
 endif ## SND_SOC_SOF_HDA_COMMON
 
 config SND_SOC_SOF_HDA_LINK_BASELINE
index e282179..80e2826 100644 (file)
@@ -37,6 +37,7 @@
 #define MBOX_SIZE       0x1000
 #define MBOX_DUMP_SIZE 0x30
 #define EXCEPT_OFFSET  0x800
+#define EXCEPT_MAX_HDR_SIZE    0x400
 
 /* DSP peripherals */
 #define DMAC0_OFFSET    0xFE000
@@ -228,6 +229,11 @@ static void bdw_get_registers(struct snd_sof_dev *sdev,
        /* note: variable AR register array is not read */
 
        /* then get panic info */
+       if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+               dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+                       xoops->arch_hdr.totalsize);
+               return;
+       }
        offset += xoops->arch_hdr.totalsize;
        sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
 
@@ -451,6 +457,7 @@ static int bdw_probe(struct snd_sof_dev *sdev)
        /* TODO: add offsets */
        sdev->mmio_bar = BDW_DSP_BAR;
        sdev->mailbox_bar = BDW_DSP_BAR;
+       sdev->dsp_oops_offset = MBOX_OFFSET;
 
        /* PCI base */
        mmio = platform_get_resource(pdev, IORESOURCE_MEM,
index 5e7a6aa..a1e514f 100644 (file)
@@ -28,6 +28,7 @@
 #define MBOX_OFFSET            0x144000
 #define MBOX_SIZE              0x1000
 #define EXCEPT_OFFSET          0x800
+#define EXCEPT_MAX_HDR_SIZE    0x400
 
 /* DSP peripherals */
 #define DMAC0_OFFSET           0x098000
@@ -126,6 +127,11 @@ static void byt_get_registers(struct snd_sof_dev *sdev,
        /* note: variable AR register array is not read */
 
        /* then get panic info */
+       if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+               dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+                       xoops->arch_hdr.totalsize);
+               return;
+       }
        offset += xoops->arch_hdr.totalsize;
        sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
 
index bc41028..df1909e 100644 (file)
@@ -139,20 +139,16 @@ void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable)
  */
 int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
 {
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
-       struct hdac_bus *bus = sof_to_bus(sdev);
-#endif
        u32 val;
 
        /* enable/disable audio dsp clock gating */
        val = enable ? PCI_CGCTL_ADSPDCGE : 0;
        snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_ADSPDCGE, val);
 
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
-       /* enable/disable L1 support */
-       val = enable ? SOF_HDA_VS_EM2_L1SEN : 0;
-       snd_hdac_chip_updatel(bus, VS_EM2, SOF_HDA_VS_EM2_L1SEN, val);
-#endif
+       /* enable/disable DMI Link L1 support */
+       val = enable ? HDA_VS_INTEL_EM2_L1SEN : 0;
+       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+                               HDA_VS_INTEL_EM2_L1SEN, val);
 
        /* enable/disable audio dsp power gating */
        val = enable ? 0 : PCI_PGCTL_ADSPPGD;
index 6427f0b..65c2af3 100644 (file)
@@ -44,6 +44,7 @@ static int cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
                return -ENODEV;
        }
        hstream = &dsp_stream->hstream;
+       hstream->substream = NULL;
 
        /* allocate DMA buffer */
        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
index ad8d41f..0c11fce 100644 (file)
@@ -185,6 +185,17 @@ hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction)
                        direction == SNDRV_PCM_STREAM_PLAYBACK ?
                        "playback" : "capture");
 
+       /*
+        * Disable DMI Link L1 entry when capture stream is opened.
+        * Workaround to address a known issue with host DMA that results
+        * in xruns during pause/release in capture scenarios.
+        */
+       if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+               if (stream && direction == SNDRV_PCM_STREAM_CAPTURE)
+                       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+                                               HDA_VS_INTEL_EM2,
+                                               HDA_VS_INTEL_EM2_L1SEN, 0);
+
        return stream;
 }
 
@@ -193,23 +204,43 @@ int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
 {
        struct hdac_bus *bus = sof_to_bus(sdev);
        struct hdac_stream *s;
+       bool active_capture_stream = false;
+       bool found = false;
 
        spin_lock_irq(&bus->reg_lock);
 
-       /* find used stream */
+       /*
+        * close stream matching the stream tag
+        * and check if there are any open capture streams.
+        */
        list_for_each_entry(s, &bus->stream_list, list) {
-               if (s->direction == direction &&
-                   s->opened && s->stream_tag == stream_tag) {
+               if (!s->opened)
+                       continue;
+
+               if (s->direction == direction && s->stream_tag == stream_tag) {
                        s->opened = false;
-                       spin_unlock_irq(&bus->reg_lock);
-                       return 0;
+                       found = true;
+               } else if (s->direction == SNDRV_PCM_STREAM_CAPTURE) {
+                       active_capture_stream = true;
                }
        }
 
        spin_unlock_irq(&bus->reg_lock);
 
-       dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
-       return -ENODEV;
+       /* Enable DMI L1 entry if there are no capture streams open */
+       if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+               if (!active_capture_stream)
+                       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+                                               HDA_VS_INTEL_EM2,
+                                               HDA_VS_INTEL_EM2_L1SEN,
+                                               HDA_VS_INTEL_EM2_L1SEN);
+
+       if (!found) {
+               dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
+               return -ENODEV;
+       }
+
+       return 0;
 }
 
 int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
index c72e9a0..06e8467 100644 (file)
@@ -35,6 +35,8 @@
 #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
 #define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
 
+#define EXCEPT_MAX_HDR_SIZE    0x400
+
 /*
  * Debug
  */
@@ -131,6 +133,11 @@ static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
        /* note: variable AR register array is not read */
 
        /* then get panic info */
+       if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+               dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+                       xoops->arch_hdr.totalsize);
+               return;
+       }
        offset += xoops->arch_hdr.totalsize;
        sof_block_read(sdev, sdev->mmio_bar, offset,
                       panic_info, sizeof(*panic_info));
index 5591841..23e430d 100644 (file)
@@ -39,7 +39,6 @@
 #define SOF_HDA_WAKESTS                        0x0E
 #define SOF_HDA_WAKESTS_INT_MASK       ((1 << 8) - 1)
 #define SOF_HDA_RIRBSTS                        0x5d
-#define SOF_HDA_VS_EM2_L1SEN            BIT(13)
 
 /* SOF_HDA_GCTL register bist */
 #define SOF_HDA_GCTL_RESET             BIT(0)
 #define HDA_DSP_REG_HIPCIE             (HDA_DSP_IPC_BASE + 0x0C)
 #define HDA_DSP_REG_HIPCCTL            (HDA_DSP_IPC_BASE + 0x10)
 
+/* Intel Vendor Specific Registers */
+#define HDA_VS_INTEL_EM2               0x1030
+#define HDA_VS_INTEL_EM2_L1SEN         BIT(13)
+
 /*  HIPCI */
 #define HDA_DSP_REG_HIPCI_BUSY         BIT(31)
 #define HDA_DSP_REG_HIPCI_MSG_MASK     0x7FFFFFFF
index b2f359d..086eeea 100644 (file)
@@ -572,8 +572,10 @@ static int sof_set_get_large_ctrl_data(struct snd_sof_dev *sdev,
        else
                err = sof_get_ctrl_copy_params(cdata->type, partdata, cdata,
                                               sparams);
-       if (err < 0)
+       if (err < 0) {
+               kfree(partdata);
                return err;
+       }
 
        msg_bytes = sparams->msg_bytes;
        pl_size = sparams->pl_size;
index d7f3274..9a9a381 100644 (file)
@@ -546,10 +546,10 @@ int snd_sof_run_firmware(struct snd_sof_dev *sdev)
                                 msecs_to_jiffies(sdev->boot_timeout));
        if (ret == 0) {
                dev_err(sdev->dev, "error: firmware boot failure\n");
-               /* after this point FW_READY msg should be ignored */
-               sdev->boot_complete = true;
                snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX |
                        SOF_DBG_TEXT | SOF_DBG_PCI);
+               /* after this point FW_READY msg should be ignored */
+               sdev->boot_complete = true;
                return -EIO;
        }
 
index e3f6a6d..2b876d4 100644 (file)
@@ -244,7 +244,7 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
                snd_soc_rtdcom_lookup(rtd, DRV_NAME);
        struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
        struct snd_sof_pcm *spcm;
-       int ret;
+       int ret, err = 0;
 
        /* nothing to do for BE */
        if (rtd->dai_link->no_pcm)
@@ -254,26 +254,26 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
        if (!spcm)
                return -EINVAL;
 
-       if (!spcm->prepared[substream->stream])
-               return 0;
-
        dev_dbg(sdev->dev, "pcm: free stream %d dir %d\n", spcm->pcm.pcm_id,
                substream->stream);
 
-       ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+       if (spcm->prepared[substream->stream]) {
+               ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+               if (ret < 0)
+                       err = ret;
+       }
 
        snd_pcm_lib_free_pages(substream);
 
        cancel_work_sync(&spcm->stream[substream->stream].period_elapsed_work);
 
-       if (ret < 0)
-               return ret;
-
        ret = snd_sof_pcm_platform_hw_free(sdev, substream);
-       if (ret < 0)
+       if (ret < 0) {
                dev_err(sdev->dev, "error: platform hw free failed\n");
+               err = ret;
+       }
 
-       return ret;
+       return err;
 }
 
 static int sof_pcm_prepare(struct snd_pcm_substream *substream)
@@ -323,6 +323,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        struct sof_ipc_stream stream;
        struct sof_ipc_reply reply;
        bool reset_hw_params = false;
+       bool ipc_first = false;
        int ret;
 
        /* nothing to do for BE */
@@ -343,6 +344,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_PAUSE;
+               ipc_first = true;
                break;
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
@@ -363,6 +365,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
                stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
+               ipc_first = true;
                reset_hw_params = true;
                break;
        default:
@@ -370,12 +373,22 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                return -EINVAL;
        }
 
-       snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+       /*
+        * DMA and IPC sequence is different for start and stop. Need to send
+        * STOP IPC before stop DMA
+        */
+       if (!ipc_first)
+               snd_sof_pcm_platform_trigger(sdev, substream, cmd);
 
        /* send IPC to the DSP */
        ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
                                 sizeof(stream), &reply, sizeof(reply));
 
+       /* need to STOP DMA even if STOP IPC failed */
+       if (ipc_first)
+               snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+
+       /* free PCM if reset_hw_params is set and the STOP IPC is successful */
        if (!ret && reset_hw_params)
                ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
 
index fc85efb..4452594 100644 (file)
@@ -543,15 +543,16 @@ static int sof_control_load_bytes(struct snd_soc_component *scomp,
        struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value;
        int max_size = sbe->max;
 
-       if (le32_to_cpu(control->priv.size) > max_size) {
+       /* init the get/put bytes data */
+       scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
+               le32_to_cpu(control->priv.size);
+
+       if (scontrol->size > max_size) {
                dev_err(sdev->dev, "err: bytes data size %d exceeds max %d.\n",
-                       control->priv.size, max_size);
+                       scontrol->size, max_size);
                return -EINVAL;
        }
 
-       /* init the get/put bytes data */
-       scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
-               le32_to_cpu(control->priv.size);
        scontrol->control_data = kzalloc(max_size, GFP_KERNEL);
        cdata = scontrol->control_data;
        if (!scontrol->control_data)
@@ -920,7 +921,9 @@ static void sof_parse_word_tokens(struct snd_soc_component *scomp,
                for (j = 0; j < count; j++) {
                        /* match token type */
                        if (!(tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD ||
-                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT))
+                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT ||
+                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BYTE ||
+                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BOOL))
                                continue;
 
                        /* match token id */
index d7501f8..48e629a 100644 (file)
@@ -505,10 +505,20 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
        if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
                ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
                                         SAI_XCR1_NODIV,
-                                        (unsigned int)~SAI_XCR1_NODIV);
+                                        freq ? 0 : SAI_XCR1_NODIV);
                if (ret < 0)
                        return ret;
 
+               /* Assume shutdown if requested frequency is 0Hz */
+               if (!freq) {
+                       /* Release mclk rate only if rate was actually set */
+                       if (sai->mclk_rate) {
+                               clk_rate_exclusive_put(sai->sai_mclk);
+                               sai->mclk_rate = 0;
+                       }
+                       return 0;
+               }
+
                /* If master clock is used, set parent clock now */
                ret = stm32_sai_set_parent_clock(sai, freq);
                if (ret)
@@ -1093,15 +1103,6 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
 
        regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
 
-       regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV,
-                          SAI_XCR1_NODIV);
-
-       /* Release mclk rate only if rate was actually set */
-       if (sai->mclk_rate) {
-               clk_rate_exclusive_put(sai->sai_mclk);
-               sai->mclk_rate = 0;
-       }
-
        clk_disable_unprepare(sai->sai_ck);
 
        spin_lock_irqsave(&sai->irq_lock, flags);
@@ -1217,6 +1218,16 @@ static int stm32_sai_pcm_process_spdif(struct snd_pcm_substream *substream,
        return 0;
 }
 
+/* No support of mmap in S/PDIF mode */
+static const struct snd_pcm_hardware stm32_sai_pcm_hw_spdif = {
+       .info = SNDRV_PCM_INFO_INTERLEAVED,
+       .buffer_bytes_max = 8 * PAGE_SIZE,
+       .period_bytes_min = 1024,
+       .period_bytes_max = PAGE_SIZE,
+       .periods_min = 2,
+       .periods_max = 8,
+};
+
 static const struct snd_pcm_hardware stm32_sai_pcm_hw = {
        .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP,
        .buffer_bytes_max = 8 * PAGE_SIZE,
@@ -1269,7 +1280,7 @@ static const struct snd_dmaengine_pcm_config stm32_sai_pcm_config = {
 };
 
 static const struct snd_dmaengine_pcm_config stm32_sai_pcm_config_spdif = {
-       .pcm_hardware = &stm32_sai_pcm_hw,
+       .pcm_hardware = &stm32_sai_pcm_hw_spdif,
        .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
        .process = stm32_sai_pcm_process_spdif,
 };
index a236350..2b0bc23 100644 (file)
@@ -62,7 +62,7 @@ int sdma_pcm_platform_register(struct device *dev,
        config->chan_names[0] = txdmachan;
        config->chan_names[1] = rxdmachan;
 
-       return devm_snd_dmaengine_pcm_register(dev, config, 0);
+       return devm_snd_dmaengine_pcm_register(dev, config, flags);
 }
 EXPORT_SYMBOL_GPL(sdma_pcm_platform_register);
 
index a2ab8e8..4a9a2f6 100644 (file)
@@ -388,6 +388,9 @@ static void snd_complete_urb(struct urb *urb)
                }
 
                prepare_outbound_urb(ep, ctx);
+               /* can be stopped during prepare callback */
+               if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+                       goto exit_clear;
        } else {
                retire_inbound_urb(ep, ctx);
                /* can be stopped during retire callback */
index 3fd1d17..45eee5c 100644 (file)
@@ -1229,7 +1229,8 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
                if (cval->min + cval->res < cval->max) {
                        int last_valid_res = cval->res;
                        int saved, test, check;
-                       get_cur_mix_raw(cval, minchn, &saved);
+                       if (get_cur_mix_raw(cval, minchn, &saved) < 0)
+                               goto no_res_check;
                        for (;;) {
                                test = saved;
                                if (test < cval->max)
@@ -1249,6 +1250,7 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
                        snd_usb_set_cur_mix_value(cval, minchn, 0, saved);
                }
 
+no_res_check:
                cval->initialized = 1;
        }
 
index fbfde99..349e1e5 100644 (file)
@@ -248,8 +248,8 @@ static int create_yamaha_midi_quirk(struct snd_usb_audio *chip,
                                        NULL, USB_MS_MIDI_OUT_JACK);
        if (!injd && !outjd)
                return -ENODEV;
-       if (!(injd && snd_usb_validate_midi_desc(injd)) ||
-           !(outjd && snd_usb_validate_midi_desc(outjd)))
+       if ((injd && !snd_usb_validate_midi_desc(injd)) ||
+           (outjd && !snd_usb_validate_midi_desc(outjd)))
                return -ENODEV;
        if (injd && (injd->bLength < 5 ||
                     (injd->bJackType != USB_MS_EMBEDDED &&
@@ -1657,6 +1657,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case 0x23ba:  /* Playback Designs */
        case 0x25ce:  /* Mytek devices */
        case 0x278b:  /* Rotel? */
+       case 0x292b:  /* Gustard/Ess based devices */
        case 0x2ab6:  /* T+A devices */
        case 0x3842:  /* EVGA */
        case 0xc502:  /* HiBy devices */
index 3c8f73a..389e865 100644 (file)
@@ -75,15 +75,15 @@ static bool validate_processing_unit(const void *p,
 
        if (d->bLength < sizeof(*d))
                return false;
-       len = d->bLength < sizeof(*d) + d->bNrInPins;
+       len = sizeof(*d) + d->bNrInPins;
        if (d->bLength < len)
                return false;
        switch (v->protocol) {
        case UAC_VERSION_1:
        default:
-               /* bNrChannels, wChannelConfig, iChannelNames, bControlSize */
-               len += 1 + 2 + 1 + 1;
-               if (d->bLength < len) /* bControlSize */
+               /* bNrChannels, wChannelConfig, iChannelNames */
+               len += 1 + 2 + 1;
+               if (d->bLength < len + 1) /* bControlSize */
                        return false;
                m = hdr[len];
                len += 1 + m + 1; /* bControlSize, bmControls, iProcessing */
index a9731f8..2e8a30f 100644 (file)
@@ -75,6 +75,7 @@
 #define SVM_EXIT_MWAIT         0x08b
 #define SVM_EXIT_MWAIT_COND    0x08c
 #define SVM_EXIT_XSETBV        0x08d
+#define SVM_EXIT_RDPRU         0x08e
 #define SVM_EXIT_NPF           0x400
 #define SVM_EXIT_AVIC_INCOMPLETE_IPI           0x401
 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS     0x402
index f01950a..3eb8411 100644 (file)
@@ -86,6 +86,8 @@
 #define EXIT_REASON_PML_FULL            62
 #define EXIT_REASON_XSAVES              63
 #define EXIT_REASON_XRSTORS             64
+#define EXIT_REASON_UMWAIT              67
+#define EXIT_REASON_TPAUSE              68
 
 #define VMX_EXIT_REASONS \
        { EXIT_REASON_EXCEPTION_NMI,         "EXCEPTION_NMI" }, \
        { EXIT_REASON_RDSEED,                "RDSEED" }, \
        { EXIT_REASON_PML_FULL,              "PML_FULL" }, \
        { EXIT_REASON_XSAVES,                "XSAVES" }, \
-       { EXIT_REASON_XRSTORS,               "XRSTORS" }
+       { EXIT_REASON_XRSTORS,               "XRSTORS" }, \
+       { EXIT_REASON_UMWAIT,                "UMWAIT" }, \
+       { EXIT_REASON_TPAUSE,                "TPAUSE" }
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
index 6ecdd10..1178d30 100644 (file)
@@ -3,7 +3,11 @@ include ../scripts/Makefile.include
 
 bindir ?= /usr/bin
 
-ifeq ($(srctree),)
+# This will work when gpio is built in tools env. where srctree
+# isn't set and when invoked from selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in srctree
+# builds
+ifndef building_out_of_srctree
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
index 233efbb..52641d8 100644 (file)
@@ -999,6 +999,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172
 #define KVM_CAP_PMU_EVENT_FILTER 173
 #define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
+#define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1145,6 +1146,7 @@ struct kvm_dirty_tlb {
 #define KVM_REG_S390           0x5000000000000000ULL
 #define KVM_REG_ARM64          0x6000000000000000ULL
 #define KVM_REG_MIPS           0x7000000000000000ULL
+#define KVM_REG_RISCV          0x8000000000000000ULL
 
 #define KVM_REG_SIZE_SHIFT     52
 #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
index b3105ac..99335e1 100644 (file)
 #define CLONE_NEWNET           0x40000000      /* New network namespace */
 #define CLONE_IO               0x80000000      /* Clone io context */
 
-/*
- * Arguments for the clone3 syscall
+#ifndef __ASSEMBLY__
+/**
+ * struct clone_args - arguments for the clone3 syscall
+ * @flags:       Flags for the new process as listed above.
+ *               All flags are valid except for CSIGNAL and
+ *               CLONE_DETACHED.
+ * @pidfd:       If CLONE_PIDFD is set, a pidfd will be
+ *               returned in this argument.
+ * @child_tid:   If CLONE_CHILD_SETTID is set, the TID of the
+ *               child process will be returned in the child's
+ *               memory.
+ * @parent_tid:  If CLONE_PARENT_SETTID is set, the TID of
+ *               the child process will be returned in the
+ *               parent's memory.
+ * @exit_signal: The exit_signal the parent process will be
+ *               sent when the child exits.
+ * @stack:       Specify the location of the stack for the
+ *               child process.
+ * @stack_size:  The size of the stack for the child process.
+ * @tls:         If CLONE_SETTLS is set, the tls descriptor
+ *               is set to tls.
+ *
+ * The structure is versioned by size and thus extensible.
+ * New struct members must go at the end of the struct and
+ * must be properly 64bit aligned.
  */
 struct clone_args {
        __aligned_u64 flags;
@@ -46,6 +69,9 @@ struct clone_args {
        __aligned_u64 stack_size;
        __aligned_u64 tls;
 };
+#endif
+
+#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
 
 /*
  * Scheduling policies
index 3542b6a..e69f449 100644 (file)
@@ -2635,6 +2635,7 @@ static int build_cl_output(char *cl_sort, bool no_source)
        bool add_sym   = false;
        bool add_dso   = false;
        bool add_src   = false;
+       int ret = 0;
 
        if (!buf)
                return -ENOMEM;
@@ -2653,7 +2654,8 @@ static int build_cl_output(char *cl_sort, bool no_source)
                        add_dso = true;
                } else if (strcmp(tok, "offset")) {
                        pr_err("unrecognized sort token: %s\n", tok);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err;
                }
        }
 
@@ -2676,13 +2678,15 @@ static int build_cl_output(char *cl_sort, bool no_source)
                add_sym ? "symbol," : "",
                add_dso ? "dso," : "",
                add_src ? "cl_srcline," : "",
-               "node") < 0)
-               return -ENOMEM;
+               "node") < 0) {
+               ret = -ENOMEM;
+               goto err;
+       }
 
        c2c.show_src = add_src;
-
+err:
        free(buf);
-       return 0;
+       return ret;
 }
 
 static int setup_coalesce(const char *coalesce, bool no_source)
index 1e61e35..9661671 100644 (file)
@@ -691,6 +691,7 @@ static char *compact_gfp_flags(char *gfp_flags)
                        new = realloc(new_flags, len + strlen(cpt) + 2);
                        if (new == NULL) {
                                free(new_flags);
+                               free(orig_flags);
                                return NULL;
                        }
 
index 1e148bb..202cada 100644 (file)
@@ -2,7 +2,7 @@ jvmti-y += libjvmti.o
 jvmti-y += jvmti_agent.o
 
 # For strlcpy
-jvmti-y += libstring.o
+jvmti-y += libstring.o libctype.o
 
 CFLAGS_jvmti         = -fPIC -DPIC -I$(JDIR)/include -I$(JDIR)/include/linux
 CFLAGS_REMOVE_jvmti  = -Wmissing-declarations
@@ -15,3 +15,7 @@ CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PE
 $(OUTPUT)jvmti/libstring.o: ../lib/string.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)jvmti/libctype.o: ../lib/ctype.c FORCE
+       $(call rule_mkdir)
+       $(call if_changed_dep,cc_o_c)
index 63e4349..15e458e 100644 (file)
@@ -15,7 +15,9 @@ void test_attr__init(void);
 void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
                     int fd, int group_fd, unsigned long flags);
 
-#define HAVE_ATTR_TEST
+#ifndef HAVE_ATTR_TEST
+#define HAVE_ATTR_TEST 1
+#endif
 
 static inline int
 sys_perf_event_open(struct perf_event_attr *attr,
@@ -27,7 +29,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
        fd = syscall(__NR_perf_event_open, attr, pid, cpu,
                     group_fd, flags);
 
-#ifdef HAVE_ATTR_TEST
+#if HAVE_ATTR_TEST
        if (unlikely(test_attr__enabled))
                test_attr__open(attr, pid, cpu, fd, group_fd, flags);
 #endif
index 4036c7f..e42bf57 100644 (file)
@@ -1758,7 +1758,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
        info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
                                                 dso->bpf_prog.id);
        if (!info_node) {
-               return SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
+               ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
                goto out;
        }
        info_linear = info_node->info_linear;
index 3fa0db1..47e03de 100644 (file)
@@ -101,14 +101,16 @@ static int copyfile_mode_ns(const char *from, const char *to, mode_t mode,
        if (tofd < 0)
                goto out;
 
-       if (fchmod(tofd, mode))
-               goto out_close_to;
-
        if (st.st_size == 0) { /* /proc? do it slowly... */
                err = slow_copyfile(from, tmp, nsi);
+               if (!err && fchmod(tofd, mode))
+                       err = -1;
                goto out_close_to;
        }
 
+       if (fchmod(tofd, mode))
+               goto out_close_to;
+
        nsinfo__mountns_enter(nsi, &nsc);
        fromfd = open(from, O_RDONLY);
        nsinfo__mountns_exit(&nsc);
index d277a98..de79c73 100644 (file)
@@ -1659,7 +1659,7 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
                        is_open = false;
                if (c2->leader == leader) {
                        if (is_open)
-                               perf_evsel__close(&evsel->core);
+                               perf_evsel__close(&c2->core);
                        c2->leader = c2;
                        c2->core.nr_members = 0;
                }
index 86d9396..becc2d1 100644 (file)
@@ -1296,8 +1296,10 @@ static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
                        continue;
 
                if (WARN_ONCE(cnt >= size,
-                             "failed to write MEM_TOPOLOGY, way too many nodes\n"))
+                       "failed to write MEM_TOPOLOGY, way too many nodes\n")) {
+                       closedir(dir);
                        return -1;
+               }
 
                ret = memory_node__read(&nodes[cnt++], idx);
        }
index 679a1d7..7b6eaf5 100644 (file)
@@ -1625,7 +1625,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
        return 0;
 }
 
-static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
+static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
 {
        struct hists *hists = a->hists;
        struct perf_hpp_fmt *fmt;
index 1596185..741f040 100644 (file)
@@ -539,10 +539,11 @@ static int perl_stop_script(void)
 
 static int perl_generate_script(struct tep_handle *pevent, const char *outfile)
 {
+       int i, not_first, count, nr_events;
+       struct tep_event **all_events;
        struct tep_event *event = NULL;
        struct tep_format_field *f;
        char fname[PATH_MAX];
-       int not_first, count;
        FILE *ofp;
 
        sprintf(fname, "%s.pl", outfile);
@@ -603,8 +604,11 @@ sub print_backtrace\n\
 }\n\n\
 ");
 
+       nr_events = tep_get_events_count(pevent);
+       all_events = tep_list_events(pevent, TEP_EVENT_SORT_ID);
 
-       while ((event = trace_find_next_event(pevent, event))) {
+       for (i = 0; all_events && i < nr_events; i++) {
+               event = all_events[i];
                fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
                fprintf(ofp, "\tmy (");
 
index 5d341ef..93c03b3 100644 (file)
@@ -1687,10 +1687,11 @@ static int python_stop_script(void)
 
 static int python_generate_script(struct tep_handle *pevent, const char *outfile)
 {
+       int i, not_first, count, nr_events;
+       struct tep_event **all_events;
        struct tep_event *event = NULL;
        struct tep_format_field *f;
        char fname[PATH_MAX];
-       int not_first, count;
        FILE *ofp;
 
        sprintf(fname, "%s.py", outfile);
@@ -1735,7 +1736,11 @@ static int python_generate_script(struct tep_handle *pevent, const char *outfile
        fprintf(ofp, "def trace_end():\n");
        fprintf(ofp, "\tprint(\"in trace_end\")\n\n");
 
-       while ((event = trace_find_next_event(pevent, event))) {
+       nr_events = tep_get_events_count(pevent);
+       all_events = tep_list_events(pevent, TEP_EVENT_SORT_ID);
+
+       for (i = 0; all_events && i < nr_events; i++) {
+               event = all_events[i];
                fprintf(ofp, "def %s__%s(", event->system, event->name);
                fprintf(ofp, "event_name, ");
                fprintf(ofp, "context, ");
index 5d6bfc7..9634f0a 100644 (file)
@@ -173,37 +173,6 @@ int parse_event_file(struct tep_handle *pevent,
        return tep_parse_event(pevent, buf, size, sys);
 }
 
-struct tep_event *trace_find_next_event(struct tep_handle *pevent,
-                                       struct tep_event *event)
-{
-       static int idx;
-       int events_count;
-       struct tep_event *all_events;
-
-       all_events = tep_get_first_event(pevent);
-       events_count = tep_get_events_count(pevent);
-       if (!pevent || !all_events || events_count < 1)
-               return NULL;
-
-       if (!event) {
-               idx = 0;
-               return all_events;
-       }
-
-       if (idx < events_count && event == (all_events + idx)) {
-               idx++;
-               if (idx == events_count)
-                       return NULL;
-               return (all_events + idx);
-       }
-
-       for (idx = 1; idx < events_count; idx++) {
-               if (event == (all_events + (idx - 1)))
-                       return (all_events + idx);
-       }
-       return NULL;
-}
-
 struct flag {
        const char *name;
        unsigned long long value;
index 2e15838..72fdf2a 100644 (file)
@@ -47,8 +47,6 @@ void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int siz
 
 ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
 
-struct tep_event *trace_find_next_event(struct tep_handle *pevent,
-                                       struct tep_event *event);
 unsigned long long read_size(struct tep_event *event, void *ptr, int size);
 unsigned long long eval_flag(const char *flag);
 
index 5eda6e1..ae56c76 100644 (file)
@@ -154,8 +154,10 @@ static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
                if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
                        continue;
 
-               if (!match_pat(d->d_name, pat))
-                       return -2;
+               if (!match_pat(d->d_name, pat)) {
+                       ret =  -2;
+                       break;
+               }
 
                scnprintf(namebuf, sizeof(namebuf), "%s/%s",
                          path, d->d_name);
index 15a6663..1afa22c 100755 (executable)
@@ -22,6 +22,7 @@ import os
 import pprint
 import random
 import re
+import stat
 import string
 import struct
 import subprocess
@@ -311,7 +312,11 @@ class DebugfsDir:
         for f in out.split():
             if f == "ports":
                 continue
+
             p = os.path.join(path, f)
+            if not os.stat(p).st_mode & stat.S_IRUSR:
+                continue
+
             if os.path.isfile(p):
                 _, out = cmd('cat %s/%s' % (path, f))
                 dfs[f] = out.strip()
index a320e38..7c6e5b1 100644 (file)
@@ -161,9 +161,14 @@ static struct sysctl_test tests[] = {
                .descr = "ctx:file_pos sysctl:read read ok narrow",
                .insns = {
                        /* If (file_pos == X) */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
                                    offsetof(struct bpf_sysctl, file_pos)),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+#else
+                       BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
+                                   offsetof(struct bpf_sysctl, file_pos) + 3),
+#endif
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 4, 2),
 
                        /* return ALLOW; */
                        BPF_MOV64_IMM(BPF_REG_0, 1),
@@ -176,6 +181,7 @@ static struct sysctl_test tests[] = {
                .attach_type = BPF_CGROUP_SYSCTL,
                .sysctl = "kernel/ostype",
                .open_flags = O_RDONLY,
+               .seek = 4,
                .result = SUCCESS,
        },
        {
index f38567e..daa7d1b 100755 (executable)
@@ -59,7 +59,7 @@ ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
 
 # start the listener
 ip netns exec ${NS_DST} bash -c \
-       "nc -4 -l -s ${IP_DST} -p 9000 >/dev/null &"
+       "nc -4 -l -p 9000 >/dev/null &"
 declare -i NC_PID=$!
 sleep 1
 
index ae6146e..4632f51 100755 (executable)
@@ -112,14 +112,16 @@ sanitization_single_dev_mcast_group_test()
        RET=0
 
        ip link add dev br0 type bridge mcast_snooping 0
+       ip link add name dummy1 up type dummy
 
        ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
                ttl 20 tos inherit local 198.51.100.1 dstport 4789 \
-               dev $swp2 group 239.0.0.1
+               dev dummy1 group 239.0.0.1
 
        sanitization_single_dev_test_fail
 
        ip link del dev vxlan0
+       ip link del dev dummy1
        ip link del dev br0
 
        log_test "vxlan device with a multicast group"
@@ -181,13 +183,15 @@ sanitization_single_dev_local_interface_test()
        RET=0
 
        ip link add dev br0 type bridge mcast_snooping 0
+       ip link add name dummy1 up type dummy
 
        ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
-               ttl 20 tos inherit local 198.51.100.1 dstport 4789 dev $swp2
+               ttl 20 tos inherit local 198.51.100.1 dstport 4789 dev dummy1
 
        sanitization_single_dev_test_fail
 
        ip link del dev vxlan0
+       ip link del dev dummy1
        ip link del dev br0
 
        log_test "vxlan device with local interface"
index b35da37..409c1fa 100644 (file)
@@ -1,4 +1,5 @@
 /s390x/sync_regs_test
+/s390x/memop
 /x86_64/cr4_cpuid_sync_test
 /x86_64/evmcs_test
 /x86_64/hyperv_cpuid
@@ -9,6 +10,7 @@
 /x86_64/state_test
 /x86_64/sync_regs_test
 /x86_64/vmx_close_while_nested_test
+/x86_64/vmx_dirty_log_test
 /x86_64/vmx_set_nested_state_test
 /x86_64/vmx_tsc_adjust_test
 /clear_dirty_log_test
index 6ae5a47..f52e0ba 100644 (file)
@@ -580,6 +580,8 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
 
+void nested_vmx_check_supported(void);
+
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
 void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
index 4911fc7..d1cf9f6 100644 (file)
@@ -55,7 +55,7 @@ static void test_dump_stack(void)
 #pragma GCC diagnostic pop
 }
 
-static pid_t gettid(void)
+static pid_t _gettid(void)
 {
        return syscall(SYS_gettid);
 }
@@ -72,7 +72,7 @@ test_assert(bool exp, const char *exp_str,
                fprintf(stderr, "==== Test Assertion Failure ====\n"
                        "  %s:%u: %s\n"
                        "  pid=%d tid=%d - %s\n",
-                       file, line, exp_str, getpid(), gettid(),
+                       file, line, exp_str, getpid(), _gettid(),
                        strerror(errno));
                test_dump_stack();
                if (fmt) {
index fab8f6b..f6ec97b 100644 (file)
@@ -376,6 +376,16 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
        init_vmcs_guest_state(guest_rip, guest_rsp);
 }
 
+void nested_vmx_check_supported(void)
+{
+       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+       if (!(entry->ecx & CPUID_VMX)) {
+               fprintf(stderr, "nested VMX not enabled, skipping test\n");
+               exit(KSFT_SKIP);
+       }
+}
+
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
 {
index 11c2a70..5c82242 100644 (file)
 
 #define VCPU_ID 5
 
+#define UCALL_PIO_PORT ((uint16_t)0x1000)
+
+/*
+ * ucall is embedded here to protect against compiler reshuffling registers
+ * before calling a function. In this test we only need to get KVM_EXIT_IO
+ * vmexit and preserve RBX, no additional information is needed.
+ */
 void guest_code(void)
 {
-       /*
-        * use a callee-save register, otherwise the compiler
-        * saves it around the call to GUEST_SYNC.
-        */
-       register u32 stage asm("rbx");
-       for (;;) {
-               GUEST_SYNC(0);
-               stage++;
-               asm volatile ("" : : "r" (stage));
-       }
+       asm volatile("1: in %[port], %%al\n"
+                    "add $0x1, %%rbx\n"
+                    "jmp 1b"
+                    : : [port] "d" (UCALL_PIO_PORT) : "rax", "rbx");
 }
 
 static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
index 3b0ffe0..5dfb535 100644 (file)
@@ -53,12 +53,8 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 int main(int argc, char *argv[])
 {
        vm_vaddr_t vmx_pages_gva;
-       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-       if (!(entry->ecx & CPUID_VMX)) {
-               fprintf(stderr, "nested VMX not enabled, skipping test\n");
-               exit(KSFT_SKIP);
-       }
+       nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
index 0bca1cf..a223a64 100644 (file)
@@ -78,6 +78,8 @@ int main(int argc, char *argv[])
        struct ucall uc;
        bool done = false;
 
+       nested_vmx_check_supported();
+
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
index 853e370..9ef7fab 100644 (file)
@@ -224,7 +224,6 @@ int main(int argc, char *argv[])
 {
        struct kvm_vm *vm;
        struct kvm_nested_state state;
-       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
        have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
 
@@ -237,10 +236,7 @@ int main(int argc, char *argv[])
         * AMD currently does not implement set_nested_state, so for now we
         * just early out.
         */
-       if (!(entry->ecx & CPUID_VMX)) {
-               fprintf(stderr, "nested VMX not enabled, skipping test\n");
-               exit(KSFT_SKIP);
-       }
+       nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, 0);
 
@@ -271,12 +267,7 @@ int main(int argc, char *argv[])
        state.flags = KVM_STATE_NESTED_RUN_PENDING;
        test_nested_state_expect_einval(vm, &state);
 
-       /*
-        * TODO: When SVM support is added for KVM_SET_NESTED_STATE
-        *       add tests here to support it like VMX.
-        */
-       if (entry->ecx & CPUID_VMX)
-               test_vmx_nested_state(vm);
+       test_vmx_nested_state(vm);
 
        kvm_vm_free(vm);
        return 0;
index f36c10e..5590fd2 100644 (file)
@@ -128,12 +128,8 @@ static void report(int64_t val)
 int main(int argc, char *argv[])
 {
        vm_vaddr_t vmx_pages_gva;
-       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-       if (!(entry->ecx & CPUID_VMX)) {
-               fprintf(stderr, "nested VMX not enabled, skipping test\n");
-               exit(KSFT_SKIP);
-       }
+       nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
index c4ba0ff..76c1897 100755 (executable)
@@ -1438,6 +1438,27 @@ ipv4_addr_metric_test()
        fi
        log_test $rc 0 "Prefix route with metric on link up"
 
+       # explicitly check for metric changes on edge scenarios
+       run_cmd "$IP addr flush dev dummy2"
+       run_cmd "$IP addr add dev dummy2 172.16.104.0/24 metric 259"
+       run_cmd "$IP addr change dev dummy2 172.16.104.0/24 metric 260"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.0/24 dev dummy2 proto kernel scope link src 172.16.104.0 metric 260"
+               rc=$?
+       fi
+       log_test $rc 0 "Modify metric of .0/24 address"
+
+       run_cmd "$IP addr flush dev dummy2"
+       run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260"
+       run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
+               rc=$?
+       fi
+       log_test $rc 0 "Modify metric of address with peer route"
+
        $IP li del dummy1
        $IP li del dummy2
        cleanup
old mode 100644 (file)
new mode 100755 (executable)
index fe3230c..fb7a59e 100644 (file)
@@ -129,7 +129,7 @@ static void test(int *rcv_fds, int count, int proto)
 {
        struct epoll_event ev;
        int epfd, i, test_fd;
-       uint16_t test_family;
+       int test_family;
        socklen_t len;
 
        epfd = epoll_create(1);
@@ -146,6 +146,7 @@ static void test(int *rcv_fds, int count, int proto)
        send_from_v4(proto);
 
        test_fd = receive_once(epfd, proto);
+       len = sizeof(test_family);
        if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
                error(1, errno, "failed to read socket domain");
        if (test_family != AF_INET)
index 4c285b6..1c8f194 100644 (file)
@@ -898,6 +898,114 @@ TEST_F(tls, nonblocking)
        }
 }
 
+static void
+test_mutliproc(struct __test_metadata *_metadata, struct _test_data_tls *self,
+              bool sendpg, unsigned int n_readers, unsigned int n_writers)
+{
+       const unsigned int n_children = n_readers + n_writers;
+       const size_t data = 6 * 1000 * 1000;
+       const size_t file_sz = data / 100;
+       size_t read_bias, write_bias;
+       int i, fd, child_id;
+       char buf[file_sz];
+       pid_t pid;
+
+       /* Only allow multiples for simplicity */
+       ASSERT_EQ(!(n_readers % n_writers) || !(n_writers % n_readers), true);
+       read_bias = n_writers / n_readers ?: 1;
+       write_bias = n_readers / n_writers ?: 1;
+
+       /* prep a file to send */
+       fd = open("/tmp/", O_TMPFILE | O_RDWR, 0600);
+       ASSERT_GE(fd, 0);
+
+       memset(buf, 0xac, file_sz);
+       ASSERT_EQ(write(fd, buf, file_sz), file_sz);
+
+       /* spawn children */
+       for (child_id = 0; child_id < n_children; child_id++) {
+               pid = fork();
+               ASSERT_NE(pid, -1);
+               if (!pid)
+                       break;
+       }
+
+       /* parent waits for all children */
+       if (pid) {
+               for (i = 0; i < n_children; i++) {
+                       int status;
+
+                       wait(&status);
+                       EXPECT_EQ(status, 0);
+               }
+
+               return;
+       }
+
+       /* Split threads for reading and writing */
+       if (child_id < n_readers) {
+               size_t left = data * read_bias;
+               char rb[8001];
+
+               while (left) {
+                       int res;
+
+                       res = recv(self->cfd, rb,
+                                  left > sizeof(rb) ? sizeof(rb) : left, 0);
+
+                       EXPECT_GE(res, 0);
+                       left -= res;
+               }
+       } else {
+               size_t left = data * write_bias;
+
+               while (left) {
+                       int res;
+
+                       ASSERT_EQ(lseek(fd, 0, SEEK_SET), 0);
+                       if (sendpg)
+                               res = sendfile(self->fd, fd, NULL,
+                                              left > file_sz ? file_sz : left);
+                       else
+                               res = send(self->fd, buf,
+                                          left > file_sz ? file_sz : left, 0);
+
+                       EXPECT_GE(res, 0);
+                       left -= res;
+               }
+       }
+}
+
+TEST_F(tls, mutliproc_even)
+{
+       test_mutliproc(_metadata, self, false, 6, 6);
+}
+
+TEST_F(tls, mutliproc_readers)
+{
+       test_mutliproc(_metadata, self, false, 4, 12);
+}
+
+TEST_F(tls, mutliproc_writers)
+{
+       test_mutliproc(_metadata, self, false, 10, 2);
+}
+
+TEST_F(tls, mutliproc_sendpage_even)
+{
+       test_mutliproc(_metadata, self, true, 6, 6);
+}
+
+TEST_F(tls, mutliproc_sendpage_readers)
+{
+       test_mutliproc(_metadata, self, true, 4, 12);
+}
+
+TEST_F(tls, mutliproc_sendpage_writers)
+{
+       test_mutliproc(_metadata, self, true, 10, 2);
+}
+
 TEST_F(tls, control_msg)
 {
        if (self->notls)
index bd4a724..c0dd102 100644 (file)
@@ -44,6 +44,46 @@ static int clock_adjtime(clockid_t id, struct timex *tx)
 }
 #endif
 
+static void show_flag_test(int rq_index, unsigned int flags, int err)
+{
+       printf("PTP_EXTTS_REQUEST%c flags 0x%08x : (%d) %s\n",
+              rq_index ? '1' + rq_index : ' ',
+              flags, err, strerror(errno));
+       /* sigh, uClibc ... */
+       errno = 0;
+}
+
+static void do_flag_test(int fd, unsigned int index)
+{
+       struct ptp_extts_request extts_request;
+       unsigned long request[2] = {
+               PTP_EXTTS_REQUEST,
+               PTP_EXTTS_REQUEST2,
+       };
+       unsigned int enable_flags[5] = {
+               PTP_ENABLE_FEATURE,
+               PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
+               PTP_ENABLE_FEATURE | PTP_FALLING_EDGE,
+               PTP_ENABLE_FEATURE | PTP_RISING_EDGE | PTP_FALLING_EDGE,
+               PTP_ENABLE_FEATURE | (PTP_EXTTS_VALID_FLAGS + 1),
+       };
+       int err, i, j;
+
+       memset(&extts_request, 0, sizeof(extts_request));
+       extts_request.index = index;
+
+       for (i = 0; i < 2; i++) {
+               for (j = 0; j < 5; j++) {
+                       extts_request.flags = enable_flags[j];
+                       err = ioctl(fd, request[i], &extts_request);
+                       show_flag_test(i, extts_request.flags, err);
+
+                       extts_request.flags = 0;
+                       err = ioctl(fd, request[i], &extts_request);
+               }
+       }
+}
+
 static clockid_t get_clockid(int fd)
 {
 #define CLOCKFD 3
@@ -96,7 +136,8 @@ static void usage(char *progname)
                " -s         set the ptp clock time from the system time\n"
                " -S         set the system time from the ptp clock time\n"
                " -t val     shift the ptp clock time by 'val' seconds\n"
-               " -T val     set the ptp clock time to 'val' seconds\n",
+               " -T val     set the ptp clock time to 'val' seconds\n"
+               " -z         test combinations of rising/falling external time stamp flags\n",
                progname);
 }
 
@@ -122,6 +163,7 @@ int main(int argc, char *argv[])
        int adjtime = 0;
        int capabilities = 0;
        int extts = 0;
+       int flagtest = 0;
        int gettime = 0;
        int index = 0;
        int list_pins = 0;
@@ -138,7 +180,7 @@ int main(int argc, char *argv[])
 
        progname = strrchr(argv[0], '/');
        progname = progname ? 1+progname : argv[0];
-       while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:v"))) {
+       while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:z"))) {
                switch (c) {
                case 'c':
                        capabilities = 1;
@@ -191,6 +233,9 @@ int main(int argc, char *argv[])
                        settime = 3;
                        seconds = atoi(optarg);
                        break;
+               case 'z':
+                       flagtest = 1;
+                       break;
                case 'h':
                        usage(progname);
                        return 0;
@@ -322,6 +367,10 @@ int main(int argc, char *argv[])
                }
        }
 
+       if (flagtest) {
+               do_flag_test(fd, index);
+       }
+
        if (list_pins) {
                int n_pins = 0;
                if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
index cb3fc09..485cf06 100644 (file)
@@ -71,7 +71,7 @@ int main(int argc, char **argv)
                        flags |= MAP_SHARED;
                        break;
                case 'H':
-                       flags |= MAP_HUGETLB;
+                       flags |= (MAP_HUGETLB | MAP_ANONYMOUS);
                        break;
                default:
                        return -1;
index 051d7d3..927a151 100644 (file)
@@ -69,7 +69,7 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
        FILE *fd = NULL;
        struct udev_device *plat;
        const char *speed;
-       int ret = 0;
+       size_t ret;
 
        plat = udev_device_get_parent(sdev);
        path = udev_device_get_syspath(plat);
@@ -79,8 +79,10 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
        if (!fd)
                return -1;
        ret = fread((char *) &descr, sizeof(descr), 1, fd);
-       if (ret < 0)
+       if (ret != 1) {
+               err("Cannot read vudc device descr file: %s", strerror(errno));
                goto err;
+       }
        fclose(fd);
 
        copy_descr_attr(dev, &descr, bDeviceClass);
index 362a018..8731dfe 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_emulate.h>
 #include <kvm/arm_pmu.h>
@@ -146,8 +147,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(select_idx))
                counter = upper_32_bits(counter);
-
-       else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+       else if (select_idx != ARMV8_PMU_CYCLE_IDX)
                counter = lower_32_bits(counter);
 
        return counter;
@@ -193,7 +193,7 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
  */
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 {
-       u64 counter, reg;
+       u64 counter, reg, val;
 
        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (!pmc->perf_event)
@@ -201,16 +201,19 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 
        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
-       if (kvm_pmu_pmc_is_chained(pmc)) {
-               reg = PMEVCNTR0_EL0 + pmc->idx;
-               __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
-               __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+       if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+               reg = PMCCNTR_EL0;
+               val = counter;
        } else {
-               reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
-               __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
+               reg = PMEVCNTR0_EL0 + pmc->idx;
+               val = lower_32_bits(counter);
        }
 
+       __vcpu_sys_reg(vcpu, reg) = val;
+
+       if (kvm_pmu_pmc_is_chained(pmc))
+               __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+
        kvm_pmu_release_perf_event(pmc);
 }
 
@@ -440,8 +443,25 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct pt_regs *regs)
 {
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+       struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
+       u64 period;
+
+       cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+       /*
+        * Reset the sample period to the architectural limit,
+        * i.e. the point where the counter overflows.
+        */
+       period = -(local64_read(&perf_event->count));
+
+       if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+               period &= GENMASK(31, 0);
+
+       local64_set(&perf_event->hw.period_left, 0);
+       perf_event->attr.sample_period = period;
+       perf_event->hw.sample_period = period;
 
        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
 
@@ -449,6 +469,8 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                kvm_vcpu_kick(vcpu);
        }
+
+       cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
 }
 
 /**
@@ -567,12 +589,12 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
                 * high counter.
                 */
                attr.sample_period = (-counter) & GENMASK(63, 0);
+               if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
+                       attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+
                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow,
                                                         pmc + 1);
-
-               if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-                       attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
        } else {
                /* The initial sample period (overflow count) of an event. */
                if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
index fd68fbe..13efc29 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/bsearch.h>
 #include <linux/io.h>
 #include <linux/lockdep.h>
+#include <linux/kthread.h>
 
 #include <asm/processor.h>
 #include <asm/ioctl.h>
@@ -121,9 +122,22 @@ static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
                                  unsigned long arg);
 #define KVM_COMPAT(c)  .compat_ioctl   = (c)
 #else
+/*
+ * For architectures that don't implement a compat infrastructure,
+ * adopt a double line of defense:
+ * - Prevent a compat task from opening /dev/kvm
+ * - If the open has been done by a 64bit task, and the KVM fd
+ *   passed to a compat task, let the ioctls fail.
+ */
 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
                                unsigned long arg) { return -EINVAL; }
-#define KVM_COMPAT(c)  .compat_ioctl   = kvm_no_compat_ioctl
+
+static int kvm_no_compat_open(struct inode *inode, struct file *file)
+{
+       return is_compat_task() ? -ENODEV : 0;
+}
+#define KVM_COMPAT(c)  .compat_ioctl   = kvm_no_compat_ioctl,  \
+                       .open           = kvm_no_compat_open
 #endif
 static int hardware_enable_all(void);
 static void hardware_disable_all(void);
@@ -149,10 +163,30 @@ __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
        return 0;
 }
 
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
+{
+       /*
+        * The metadata used by is_zone_device_page() to determine whether or
+        * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
+        * the device has been pinned, e.g. by get_user_pages().  WARN if the
+        * page_count() is zero to help detect bad usage of this helper.
+        */
+       if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
+               return false;
+
+       return is_zone_device_page(pfn_to_page(pfn));
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
+       /*
+        * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
+        * perspective they are "normal" pages, albeit with slightly different
+        * usage rules.
+        */
        if (pfn_valid(pfn))
-               return PageReserved(pfn_to_page(pfn));
+               return PageReserved(pfn_to_page(pfn)) &&
+                      !kvm_is_zone_device_pfn(pfn);
 
        return true;
 }
@@ -625,10 +659,28 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
        return 0;
 }
 
+/*
+ * Called after the VM is otherwise initialized, but just before adding it to
+ * the vm_list.
+ */
+int __weak kvm_arch_post_init_vm(struct kvm *kvm)
+{
+       return 0;
+}
+
+/*
+ * Called just after removing the VM from the vm_list, but before doing any
+ * other destruction.
+ */
+void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
+{
+}
+
 static struct kvm *kvm_create_vm(unsigned long type)
 {
-       int r, i;
        struct kvm *kvm = kvm_arch_alloc_vm();
+       int r = -ENOMEM;
+       int i;
 
        if (!kvm)
                return ERR_PTR(-ENOMEM);
@@ -640,46 +692,51 @@ static struct kvm *kvm_create_vm(unsigned long type)
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
-       refcount_set(&kvm->users_count, 1);
        INIT_LIST_HEAD(&kvm->devices);
 
-       r = kvm_arch_init_vm(kvm, type);
-       if (r)
-               goto out_err_no_disable;
-
-       r = hardware_enable_all();
-       if (r)
-               goto out_err_no_disable;
-
-#ifdef CONFIG_HAVE_KVM_IRQFD
-       INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
-#endif
-
        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
-       r = -ENOMEM;
+       if (init_srcu_struct(&kvm->srcu))
+               goto out_err_no_srcu;
+       if (init_srcu_struct(&kvm->irq_srcu))
+               goto out_err_no_irq_srcu;
+
+       refcount_set(&kvm->users_count, 1);
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                struct kvm_memslots *slots = kvm_alloc_memslots();
+
                if (!slots)
-                       goto out_err_no_srcu;
+                       goto out_err_no_arch_destroy_vm;
                /* Generations must be different for each address space. */
                slots->generation = i;
                rcu_assign_pointer(kvm->memslots[i], slots);
        }
 
-       if (init_srcu_struct(&kvm->srcu))
-               goto out_err_no_srcu;
-       if (init_srcu_struct(&kvm->irq_srcu))
-               goto out_err_no_irq_srcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
                rcu_assign_pointer(kvm->buses[i],
                        kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
                if (!kvm->buses[i])
-                       goto out_err;
+                       goto out_err_no_arch_destroy_vm;
        }
 
+       r = kvm_arch_init_vm(kvm, type);
+       if (r)
+               goto out_err_no_arch_destroy_vm;
+
+       r = hardware_enable_all();
+       if (r)
+               goto out_err_no_disable;
+
+#ifdef CONFIG_HAVE_KVM_IRQFD
+       INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
+#endif
+
        r = kvm_init_mmu_notifier(kvm);
        if (r)
+               goto out_err_no_mmu_notifier;
+
+       r = kvm_arch_post_init_vm(kvm);
+       if (r)
                goto out_err;
 
        mutex_lock(&kvm_lock);
@@ -691,17 +748,24 @@ static struct kvm *kvm_create_vm(unsigned long type)
        return kvm;
 
 out_err:
-       cleanup_srcu_struct(&kvm->irq_srcu);
-out_err_no_irq_srcu:
-       cleanup_srcu_struct(&kvm->srcu);
-out_err_no_srcu:
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+       if (kvm->mmu_notifier.ops)
+               mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
+#endif
+out_err_no_mmu_notifier:
        hardware_disable_all();
 out_err_no_disable:
-       refcount_set(&kvm->users_count, 0);
+       kvm_arch_destroy_vm(kvm);
+out_err_no_arch_destroy_vm:
+       WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm_get_bus(kvm, i));
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
                kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
+       cleanup_srcu_struct(&kvm->irq_srcu);
+out_err_no_irq_srcu:
+       cleanup_srcu_struct(&kvm->srcu);
+out_err_no_srcu:
        kvm_arch_free_vm(kvm);
        mmdrop(current->mm);
        return ERR_PTR(r);
@@ -733,6 +797,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
        mutex_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        mutex_unlock(&kvm_lock);
+       kvm_arch_pre_destroy_vm(kvm);
+
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++) {
                struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
@@ -1853,7 +1919,7 @@ EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
 
 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
 {
-       if (!kvm_is_reserved_pfn(pfn)) {
+       if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
 
                SetPageDirty(page);
@@ -1863,7 +1929,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
 {
-       if (!kvm_is_reserved_pfn(pfn))
+       if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
@@ -2360,20 +2426,23 @@ out:
        kvm_arch_vcpu_unblocking(vcpu);
        block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 
-       if (!vcpu_valid_wakeup(vcpu))
-               shrink_halt_poll_ns(vcpu);
-       else if (halt_poll_ns) {
-               if (block_ns <= vcpu->halt_poll_ns)
-                       ;
-               /* we had a long block, shrink polling */
-               else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+       if (!kvm_arch_no_poll(vcpu)) {
+               if (!vcpu_valid_wakeup(vcpu)) {
                        shrink_halt_poll_ns(vcpu);
-               /* we had a short halt and our poll time is too small */
-               else if (vcpu->halt_poll_ns < halt_poll_ns &&
-                       block_ns < halt_poll_ns)
-                       grow_halt_poll_ns(vcpu);
-       } else
-               vcpu->halt_poll_ns = 0;
+               } else if (halt_poll_ns) {
+                       if (block_ns <= vcpu->halt_poll_ns)
+                               ;
+                       /* we had a long block, shrink polling */
+                       else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+                               shrink_halt_poll_ns(vcpu);
+                       /* we had a short halt and our poll time is too small */
+                       else if (vcpu->halt_poll_ns < halt_poll_ns &&
+                               block_ns < halt_poll_ns)
+                               grow_halt_poll_ns(vcpu);
+               } else {
+                       vcpu->halt_poll_ns = 0;
+               }
+       }
 
        trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
        kvm_arch_vcpu_block_finish(vcpu);
@@ -4364,3 +4433,86 @@ void kvm_exit(void)
        kvm_vfio_ops_exit();
 }
 EXPORT_SYMBOL_GPL(kvm_exit);
+
+struct kvm_vm_worker_thread_context {
+       struct kvm *kvm;
+       struct task_struct *parent;
+       struct completion init_done;
+       kvm_vm_thread_fn_t thread_fn;
+       uintptr_t data;
+       int err;
+};
+
+static int kvm_vm_worker_thread(void *context)
+{
+       /*
+        * The init_context is allocated on the stack of the parent thread, so
+        * we have to locally copy anything that is needed beyond initialization
+        */
+       struct kvm_vm_worker_thread_context *init_context = context;
+       struct kvm *kvm = init_context->kvm;
+       kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
+       uintptr_t data = init_context->data;
+       int err;
+
+       err = kthread_park(current);
+       /* kthread_park(current) is never supposed to return an error */
+       WARN_ON(err != 0);
+       if (err)
+               goto init_complete;
+
+       err = cgroup_attach_task_all(init_context->parent, current);
+       if (err) {
+               kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
+                       __func__, err);
+               goto init_complete;
+       }
+
+       set_user_nice(current, task_nice(init_context->parent));
+
+init_complete:
+       init_context->err = err;
+       complete(&init_context->init_done);
+       init_context = NULL;
+
+       if (err)
+               return err;
+
+       /* Wait to be woken up by the spawner before proceeding. */
+       kthread_parkme();
+
+       if (!kthread_should_stop())
+               err = thread_fn(kvm, data);
+
+       return err;
+}
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+                               uintptr_t data, const char *name,
+                               struct task_struct **thread_ptr)
+{
+       struct kvm_vm_worker_thread_context init_context = {};
+       struct task_struct *thread;
+
+       *thread_ptr = NULL;
+       init_context.kvm = kvm;
+       init_context.parent = current;
+       init_context.thread_fn = thread_fn;
+       init_context.data = data;
+       init_completion(&init_context.init_done);
+
+       thread = kthread_run(kvm_vm_worker_thread, &init_context,
+                            "%s-%d", name, task_pid_nr(current));
+       if (IS_ERR(thread))
+               return PTR_ERR(thread);
+
+       /* kthread_run is never supposed to return NULL */
+       WARN_ON(thread == NULL);
+
+       wait_for_completion(&init_context.init_done);
+
+       if (!init_context.err)
+               *thread_ptr = thread;
+
+       return init_context.err;
+}