Merge "cnss2: Add support for genoa sdio"
author    Linux Build Service Account <lnxbuild@localhost>
          Mon, 14 Oct 2019 11:48:43 +0000 (04:48 -0700)
committer Gerrit - the friendly Code Review server <code-review@localhost>
          Mon, 14 Oct 2019 11:48:42 +0000 (04:48 -0700)
587 files changed:
Documentation/ABI/testing/procfs-concurrent_time [new file with mode: 0644]
Documentation/ABI/testing/sysfs-fs-f2fs
Documentation/devicetree/bindings/arm/msm/diagfwd_sdio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/msm/msm_ipc_router_sdio_xprt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/qcn-sdio-txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
Documentation/filesystems/f2fs.txt
Documentation/kernel-parameters.txt
Documentation/siphash.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsim_hs_defconfig
arch/arc/configs/nsim_hs_smp_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/kernel/traps.c
arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
arch/arm/boot/dts/qcom/msm-arm-smmu-8996.dtsi
arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi
arch/arm/boot/dts/qcom/msm-audio.dtsi
arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
arch/arm/boot/dts/qcom/msm8996-vidc.dtsi
arch/arm/boot/dts/qcom/msm8996.dtsi
arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi
arch/arm/boot/dts/qcom/msm8998-audio.dtsi
arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
arch/arm/boot/dts/qcom/msm8998-qrd.dtsi
arch/arm/boot/dts/qcom/msm8998-sim.dtsi
arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-audio-common.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/common/Kconfig
arch/arm/common/Makefile
arch/arm/common/fiq_glue.S [deleted file]
arch/arm/common/fiq_glue_setup.c [deleted file]
arch/arm/kvm/mmio.c
arch/arm/mach-davinci/sleep.S
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-rpc/dma.c
arch/arm/mm/init.c
arch/arm64/configs/msm-perf_defconfig
arch/arm64/configs/msm_defconfig
arch/arm64/configs/msmcortex-perf_defconfig
arch/arm64/configs/msmcortex_defconfig
arch/arm64/configs/sdm660-perf_defconfig
arch/arm64/configs/sdm660_defconfig
arch/arm64/crypto/sha1-ce-glue.c
arch/arm64/crypto/sha2-ce-glue.c
arch/arm64/include/asm/cpufeature.h
arch/arm64/kernel/acpi.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/hw_breakpoint.c
arch/mips/Kconfig
arch/mips/boot/compressed/Makefile
arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
arch/mips/include/asm/mach-ath79/ar933x_uart.h
arch/mips/include/asm/netlogic/xlr/fmn.h
arch/mips/include/asm/smp.h
arch/mips/kernel/i8253.c
arch/mips/lantiq/irq.c
arch/mips/sibyte/common/Makefile
arch/mips/sibyte/common/dma.c [deleted file]
arch/mips/vdso/Makefile
arch/parisc/kernel/ptrace.c
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/pci_of_scan.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/swsusp_32.S
arch/powerpc/platforms/powermac/sleep.S
arch/powerpc/sysdev/uic.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/net/bpf_jit_comp.c
arch/sh/include/asm/io.h
arch/sh/kernel/hw_breakpoint.c
arch/um/include/asm/mmu_context.h
arch/x86/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/misc.h
arch/x86/configs/x86_64_cuttlefish_defconfig
arch/x86/entry/calling.h
arch/x86/entry/entry_64.S
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/entry/vdso/vdso-layout.lds.S
arch/x86/include/asm/apic.h
arch/x86/include/asm/bootparam_utils.h
arch/x86/include/asm/clocksource.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/ptrace.h
arch/x86/include/asm/suspend_32.h
arch/x86/include/asm/suspend_64.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/bigsmp_32.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mkcapflags.sh
arch/x86/kernel/hpet.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/sysfb_efi.c
arch/x86/kernel/uprobes.c
arch/x86/kvm/pmu.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/math-emu/fpu_emu.h
arch/x86/math-emu/reg_constant.c
arch/x86/mm/fault.c
arch/x86/power/cpu.c
block/blk-core.c
block/compat_ioctl.c
crypto/ghash-generic.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/ata/libahci_platform.c
drivers/ata/libata-sff.c
drivers/ata/libata-zpodd.c
drivers/atm/Kconfig
drivers/atm/iphase.c
drivers/base/core.c
drivers/base/regmap/regmap.c
drivers/block/floppy.c
drivers/bluetooth/btqca.c
drivers/bluetooth/hci_ath.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_bcsp.c
drivers/bluetooth/hci_intel.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_uart.h
drivers/char/Kconfig
drivers/char/Makefile
drivers/char/adsprpc.c
drivers/char/diag/Kconfig
drivers/char/diag/Makefile
drivers/char/diag/diag_dci.c
drivers/char/diag/diag_masks.c
drivers/char/diag/diag_masks.h
drivers/char/diag/diagchar_core.c
drivers/char/diag/diagfwd_bridge.c
drivers/char/diag/diagfwd_bridge.h
drivers/char/diag/diagfwd_sdio.c [new file with mode: 0644]
drivers/char/diag/diagfwd_sdio.h [new file with mode: 0644]
drivers/char/hpet.c
drivers/char/qti_sdio_client.c [new file with mode: 0644]
drivers/clk/clk-s2mps11.c
drivers/clk/rockchip/clk-mmc-phase.c
drivers/cpufreq/cpufreq_times.c
drivers/cpufreq/pasemi-cpufreq.c
drivers/crypto/talitos.c
drivers/devfreq/governor_gpubw_mon.c
drivers/dma/imx-sdma.c
drivers/dma/omap-dma.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/ste_dma40.c
drivers/edac/edac_mc_sysfs.c
drivers/edac/edac_module.h
drivers/firmware/Kconfig
drivers/firmware/iscsi_ibft.c
drivers/gpio/gpio-omap.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/ipu-v3/ipu-ic.c
drivers/gpu/msm/adreno_ringbuffer.c
drivers/gpu/msm/adreno_ringbuffer.h
drivers/gpu/msm/kgsl.c
drivers/gpu/msm/kgsl.h
drivers/gpu/msm/kgsl_device.h
drivers/gpu/msm/kgsl_drawobj.c
drivers/gpu/msm/kgsl_iommu.c
drivers/hid/hid-core.c
drivers/hid/hid-holtek-kbd.c
drivers/hid/hid-ids.h
drivers/hid/hid-tmff.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/hiddev.c
drivers/hid/wacom_wac.c
drivers/hwmon/nct6775.c
drivers/hwmon/nct7802.c
drivers/hwtracing/intel_th/msu.c
drivers/hwtracing/stm/core.c
drivers/i2c/busses/i2c-msm-v2.c
drivers/infiniband/core/addr.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx5/mr.c
drivers/input/joystick/iforce/iforce-usb.c
drivers/input/mouse/trackpoint.h
drivers/input/tablet/gtco.c
drivers/input/tablet/kbtab.c
drivers/iommu/amd_iommu_init.c
drivers/irqchip/irq-imx-gpcv2.c
drivers/isdn/capi/capi.c
drivers/isdn/hardware/mISDN/hfcsusb.c
drivers/mailbox/mailbox.c
drivers/md/bcache/super.c
drivers/md/dm-table.c
drivers/md/persistent-data/dm-btree.c
drivers/md/persistent-data/dm-space-map-metadata.c
drivers/media/dvb-frontends/tua6100.c
drivers/media/i2c/Makefile
drivers/media/i2c/adv7511-v4l2.c [moved from drivers/media/i2c/adv7511.c with 99% similarity]
drivers/media/platform/coda/coda-bit.c
drivers/media/platform/davinci/vpss.c
drivers/media/platform/marvell-ccic/mcam-core.c
drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
drivers/media/platform/msm/vidc/hfi_response_handler.c
drivers/media/radio/radio-raremono.c
drivers/media/radio/wl128x/fmdrv_v4l2.c
drivers/media/usb/cpia2/cpia2_usb.c
drivers/media/usb/dvb-usb/dvb-usb-init.c
drivers/media/usb/dvb-usb/technisat-usb2.c
drivers/media/usb/tm6000/tm6000-dvb.c
drivers/media/v4l2-core/v4l2-ctrls.c
drivers/memstick/core/memstick.c
drivers/mfd/arizona-core.c
drivers/mfd/mfd-core.c
drivers/misc/qseecom.c
drivers/misc/vmw_vmci/vmci_doorbell.c
drivers/mmc/core/core.c
drivers/mmc/core/sd.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-of-at91.c
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_hsi.c
drivers/net/can/dev.c
drivers/net/can/sja1000/peak_pcmcia.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/seeq/sgiseeq.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/phy/phy_device.c
drivers/net/ppp/pppoe.c
drivers/net/ppp/pppox.c
drivers/net/ppp/pptp.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cx82310_eth.c
drivers/net/usb/kalmia.c
drivers/net/usb/pegasus.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/wimax/i2400m/fw.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath6kl/wmi.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/dfs_pattern_detector.c
drivers/net/wireless/cnss2/main.c
drivers/net/wireless/cnss2/main.h
drivers/net/wireless/cnss2/power.c
drivers/net/wireless/mediatek/mt7601u/dma.c
drivers/net/wireless/mediatek/mt7601u/tx.c
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/nfc/st-nci/se.c
drivers/nfc/st21nfca/se.c
drivers/pci/host/pci-msm.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.c
drivers/phy/phy-rcar-gen2.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/platform/msm/Kconfig
drivers/platform/msm/Makefile
drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
drivers/platform/msm/ipa/ipa_v3/ipa_i.h
drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
drivers/platform/msm/qcn/Kconfig [new file with mode: 0644]
drivers/platform/msm/qcn/Makefile [new file with mode: 0644]
drivers/platform/msm/qcn/qcn_sdio.c [new file with mode: 0644]
drivers/platform/msm/qcn/qcn_sdio.h [new file with mode: 0644]
drivers/platform/msm/qcn/qcn_sdio_hwio.h [new file with mode: 0644]
drivers/pps/pps.c
drivers/regulator/s2mps11.c
drivers/s390/block/dasd_alias.c
drivers/s390/cio/qdio_main.c
drivers/s390/scsi/zfcp_erp.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/ufs/ufshcd.c
drivers/soc/qcom/Kconfig
drivers/soc/qcom/Makefile
drivers/soc/qcom/avtimer.c
drivers/soc/qcom/hab/ghs_comm.c
drivers/soc/qcom/hab/hab_ghs.c
drivers/soc/qcom/icnss.c
drivers/soc/qcom/ipc_router_sdio_xprt.c [new file with mode: 0644]
drivers/soc/qcom/qdsp6v2/Makefile
drivers/soc/qcom/qdsp6v2/apr.c
drivers/soc/qcom/qdsp6v2/apr_dummy.c [new file with mode: 0644]
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bcm2835aux.c
drivers/staging/android/Kconfig
drivers/staging/android/Makefile
drivers/staging/android/fiq_debugger/Kconfig [deleted file]
drivers/staging/android/fiq_debugger/Makefile [deleted file]
drivers/staging/android/fiq_debugger/fiq_debugger.c [deleted file]
drivers/staging/android/fiq_debugger/fiq_debugger.h [deleted file]
drivers/staging/android/fiq_debugger/fiq_debugger_arm.c [deleted file]
drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c [deleted file]
drivers/staging/android/fiq_debugger/fiq_debugger_priv.h [deleted file]
drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h [deleted file]
drivers/staging/android/fiq_debugger/fiq_watchdog.c [deleted file]
drivers/staging/android/fiq_debugger/fiq_watchdog.h [deleted file]
drivers/staging/comedi/drivers/dt3000.c
drivers/staging/media/davinci_vpfe/vpfe_video.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/cpm_uart/cpm_uart_core.c
drivers/tty/serial/digicolor-usart.c
drivers/tty/serial/max310x.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/sprd_serial.c
drivers/tty/tty_ldsem.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/config.c
drivers/usb/core/file.c
drivers/usb/core/hub.c
drivers/usb/dwc3/dwc3-msm.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/u_f.h
drivers/usb/host/fotg210-hcd.c
drivers/usb/host/hwa-hc.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/pci-quirks.c
drivers/usb/misc/iowarrior.c
drivers/usb/misc/ks_bridge.c
drivers/usb/misc/yurex.c
drivers/usb/phy/phy-msm-qusb.c
drivers/usb/serial/option.c
drivers/usb/storage/realtek_cr.c
drivers/usb/storage/unusual_devs.h
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/test.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/watchdog/bcm2835_wdt.c
drivers/xen/swiotlb-xen.c
drivers/xen/xen-pciback/conf_space_capability.c
fs/9p/vfs_addr.c
fs/adfs/super.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/caps.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/connect.c
fs/cifs/smb2pdu.c
fs/coda/file.c
fs/coda/psdev.c
fs/compat_ioctl.c
fs/ecryptfs/crypto.c
fs/exec.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/extent_cache.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/sysfs.c
fs/f2fs/xattr.c
fs/gfs2/rgrp.c
fs/inode.c
fs/namei.c
fs/nfs/inode.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/pagelist.c
fs/nfs/proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfssvc.c
fs/ocfs2/xattr.c
fs/open.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/userfaultfd.c
include/asm-generic/bug.h
include/asm-generic/getorder.h
include/linux/acpi.h
include/linux/ceph/buffer.h
include/linux/coda.h
include/linux/coda_psdev.h
include/linux/compiler.h
include/linux/cred.h
include/linux/diagchar.h
include/linux/elevator.h
include/linux/fs.h
include/linux/gpio.h
include/linux/hid.h
include/linux/if_pppox.h
include/linux/mmc/host.h
include/linux/module.h
include/linux/qcn_sdio_al.h [new file with mode: 0644]
include/linux/qdsp6v2/apr.h
include/linux/qdsp6v2/rtac.h
include/linux/rcupdate.h
include/linux/sched.h
include/linux/siphash.h [new file with mode: 0644]
include/net/cnss2.h
include/net/ndisc.h
include/net/netfilter/nf_conntrack.h
include/net/netns/ipv4.h
include/net/tcp.h
include/net/xfrm.h
include/scsi/libfcoe.h
include/sound/compress_driver.h
include/trace/events/f2fs.h
include/trace/events/namei.h [new file with mode: 0644]
include/uapi/linux/coda_psdev.h
include/uapi/linux/fs.h
include/uapi/linux/isdn/capicmd.h
init/Kconfig
ipc/mqueue.c
kernel/bpf/Makefile
kernel/cred.c
kernel/events/core.c
kernel/fork.c
kernel/irq/resend.c
kernel/locking/lockdep.c
kernel/locking/lockdep_proc.c
kernel/module.c
kernel/padata.c
kernel/pid_namespace.c
kernel/sched/fair.c
kernel/time/ntp.c
kernel/time/timer_list.c
kernel/trace/trace.c
lib/Kconfig.debug
lib/Makefile
lib/reed_solomon/decode_rs.c
lib/scatterlist.c
lib/siphash.c [new file with mode: 0644]
lib/test_siphash.c [new file with mode: 0644]
mm/cma.c
mm/kmemleak.c
mm/memcontrol.c
mm/mmu_notifier.c
mm/vmalloc.c
mm/vmstat.c
net/9p/trans_virtio.c
net/batman-adv/translation-table.c
net/bluetooth/6lowpan.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap_core.c
net/bluetooth/smp.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_stp_bpdu.c
net/bridge/br_vlan.c
net/bridge/netfilter/ebtables.c
net/core/dev.c
net/core/neighbour.c
net/core/netpoll.c
net/core/stream.c
net/ipv4/devinet.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/ip6mr.c
net/ipv6/ndisc.c
net/ipv6/output_core.c
net/ipv6/ping.c
net/key/af_key.c
net/l2tp/l2tp_ppp.c
net/mac80211/cfg.c
net/mac80211/driver-ops.c
net/mac80211/mlme.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_acct.c
net/netrom/af_netrom.c
net/nfc/nci/data.c
net/packet/af_packet.c
net/sched/sch_codel.c
net/sched/sch_generic.c
net/sched/sch_hhf.c
net/sctp/protocol.c
net/sctp/sm_sideeffect.c
net/tipc/name_distr.c
net/tipc/netlink_compat.c
net/wireless/reg.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/Makefile.modpost
scripts/decode_stacktrace.sh
scripts/kallsyms.c
scripts/recordmcount.h
security/keys/request_key_auth.c
security/selinux/ss/policydb.c
sound/core/compress_offload.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_fifo.c
sound/core/seq/seq_fifo.h
sound/firewire/packets-buffer.c
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_generic.h
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/msm/qdsp6v2/rtac.c
sound/sound_core.c
sound/usb/line6/podhd.c
sound/usb/mixer.c
tools/hv/hv_kvp_daemon.c
tools/hv/hv_vss_daemon.c
tools/iio/iio_utils.c
tools/perf/bench/numa.c
tools/perf/builtin-probe.c
tools/perf/tests/mmap-thread-lookup.c
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/thread.c
tools/power/cpupower/utils/cpufreq-set.c
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/kvm/config [new file with mode: 0644]
virt/kvm/coalesced_mmio.c

diff --git a/Documentation/ABI/testing/procfs-concurrent_time b/Documentation/ABI/testing/procfs-concurrent_time
new file mode 100644 (file)
index 0000000..55b4142
--- /dev/null
@@ -0,0 +1,16 @@
+What:          /proc/uid_concurrent_active_time
+Date:          December 2018
+Contact:       Connor O'Brien <connoro@google.com>
+Description:
+       The /proc/uid_concurrent_active_time file displays aggregated cputime
+       numbers for each uid, broken down by the total number of cores that were
+       active while the uid's task was running.
+
+What:          /proc/uid_concurrent_policy_time
+Date:          December 2018
+Contact:       Connor O'Brien <connoro@google.com>
+Description:
+       The /proc/uid_concurrent_policy_time file displays aggregated cputime
+       numbers for each uid, broken down based on the cpufreq policy
+       of the core used by the uid's task and the number of cores associated
+       with that policy that were active while the uid's task was running.
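A minimal user-space sketch for inspecting these files, assuming they are plain line-oriented text as the description above suggests (the paths come from the description; everything else is illustrative and not part of the patch):

#include <stdio.h>

/* Dump the aggregated per-uid concurrent time stats to stdout. */
static void dump_proc_file(const char *path)
{
	char line[512];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	dump_proc_file("/proc/uid_concurrent_active_time");
	dump_proc_file("/proc/uid_concurrent_policy_time");
	return 0;
}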
index e916c1e..4c73245 100644 (file)
@@ -243,3 +243,11 @@ Description:
                 - Del: echo '[h/c]!extension' > /sys/fs/f2fs/<disk>/extension_list
                 - [h] means add/del hot file extension
                 - [c] means add/del cold file extension
+
+What:          /sys/fs/f2fs/<disk>/unusable
+Date:          April 2019
+Contact:       "Daniel Rosenberg" <drosen@google.com>
+Description:
+               If checkpoint=disable, it displays the number of blocks that are unusable.
+               If checkpoint=enable, it displays the number of blocks that would be unusable
+               if checkpoint=disable were to be set.
diff --git a/Documentation/devicetree/bindings/arm/msm/diagfwd_sdio.txt b/Documentation/devicetree/bindings/arm/msm/diagfwd_sdio.txt
new file mode 100644 (file)
index 0000000..532ed3b
--- /dev/null
@@ -0,0 +1,9 @@
+QTI Diag Forward SDIO Driver
+
+Required properties:
+-compatible : should be "qcom,diagfwd-sdio".
+
+Example:
+       qcom,diag {
+               compatible = "qcom,diagfwd-sdio";
+       };
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_sdio_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_sdio_xprt.txt
new file mode 100644 (file)
index 0000000..97827fd
--- /dev/null
@@ -0,0 +1,19 @@
+IPC Router SDIO Transport
+
+Required properties:
+-compatible:           should be "qcom,ipc_router_sdio_xprt"
+-qcom,ch-name:         the SDIO channel name used by the SDIO transport
+-qcom,xprt-remote:     string that defines the edge of the transport (PIL Name)
+-qcom,xprt-linkid:     unique integer to identify the tier to which the link
+                       belongs in the network; used to avoid routing loops
+                       while forwarding broadcast messages
+-qcom,xprt-version:    unique version ID used by SDIO transport header
+
+Example:
+       qcom,ipc_router_external_modem_xprt {
+               compatible = "qcom,ipc_router_sdio_xprt";
+               qcom,ch-name = "ipc_bridge_sdio";
+               qcom,xprt-remote = "external-modem";
+               qcom,xprt-linkid = <1>;
+               qcom,xprt-version = <3>;
+       };
diff --git a/Documentation/devicetree/bindings/mmc/qcn-sdio-txt b/Documentation/devicetree/bindings/mmc/qcn-sdio-txt
new file mode 100644 (file)
index 0000000..15532b6
--- /dev/null
@@ -0,0 +1,90 @@
+QCN SDIO function-1 driver
+==========================
+
+The QCN7605 device uses a Q6-WCSS DSP co-processor. The Q6-WCSS requires a
+firmware binary to boot. The QCN SDIO function-1 driver provides the means
+to communicate with the device. The communication involves firmware download,
+multiprocessor communication, and other subsystems' corresponding data download
+and upload.
+
+Example: WLAN traffic, WLAN BDF, M3 or CALDATA download etc.
+
+QCN SDIO Device Node:
+=====================
+A QCN SDIO device node is used to represent the function-1 driver instance. It
+is needed as a child node of the SoC parent node, through which it is
+accessible to the host.
+
+Required properties:
+--------------------
+- compatible:          Should be one of,
+                               "qcom,qcn-sdio" for MSM8996 SOCs
+
+Example:
+--------
+/* MSM8996 */
+soc {
+       qcom,qcn-sdio {
+               compatible = "qcom,qcn-sdio";
+       }
+};
+
+QCN SDIO bridge driver
+======================
+
+The Q6-WCSS runs a few services that need to be interfaced on the host device.
+The QCN SDIO bridge driver provides the interfacing between the function-1
+driver and the services running on the host device, thereby maintaining
+streamlined communication between the Q6-WCSS and the host device.
+
+Example Services: Diag, IPC router, QMI
+
+QCN SDIO Bridge Node:
+=====================
+A QCN SDIO bridge driver node is used to represent the client driver instance.
+It is needed as a child node of the SoC parent node, through which
+it is accessible to the host.
+
+Required properties:
+--------------------
+- compatible           Should be one of
+                               "qcom,sdio-bridge" for MSM8996 SOCs
+
+- qcom,client-id       Client ID, should be one of
+                               1: TTY, 2: WLAN, 3: QMI, 4: DIAG;
+
+- qcom,client-name     Client Name, Should be one of
+                       "SDIO_AL_CLIENT_TTY", "SDIO_AL_CLIENT_WLAN"
+                       "SDIO_AL_CLIENT_QMI", "SDIO_AL_CLIENT_DIAG"
+
+- qcom,ch-name         "SDIO_AL_TTY_CH0", "SDIO_AL_WLAN_CH0",
+                       "SDIO_AL_WLAN_CH1", "SDIO_AL_QMI_CH0",
+
+Example:
+--------
+/* MSM8996 */
+soc {
+       sdio_bridge_tty: qcom,sdio-bridge@0 {
+                compatible = "qcom,sdio-bridge";
+                qcom,client-id = <1>;
+                qcom,client-name = "SDIO_AL_CLIENT_TTY";
+                qcom,ch-name = "SDIO_AL_TTY_CH0";
+                status = "disabled";
+       };
+
+       sdio_bridge_ipc: qcom,sdio-bridge@1 {
+                compatible = "qcom,sdio-bridge";
+                qcom,client-id = <3>;
+                qcom,client-name = "SDIO_AL_CLIENT_QMI";
+                qcom,ch-name = "SDIO_AL_QMI_CH0";
+                status = "disabled";
+       };
+
+       sdio_bridge_diag: qcom,sdio-bridge@3 {
+                 compatible = "qcom,sdio-bridge";
+                 qcom,client-id = <4>;
+                 qcom,client-name = "SDIO_AL_CLIENT_DIAG";
+                 qcom,ch-name = "SDIO_AL_DIAG_CH0";
+                 status = "disabled";
+       };
+};
index 2a737bb..7128a04 100644 (file)
@@ -312,6 +312,20 @@ Required properties:
 
  - compatible : "qcom,msm-pcm-hostless"
 
+* msm-audio-apr
+
+Required properties:
+
+ - compatible : "qcom,msm-audio-apr"
+               This device is added to represent APR module.
+
+Optional properties:
+
+ - compatible : "qcom,msm-audio-apr-dummy"
+               Add this compatible as child device to msm-audio-apr device.
+               This child device is added after lpass is up to invoke
+               deferred probe devices.
+
 * msm-ocmem-audio
 
 Required properties:
index 564ccc6..c2e9413 100644 (file)
@@ -214,11 +214,22 @@ fsync_mode=%s          Control the policy of fsync. Currently supports "posix",
                        non-atomic files likewise "nobarrier" mount option.
 test_dummy_encryption  Enable dummy encryption, which provides a fake fscrypt
                        context. The fake fscrypt context is used by xfstests.
-checkpoint=%s          Set to "disable" to turn off checkpointing. Set to "enable"
+checkpoint=%s[:%u[%]]     Set to "disable" to turn off checkpointing. Set to "enable"
                        to reenable checkpointing. Is enabled by default. While
                        disabled, any unmounting or unexpected shutdowns will cause
                        the filesystem contents to appear as they did when the
                        filesystem was mounted with that option.
+                       While mounting with checkpoint=disable, the filesystem must
+                       run garbage collection to ensure that all available space can
+                       be used. If this takes too much time, the mount may return
+                       EAGAIN. You may optionally add a value to indicate how much
+                       of the disk you would be willing to temporarily give up to
+                       avoid additional garbage collection. This can be given as a
+                       number of blocks, or as a percent. For instance, mounting
+                       with checkpoint=disable:100% would always succeed, but it may
+                       hide up to all remaining free space. The actual space that
+                       would be unusable can be viewed at /sys/fs/f2fs/<disk>/unusable.
+                       This space is reclaimed once checkpoint=enable is set.
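As a usage sketch only (not part of the patch): mounting with the option format described above, tolerating up to 10% temporarily unusable space, could look like this in C; the device and mount point are placeholders.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* checkpoint=disable with a 10% tolerance; the mount may fail with
	 * EAGAIN if garbage collection would need to reclaim more space. */
	if (mount("/dev/sdX1", "/mnt/data", "f2fs", 0,
		  "checkpoint=disable:10%") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}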
 
 ================================================================================
 DEBUGFS ENTRIES
@@ -246,11 +257,14 @@ Files in /sys/fs/f2fs/<devname>
 ..............................................................................
  File                         Content
 
- gc_max_sleep_time            This tuning parameter controls the maximum sleep
+ gc_urgent_sleep_time         This parameter controls sleep time for gc_urgent.
+                              500 ms is set by default. See above gc_urgent.
+
+ gc_min_sleep_time            This tuning parameter controls the minimum sleep
                               time for the garbage collection thread. Time is
                               in milliseconds.
 
- gc_min_sleep_time            This tuning parameter controls the minimum sleep
+ gc_max_sleep_time            This tuning parameter controls the maximum sleep
                               time for the garbage collection thread. Time is
                               in milliseconds.
 
@@ -270,9 +284,6 @@ Files in /sys/fs/f2fs/<devname>
                               to 1, background thread starts to do GC by given
                               gc_urgent_sleep_time interval.
 
- gc_urgent_sleep_time         This parameter controls sleep time for gc_urgent.
-                              500 ms is set by default. See above gc_urgent.
-
  reclaim_segments             This parameter controls the number of prefree
                               segments to be reclaimed. If the number of prefree
                              segments is larger than the number of segments
@@ -287,7 +298,16 @@ Files in /sys/fs/f2fs/<devname>
                              checkpoint is triggered, and issued during the
                              checkpoint. By default, it is disabled with 0.
 
- trim_sections                This parameter controls the number of sections
+ discard_granularity         This parameter controls the granularity of discard
+                             command size. It will issue discard commands only if
+                             the size is larger than the given granularity. Its
+                             unit size is 4KB, and 4 (=16KB) is set by default.
+                             The maximum value is 128 (=512KB).
+
+ reserved_blocks             This parameter indicates the number of blocks that
+                             f2fs reserves internally for root.
+
+ batched_trim_sections       This parameter controls the number of sections
                               to be trimmed out in batch mode when FITRIM
                               conducts. 32 sections is set by default.
 
@@ -309,11 +329,35 @@ Files in /sys/fs/f2fs/<devname>
                              the number is less than this value, it triggers
                              in-place-updates.
 
+ min_seq_blocks                      This parameter controls the threshold to serialize
+                             write IOs issued by multiple threads in parallel.
+
+ min_hot_blocks                      This parameter controls the threshold to allocate
+                             a hot data log for pending data blocks to write.
+
+ min_ssr_sections            This parameter adds the threshold when deciding
+                             SSR block allocation. If this is large, SSR mode
+                             will be enabled early.
+
+ ram_thresh                   This parameter controls the memory footprint used
+                             by free nids and cached nat entries. By default,
+                             10 is set, which indicates 10 MB / 1 GB RAM.
+
+ ra_nid_pages                When building free nids, F2FS reads NAT blocks
+                             ahead for speed up. Default is 0.
+
+ dirty_nats_ratio            Given the dirty ratio of cached NAT entries, F2FS
+                             decides whether to flush them in the background.
+
  max_victim_search           This parameter controls the number of trials to
                              find a victim segment when conducting SSR and
                              cleaning operations. The default value is 4096
                              which covers 8GB block address range.
 
+ migration_granularity       For large-sized sections, F2FS can stop GC given
+                             this granularity instead of reclaiming the entire
+                             section.
+
  dir_level                    This parameter controls the directory level to
                              support large directory. If a directory has a
                              number of files, it can reduce the file lookup
@@ -321,9 +365,53 @@ Files in /sys/fs/f2fs/<devname>
                              Otherwise, it needs to decrease this value to
                              reduce the space overhead. The default value is 0.
 
- ram_thresh                   This parameter controls the memory footprint used
-                             by free nids and cached nat entries. By default,
-                             10 is set, which indicates 10 MB / 1 GB RAM.
+ cp_interval                 F2FS tries to do checkpoint periodically, 60 secs
+                             by default.
+
+ idle_interval               F2FS considers the system idle if there are no F2FS
+                             operations during the given interval, 5 secs by
+                             default.
+
+ discard_idle_interval       F2FS detects the discard thread is idle, given
+                             time interval. Default is 5 secs.
+
+ gc_idle_interval            F2FS detects the GC thread is idle, given time
+                             interval. Default is 5 secs.
+
+ umount_discard_timeout       When unmounting the disk, F2FS waits for queued
+                             discard commands to finish, which can take a long
+                             time. This sets a timeout for it, 5 secs by default.
+
+ iostat_enable               This enables/disables iostat in F2FS.
+
+ readdir_ra                  This enables/disables readahead of inode blocks
+                             in readdir; the default is enabled.
+
+ gc_pin_file_thresh          This indicates how many GC failures are tolerated
+                             for a pinned file. If the count exceeds this, F2FS
+                             no longer guarantees its pinning state. 2048 trials
+                             is set by default.
+
+ extension_list                      This allows changing the extension_list for hot/cold
+                             files at runtime.
+
+ inject_rate                 This controls injection rate of arbitrary faults.
+
+ inject_type                 This controls injection type of arbitrary faults.
+
+ dirty_segments              This shows # of dirty segments.
+
+ lifetime_write_kbytes       This shows # of data written to the disk.
+
+ features                    This shows current features enabled on F2FS.
+
+ current_reserved_blocks      This shows # of blocks currently reserved.
+
+ unusable                     If checkpoint=disable, this shows the number of
+                              blocks that are unusable.
+                              If checkpoint=enable it shows the number of blocks
+                              that would be unusable if checkpoint=disable were
+                              to be set.
 
 ================================================================================
 USAGE
@@ -656,3 +744,28 @@ algorithm.
 In order to identify whether the data in the victim segment are valid or not,
 F2FS manages a bitmap. Each bit represents the validity of a block, and the
 bitmap is composed of a bit stream covering whole blocks in main area.
+
+Fallocate(2) Policy
+-------------------
+
+The default policy follows the below posix rule.
+
+Allocating disk space
+    The default operation (i.e., mode is zero) of fallocate() allocates
+    the disk space within the range specified by offset and len.  The
+    file size (as reported by stat(2)) will be changed if offset+len is
+    greater than the file size.  Any subregion within the range specified
+    by offset and len that did not contain data before the call will be
+    initialized to zero.  This default behavior closely resembles the
+    behavior of the posix_fallocate(3) library function, and is intended
+    as a method of optimally implementing that function.
+
+However, once F2FS receives ioctl(fd, F2FS_IOC_SET_PIN_FILE) prior to
+fallocate(fd, DEFAULT_MODE), it allocates on-disk block addresses having
+zero or random data, which is useful for the scenario below:
+ 1. create(fd)
+ 2. ioctl(fd, F2FS_IOC_SET_PIN_FILE)
+ 3. fallocate(fd, 0, 0, size)
+ 4. address = fibmap(fd, offset)
+ 5. open(blkdev)
+ 6. write(blkdev, address)
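A hedged C sketch of that six-step scenario (not part of the patch; the file path, size, and the F2FS_IOC_SET_PIN_FILE fallback definition are assumptions to be checked against your kernel headers):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIBMAP */
#include <linux/types.h>

#ifndef F2FS_IOC_SET_PIN_FILE
#define F2FS_IOC_SET_PIN_FILE	_IOW(0xf5, 13, __u32)	/* mirrors fs/f2fs/f2fs.h */
#endif

int main(void)
{
	__u32 pin = 1;
	int block = 0;	/* logical block 0; FIBMAP replaces it with the physical block */
	int fd = open("/mnt/f2fs/pinned.img", O_CREAT | O_RDWR, 0600);	/* 1. create */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)	/* 2. pin the file */
		perror("F2FS_IOC_SET_PIN_FILE");
	if (fallocate(fd, 0, 0, 1 << 20) < 0)		/* 3. preallocate 1 MiB */
		perror("fallocate");
	if (ioctl(fd, FIBMAP, &block) < 0)		/* 4. logical -> physical block */
		perror("FIBMAP");
	else
		printf("physical block: %d\n", block);
	/* 5./6. the returned address could then be written via the block device */
	close(fd);
	return 0;
}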
index f9500fb..b3c7bdf 100644 (file)
@@ -2235,6 +2235,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                improves system performance, but it may also
                                expose users to several CPU vulnerabilities.
                                Equivalent to: nopti [X86]
+                                              nospectre_v1 [X86]
                                               nospectre_v2 [X86]
                                               spectre_v2_user=off [X86]
                                               spec_store_bypass_disable=off [X86]
@@ -2568,9 +2569,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        nohugeiomap     [KNL,x86] Disable kernel huge I/O mappings.
 
-       nospectre_v1    [PPC] Disable mitigations for Spectre Variant 1 (bounds
-                       check bypass). With this option data leaks are possible
-                       in the system.
+       nospectre_v1    [X86,PPC] Disable mitigations for Spectre Variant 1
+                       (bounds check bypass). With this option data leaks are
+                       possible in the system.
 
        nospectre_v2    [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
                        (indirect branch prediction) vulnerability. System may
@@ -3489,6 +3490,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Run specified binary instead of /init from the ramdisk,
                        used for early userspace startup. See initrd.
 
+       rdrand=         [X86]
+                       force - Override the decision by the kernel to hide the
+                               advertisement of RDRAND support (this affects
+                               certain AMD processors because of buggy BIOS
+                               support, specifically around the suspend/resume
+                               path).
+
        reboot=         [KNL]
                        Format (x86 or x86_64):
                                [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
diff --git a/Documentation/siphash.txt b/Documentation/siphash.txt
new file mode 100644 (file)
index 0000000..908d348
--- /dev/null
@@ -0,0 +1,175 @@
+         SipHash - a short input PRF
+-----------------------------------------------
+Written by Jason A. Donenfeld <jason@zx2c4.com>
+
+SipHash is a cryptographically secure PRF -- a keyed hash function -- that
+performs very well for short inputs, hence the name. It was designed by
+cryptographers Daniel J. Bernstein and Jean-Philippe Aumasson. It is intended
+as a replacement for some uses of: `jhash`, `md5_transform`, `sha_transform`,
+and so forth.
+
+SipHash takes a secret key filled with randomly generated numbers and either
+an input buffer or several input integers. It spits out an integer that is
+indistinguishable from random. You may then use that integer as part of secure
+sequence numbers, secure cookies, or mask it off for use in a hash table.
+
+1. Generating a key
+
+Keys should always be generated from a cryptographically secure source of
+random numbers, either using get_random_bytes or get_random_once:
+
+siphash_key_t key;
+get_random_bytes(&key, sizeof(key));
+
+If you're not deriving your key from here, you're doing it wrong.
+
+2. Using the functions
+
+There are two variants of the function, one that takes a list of integers, and
+one that takes a buffer:
+
+u64 siphash(const void *data, size_t len, const siphash_key_t *key);
+
+And:
+
+u64 siphash_1u64(u64, const siphash_key_t *key);
+u64 siphash_2u64(u64, u64, const siphash_key_t *key);
+u64 siphash_3u64(u64, u64, u64, const siphash_key_t *key);
+u64 siphash_4u64(u64, u64, u64, u64, const siphash_key_t *key);
+u64 siphash_1u32(u32, const siphash_key_t *key);
+u64 siphash_2u32(u32, u32, const siphash_key_t *key);
+u64 siphash_3u32(u32, u32, u32, const siphash_key_t *key);
+u64 siphash_4u32(u32, u32, u32, u32, const siphash_key_t *key);
+
+If you pass the generic siphash function something of a constant length, it
+will constant fold at compile-time and automatically choose one of the
+optimized functions.
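For example, hashing a buffer or a pair of u64s with the same key (a sketch assembled only from the prototypes above; the function and variable names here are illustrative):

#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/siphash.h>

static siphash_key_t demo_key;

/* Call once before hashing. */
static void demo_init(void)
{
	get_random_bytes(&demo_key, sizeof(demo_key));
}

static u64 demo_hash(const void *buf, size_t len, u64 a, u64 b)
{
	u64 h1 = siphash(buf, len, &demo_key);		/* generic buffer variant */
	u64 h2 = siphash_2u64(a, b, &demo_key);		/* fixed-width fast path */

	return h1 ^ h2;
}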
+
+3. Hashtable key function usage:
+
+struct some_hashtable {
+       DECLARE_HASHTABLE(hashtable, 8);
+       siphash_key_t key;
+};
+
+void init_hashtable(struct some_hashtable *table)
+{
+       get_random_bytes(&table->key, sizeof(table->key));
+}
+
+static inline hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+{
+       return &table->hashtable[siphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+You may then iterate like usual over the returned hash bucket.
+
+4. Security
+
+SipHash has a very high security margin, with its 128-bit key. So long as the
+key is kept secret, it is impossible for an attacker to guess the outputs of
+the function, even if being able to observe many outputs, since 2^128 outputs
+is significant.
+
+Linux implements the "2-4" variant of SipHash.
+
+5. Struct-passing Pitfalls
+
+Often times the XuY functions will not be large enough, and instead you'll
+want to pass a pre-filled struct to siphash. When doing this, it's important
+to always ensure the struct has no padding holes. The easiest way to do this
+is to simply arrange the members of the struct in descending order of size,
+and to use offsetofend() instead of sizeof() for getting the size. For
+performance reasons, if possible, it's probably a good thing to align the
+struct to the right boundary. Here's an example:
+
+const struct {
+       struct in6_addr saddr;
+       u32 counter;
+       u16 dport;
+} __aligned(SIPHASH_ALIGNMENT) combined = {
+       .saddr = *(struct in6_addr *)saddr,
+       .counter = counter,
+       .dport = dport
+};
+u64 h = siphash(&combined, offsetofend(typeof(combined), dport), &secret);
+
+6. Resources
+
+Read the SipHash paper if you're interested in learning more:
+https://131002.net/siphash/siphash.pdf
+
+
+~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
+
+HalfSipHash - SipHash's insecure younger cousin
+-----------------------------------------------
+Written by Jason A. Donenfeld <jason@zx2c4.com>
+
+On the off-chance that SipHash is not fast enough for your needs, you might be
+able to justify using HalfSipHash, a terrifying but potentially useful
+possibility. HalfSipHash cuts SipHash's rounds down from "2-4" to "1-3" and,
+even scarier, uses an easily brute-forceable 64-bit key (with a 32-bit output)
+instead of SipHash's 128-bit key. However, this may appeal to some
+high-performance `jhash` users.
+
+Danger!
+
+Do not ever use HalfSipHash except for as a hashtable key function, and only
+then when you can be absolutely certain that the outputs will never be
+transmitted out of the kernel. This is only remotely useful over `jhash` as a
+means of mitigating hashtable flooding denial of service attacks.
+
+1. Generating a key
+
+Keys should always be generated from a cryptographically secure source of
+random numbers, either using get_random_bytes or get_random_once:
+
+hsiphash_key_t key;
+get_random_bytes(&key, sizeof(key));
+
+If you're not deriving your key from here, you're doing it wrong.
+
+2. Using the functions
+
+There are two variants of the function, one that takes a list of integers, and
+one that takes a buffer:
+
+u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key);
+
+And:
+
+u32 hsiphash_1u32(u32, const hsiphash_key_t *key);
+u32 hsiphash_2u32(u32, u32, const hsiphash_key_t *key);
+u32 hsiphash_3u32(u32, u32, u32, const hsiphash_key_t *key);
+u32 hsiphash_4u32(u32, u32, u32, u32, const hsiphash_key_t *key);
+
+If you pass the generic hsiphash function something of a constant length, it
+will constant fold at compile-time and automatically choose one of the
+optimized functions.
+
+3. Hashtable key function usage:
+
+struct some_hashtable {
+       DECLARE_HASHTABLE(hashtable, 8);
+       hsiphash_key_t key;
+};
+
+void init_hashtable(struct some_hashtable *table)
+{
+       get_random_bytes(&table->key, sizeof(table->key));
+}
+
+static inline hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+{
+       return &table->hashtable[hsiphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+You may then iterate like usual over the returned hash bucket.
+
+4. Performance
+
+HalfSipHash is roughly 3 times slower than JenkinsHash. For many replacements,
+this will not be a problem, as the hashtable lookup isn't the bottleneck. And
+in general, this is probably a good sacrifice to make for the security and DoS
+resistance of HalfSipHash.
index 33b502b..1a92bf2 100644 (file)
@@ -9835,6 +9835,13 @@ F:       arch/arm/mach-s3c24xx/mach-bast.c
 F:     arch/arm/mach-s3c24xx/bast-ide.c
 F:     arch/arm/mach-s3c24xx/bast-irq.c
 
+SIPHASH PRF ROUTINES
+M:     Jason A. Donenfeld <Jason@zx2c4.com>
+S:     Maintained
+F:     lib/siphash.c
+F:     lib/test_siphash.c
+F:     include/linux/siphash.h
+
 TI DAVINCI MACHINE SUPPORT
 M:     Sekhar Nori <nsekhar@ti.com>
 M:     Kevin Hilman <khilman@deeprootsystems.com>
index 692a1dc..22542ea 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 186
+SUBLEVEL = 194
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
index 3023f91..9843e52 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index f181071..27c6cb5 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index 6e1dd85..72f3453 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index 86e5a62..c93370c 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 # CONFIG_SLUB_DEBUG is not set
index f68838e..27c7302 100644 (file)
@@ -12,7 +12,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 # CONFIG_SLUB_DEBUG is not set
index 96bd1c2..c360587 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 # CONFIG_SLUB_DEBUG is not set
index a4d7b91..b7dbb20 100644 (file)
@@ -12,7 +12,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 # CONFIG_SLUB_DEBUG is not set
index b3fb49c..ce22594 100644 (file)
@@ -12,7 +12,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 # CONFIG_SLUB_DEBUG is not set
index 710c167..f9e5aef 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
index 2fb0cd3..cd6e361 100644 (file)
@@ -163,3 +163,4 @@ void abort(void)
 {
        __asm__ __volatile__("trap_s  5\n");
 }
+EXPORT_SYMBOL(abort);
index d925cd0..7bf4bfd 100644 (file)
 };
 
 &soc {
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        qcom,ntn_avb {
                compatible = "qcom,ntn_avb";
 
index cbe65e2..527765d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
                "gcc_mmss_bimc_gfx_clk", "gcc_bimc_gfx_clk";
        #clock-cells = <1>;
 };
-
-&soc {
-       iommu_test_device {
-               compatible = "iommu-debug-test";
-               /*
-                * 42 shouldn't be used by anyone on the cpp_fd_smmu.  We just
-                * need _something_ here to get this node recognized by the
-                * SMMU driver. Our test uses ATOS, which doesn't use SIDs
-                * anyways, so using a dummy value is ok.
-                */
-               iommus = <&cpp_fd_smmu 42>;
-       };
-};
index 7fce760..aef9200 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
 
 &soc {
 
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        pcm0: qcom,msm-pcm {
                compatible = "qcom,msm-pcm-dsp";
                qcom,msm-pcm-dsp-id = <0>;
index 4b37032..69c7cd4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
                qcom,clk-div = <27>;
        };
 
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        tasha_snd: sound-9335 {
                compatible = "qcom,sdm660-asoc-snd-tasha";
                qcom,model = "sdm660-tasha-snd-card";
index 679466b..4cfbe07 100644 (file)
 };
 
 &soc {
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        qcom,early-cam {
                cell-index = <0>;
                compatible = "qcom,early-cam";
index ded0a5f..8ecacba 100644 (file)
 };
 
 &soc {
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        qcom,early-cam {
                cell-index = <0>;
                compatible = "qcom,early-cam";
index 4367a87..7d2d209 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
 };
 
 &soc {
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        i2c@75ba000 {
                synaptics@20 {
                        compatible = "synaptics,dsx";
index f923d70..1957aac 100644 (file)
                pinctrl-0 = <&quat_tdm_din_active>;
                pinctrl-1 = <&quat_tdm_din_sleep>;
        };
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+
+               };
+       };
 };
 
 &pm8994_gpios {
index 21aa1db..5059b33 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, 2019, The Linux Foundation.
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
                                <&venus_smmu 0x2d>,
                                <&venus_smmu 0x31>;
                        buffer-types = <0xfff>;
-                       virtual-addr-pool = <0x70800000 0x8F800000>;
+                       virtual-addr-pool = <0x70800000 0x6f800000>;
                };
 
                firmware_cb {
index 62abceb..faba810 100644 (file)
                        interrupts = <0 131 0>;
                        usb-phy = <&qusb_phy0>, <&ssphy>;
                        tx-fifo-resize;
+                       snps,dis_u2_susphy_quirk;
+                       snps,dis_enblslpm_quirk;
                        snps,usb3-u1u2-disable;
                        snps,nominal-elastic-buffer;
                        snps,is-utmi-l1-suspend;
                        interrupts = <0 138 0>;
                        usb-phy = <&qusb_phy1>, <&usb_nop_phy>;
                        maximum-speed = "high-speed";
+                       snps,dis_u2_susphy_quirk;
+                       snps,dis_enblslpm_quirk;
                        snps,nominal-elastic-buffer;
                        snps,is-utmi-l1-suspend;
                        snps,hird-threshold = /bits/ 8 <0x0>;
                hyplog-size-offset = <0x414>;    /* 0x066BFB34 */
        };
 
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        sound-9335 {
                compatible = "qcom,msm8996-asoc-snd-tasha";
                qcom,model = "msm8996-tasha-snd-card";
index b9ae88f..9ed1b09 100644 (file)
 
 /* GPU overrides for auto */
 &msm_gpu {
+       /delete-node/ qcom,gpu-pwrlevel-bins;
+
        qcom,gpu-pwrlevel-bins {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               compatible="qcom,gpu-pwrlevel-bins";
+
                qcom,gpu-pwrlevels-0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       qcom,speed-bin = <0>;
+
                        qcom,initial-pwrlevel = <1>;
+
+                       qcom,gpu-pwrlevel@0 {
+                               reg = <0>;
+                               qcom,gpu-freq = <624000000>;
+                               qcom,bus-freq = <12>;
+                               qcom,bus-min = <11>;
+                               qcom,bus-max = <12>;
+                       };
+
+                       qcom,gpu-pwrlevel@1 {
+                               reg = <1>;
+                               qcom,gpu-freq = <560000000>;
+                               qcom,bus-freq = <11>;
+                               qcom,bus-min = <9>;
+                               qcom,bus-max = <12>;
+                       };
+
+                       qcom,gpu-pwrlevel@2 {
+                               reg = <2>;
+                               qcom,gpu-freq = <510000000>;
+                               qcom,bus-freq = <9>;
+                               qcom,bus-min = <8>;
+                               qcom,bus-max = <11>;
+                       };
+
+                       qcom,gpu-pwrlevel@3 {
+                               reg = <3>;
+                               qcom,gpu-freq = <401800000>;
+                               qcom,bus-freq = <8>;
+                               qcom,bus-min = <7>;
+                               qcom,bus-max = <9>;
+                       };
+
+                       qcom,gpu-pwrlevel@4 {
+                               reg = <4>;
+                               qcom,gpu-freq = <315000000>;
+                               qcom,bus-freq = <6>;
+                               qcom,bus-min = <5>;
+                               qcom,bus-max = <7>;
+                       };
+
+                       qcom,gpu-pwrlevel@5 {
+                               reg = <5>;
+                               qcom,gpu-freq = <0>;
+                               qcom,bus-freq = <0>;
+                               qcom,bus-min = <0>;
+                               qcom,bus-max = <0>;
+                       };
                };
 
-               qcom,gpu-pwrlevels-2 {
+               qcom,gpu-pwrlevels-1 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       qcom,speed-bin = <1>;
+
                        qcom,initial-pwrlevel = <2>;
 
+                       qcom,gpu-pwrlevel@0 {
+                               reg = <0>;
+                               qcom,gpu-freq = <510000000>;
+                               qcom,bus-freq = <9>;
+                               qcom,bus-min = <8>;
+                               qcom,bus-max = <11>;
+                       };
+
+                       qcom,gpu-pwrlevel@1 {
+                               reg = <1>;
+                               qcom,gpu-freq = <401800000>;
+                               qcom,bus-freq = <8>;
+                               qcom,bus-min = <7>;
+                               qcom,bus-max = <9>;
+                       };
+
+                       qcom,gpu-pwrlevel@2 {
+                               reg = <2>;
+                               qcom,gpu-freq = <315000000>;
+                               qcom,bus-freq = <6>;
+                               qcom,bus-min = <5>;
+                               qcom,bus-max = <7>;
+                       };
+
+                       qcom,gpu-pwrlevel@3 {
+                               reg = <3>;
+                               qcom,gpu-freq = <0>;
+                               qcom,bus-freq = <0>;
+                               qcom,bus-min = <0>;
+                               qcom,bus-max = <0>;
+                       };
                };
 
+               qcom,gpu-pwrlevels-2 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       qcom,speed-bin = <2>;
+
+                       qcom,initial-pwrlevel = <0>;
+
+                       qcom,gpu-pwrlevel@0 {
+                               reg = <0>;
+                               qcom,gpu-freq = <315000000>;
+                               qcom,bus-freq = <6>;
+                               qcom,bus-min = <5>;
+                               qcom,bus-max = <7>;
+                       };
+
+                       qcom,gpu-pwrlevel@1 {
+                               reg = <1>;
+                               qcom,gpu-freq = <0>;
+                               qcom,bus-freq = <0>;
+                               qcom,bus-min = <0>;
+                               qcom,bus-max = <0>;
+                       };
+               };
        };
 };
index 9a26351..2e37ff6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
 };
 
 &soc {
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        qcom,avtimer@170f7000 {
                compatible = "qcom,avtimer";
                reg = <0x170f700c 0x4>,
index 9b11452..acd52a7 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
 };
 
 &soc {
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        gpio_keys {
                compatible = "gpio-keys";
                input-name = "gpio-keys";
index 3c6b23d..1e8a875 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
 };
 
 &soc {
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        sound-9335 {
                qcom,wcn-btfm;
        };
index 8a0f4b3..3c08592 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
 };
 
 &soc {
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        sound_sim {
                compatible = "qcom,msm8998-asoc-snd-stub";
                qcom,model = "msm8998-stub-snd-card";
index 1aec20a..a8698c9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  */
 
 &soc {
+       qcom,msm-audio-apr {
+               compatible = "qcom,msm-audio-apr";
+               msm_audio_apr_dummy {
+                       compatible = "qcom,msm-audio-apr-dummy";
+               };
+       };
+
        sound-adp-agave {
                compatible = "qcom,apq8096-asoc-snd-adp-agave";
                qcom,model = "apq8096-adp-agave-snd-card";
index 04ea209..98abb05 100644 (file)
                             <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
                             <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
                clock-frequency = <24000000>;
+               arm,no-tick-in-suspend;
        };
 
        timer: timer@ff810000 {
index ce01364..9353184 100644 (file)
@@ -17,7 +17,3 @@ config SHARP_PARAM
 
 config SHARP_SCOOP
        bool
-
-config FIQ_GLUE
-       bool
-       select FIQ
index 04aca89..27f23b1 100644 (file)
@@ -4,7 +4,6 @@
 
 obj-y                          += firmware.o
 
-obj-$(CONFIG_FIQ_GLUE)         += fiq_glue.o fiq_glue_setup.o
 obj-$(CONFIG_ICST)             += icst.o
 obj-$(CONFIG_SA1111)           += sa1111.o
 obj-$(CONFIG_DMABOUNCE)                += dmabounce.o
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
deleted file mode 100644 (file)
index 24b42ce..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-               .text
-
-               .global fiq_glue_end
-
-               /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
-
-ENTRY(fiq_glue)
-               /* store pc, cpsr from previous mode, reserve space for spsr */
-               mrs     r12, spsr
-               sub     lr, lr, #4
-               subs    r10, #1
-               bne     nested_fiq
-
-               str     r12, [sp, #-8]!
-               str     lr, [sp, #-4]!
-
-               /* store r8-r14 from previous mode */
-               sub     sp, sp, #(7 * 4)
-               stmia   sp, {r8-r14}^
-               nop
-
-               /* store r0-r7 from previous mode */
-               stmfd   sp!, {r0-r7}
-
-               /* setup func(data,regs) arguments */
-               mov     r0, r9
-               mov     r1, sp
-               mov     r3, r8
-
-               mov     r7, sp
-
-               /* Get sp and lr from non-user modes */
-               and     r4, r12, #MODE_MASK
-               cmp     r4, #USR_MODE
-               beq     fiq_from_usr_mode
-
-               mov     r7, sp
-               orr     r4, r4, #(PSR_I_BIT | PSR_F_BIT)
-               msr     cpsr_c, r4
-               str     sp, [r7, #(4 * 13)]
-               str     lr, [r7, #(4 * 14)]
-               mrs     r5, spsr
-               str     r5, [r7, #(4 * 17)]
-
-               cmp     r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
-               /* use fiq stack if we reenter this mode */
-               subne   sp, r7, #(4 * 3)
-
-fiq_from_usr_mode:
-               msr     cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
-               mov     r2, sp
-               sub     sp, r7, #12
-               stmfd   sp!, {r2, ip, lr}
-               /* call func(data,regs) */
-               blx     r3
-               ldmfd   sp, {r2, ip, lr}
-               mov     sp, r2
-
-               /* restore/discard saved state */
-               cmp     r4, #USR_MODE
-               beq     fiq_from_usr_mode_exit
-
-               msr     cpsr_c, r4
-               ldr     sp, [r7, #(4 * 13)]
-               ldr     lr, [r7, #(4 * 14)]
-               msr     spsr_cxsf, r5
-
-fiq_from_usr_mode_exit:
-               msr     cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
-
-               ldmfd   sp!, {r0-r7}
-               ldr     lr, [sp, #(4 * 7)]
-               ldr     r12, [sp, #(4 * 8)]
-               add     sp, sp, #(10 * 4)
-exit_fiq:
-               msr     spsr_cxsf, r12
-               add     r10, #1
-               cmp     r11, #0
-               moveqs  pc, lr
-               bx      r11 /* jump to custom fiq return function */
-
-nested_fiq:
-               orr     r12, r12, #(PSR_F_BIT)
-               b       exit_fiq
-
-fiq_glue_end:
-
-ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
-               stmfd           sp!, {r4}
-               mrs             r4, cpsr
-               msr             cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
-               movs            r8, r0
-               mov             r9, r1
-               mov             sp, r2
-               mov             r11, r3
-               moveq           r10, #0
-               movne           r10, #1
-               msr             cpsr_c, r4
-               ldmfd           sp!, {r4}
-               bx              lr
-
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
deleted file mode 100644 (file)
index 8cb1b61..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (C) 2010 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <asm/fiq.h>
-#include <asm/fiq_glue.h>
-
-extern unsigned char fiq_glue, fiq_glue_end;
-extern void fiq_glue_setup(void *func, void *data, void *sp,
-                          fiq_return_handler_t fiq_return_handler);
-
-static struct fiq_handler fiq_debbuger_fiq_handler = {
-       .name = "fiq_glue",
-};
-DEFINE_PER_CPU(void *, fiq_stack);
-static struct fiq_glue_handler *current_handler;
-static fiq_return_handler_t fiq_return_handler;
-static DEFINE_MUTEX(fiq_glue_lock);
-
-static void fiq_glue_setup_helper(void *info)
-{
-       struct fiq_glue_handler *handler = info;
-       fiq_glue_setup(handler->fiq, handler,
-               __get_cpu_var(fiq_stack) + THREAD_START_SP,
-               fiq_return_handler);
-}
-
-int fiq_glue_register_handler(struct fiq_glue_handler *handler)
-{
-       int ret;
-       int cpu;
-
-       if (!handler || !handler->fiq)
-               return -EINVAL;
-
-       mutex_lock(&fiq_glue_lock);
-       if (fiq_stack) {
-               ret = -EBUSY;
-               goto err_busy;
-       }
-
-       for_each_possible_cpu(cpu) {
-               void *stack;
-               stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
-               if (WARN_ON(!stack)) {
-                       ret = -ENOMEM;
-                       goto err_alloc_fiq_stack;
-               }
-               per_cpu(fiq_stack, cpu) = stack;
-       }
-
-       ret = claim_fiq(&fiq_debbuger_fiq_handler);
-       if (WARN_ON(ret))
-               goto err_claim_fiq;
-
-       current_handler = handler;
-       on_each_cpu(fiq_glue_setup_helper, handler, true);
-       set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
-
-       mutex_unlock(&fiq_glue_lock);
-       return 0;
-
-err_claim_fiq:
-err_alloc_fiq_stack:
-       for_each_possible_cpu(cpu) {
-               __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER);
-               per_cpu(fiq_stack, cpu) = NULL;
-       }
-err_busy:
-       mutex_unlock(&fiq_glue_lock);
-       return ret;
-}
-
-static void fiq_glue_update_return_handler(void (*fiq_return)(void))
-{
-       fiq_return_handler = fiq_return;
-       if (current_handler)
-               on_each_cpu(fiq_glue_setup_helper, current_handler, true);
-}
-
-int fiq_glue_set_return_handler(void (*fiq_return)(void))
-{
-       int ret;
-
-       mutex_lock(&fiq_glue_lock);
-       if (fiq_return_handler) {
-               ret = -EBUSY;
-               goto err_busy;
-       }
-       fiq_glue_update_return_handler(fiq_return);
-       ret = 0;
-err_busy:
-       mutex_unlock(&fiq_glue_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(fiq_glue_set_return_handler);
-
-int fiq_glue_clear_return_handler(void (*fiq_return)(void))
-{
-       int ret;
-
-       mutex_lock(&fiq_glue_lock);
-       if (WARN_ON(fiq_return_handler != fiq_return)) {
-               ret = -EINVAL;
-               goto err_inval;
-       }
-       fiq_glue_update_return_handler(NULL);
-       ret = 0;
-err_inval:
-       mutex_unlock(&fiq_glue_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(fiq_glue_clear_return_handler);
-
-/**
- * fiq_glue_resume - Restore fiqs after suspend or low power idle states
- *
- * This must be called before calling local_fiq_enable after returning from a
- * power state where the fiq mode registers were lost. If a driver provided
- * a resume hook when it registered the handler it will be called.
- */
-
-void fiq_glue_resume(void)
-{
-       if (!current_handler)
-               return;
-       fiq_glue_setup(current_handler->fiq, current_handler,
-               __get_cpu_var(fiq_stack) + THREAD_START_SP,
-               fiq_return_handler);
-       if (current_handler->resume)
-               current_handler->resume(current_handler);
-}
-
index ae61e2e..d2efc03 100644 (file)
@@ -98,6 +98,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
        unsigned int len;
        int mask;
 
+       /* Detect an already handled MMIO return */
+       if (unlikely(!vcpu->mmio_needed))
+               return 0;
+
+       vcpu->mmio_needed = 0;
+
        if (!run->mmio.is_write) {
                len = run->mmio.len;
                if (len > sizeof(unsigned long))
@@ -206,6 +212,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
        run->mmio.is_write      = is_write;
        run->mmio.phys_addr     = fault_ipa;
        run->mmio.len           = len;
+       vcpu->mmio_needed       = 1;
 
        if (!ret) {
                /* We handled the access successfully in the kernel. */
index a5336a5..459d081 100644 (file)
@@ -37,6 +37,7 @@
 #define DEEPSLEEP_SLEEPENABLE_BIT      BIT(31)
 
        .text
+       .arch   armv5te
 /*
  * Move DaVinci into deep sleep state
  *
index 949696b..511fd08 100644 (file)
@@ -131,6 +131,9 @@ static int __init omap4_sram_init(void)
        struct device_node *np;
        struct gen_pool *sram_pool;
 
+       if (!soc_is_omap44xx() && !soc_is_omap54xx())
+               return 0;
+
        np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
        if (!np)
                pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
index 6d3517d..82aac38 100644 (file)
@@ -131,7 +131,7 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
        } while (1);
 
        idma->state = ~DMA_ST_AB;
-       disable_irq(irq);
+       disable_irq_nosync(irq);
 
        return IRQ_HANDLED;
 }
@@ -174,6 +174,9 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
                }
 
+               idma->dma_addr = idma->dma.sg->dma_address;
+               idma->dma_len = idma->dma.sg->length;
+
                iomd_writeb(DMA_CR_C, dma_base + CR);
                idma->state = DMA_ST_AB;
        }
index d05984a..a375974 100644 (file)
@@ -790,7 +790,8 @@ static void update_sections_early(struct section_perm perms[], int n)
                if (t->flags & PF_KTHREAD)
                        continue;
                for_each_thread(t, s)
-                       set_section_perms(perms, n, true, s->mm);
+                       if (s->mm)
+                               set_section_perms(perms, n, true, s->mm);
        }
        read_unlock(&tasklist_lock);
        set_section_perms(perms, n, true, current->active_mm);
index 878ac24..95fb475 100644 (file)
@@ -49,6 +49,8 @@ CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8996=y
 CONFIG_PCI=y
 CONFIG_PCI_MSM=y
+CONFIG_ENABLE_FP_SIMD_SETTINGS=y
+CONFIG_MSM_APP_SETTINGS=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
 CONFIG_PREEMPT=y
@@ -477,6 +479,7 @@ CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_QPNP_FLASH=y
 CONFIG_LEDS_QPNP_WLED=y
index 3d0d786..3661419 100644 (file)
@@ -46,6 +46,8 @@ CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8996=y
 CONFIG_PCI=y
 CONFIG_PCI_MSM=y
+CONFIG_ENABLE_FP_SIMD_SETTINGS=y
+CONFIG_MSM_APP_SETTINGS=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
 CONFIG_PREEMPT=y
@@ -462,6 +464,7 @@ CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
 CONFIG_MMC_SPI=y
 CONFIG_MMC_DW=y
 CONFIG_MMC_DW_EXYNOS=y
index 7349f53..6e3cbef 100644 (file)
@@ -621,7 +621,6 @@ CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
-CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
index de05a73..51d31bc 100644 (file)
@@ -648,7 +648,6 @@ CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
-CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
index 1505108..274519f 100644 (file)
@@ -627,7 +627,6 @@ CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
-CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
index 2eceb3b..8df8694 100644 (file)
@@ -650,7 +650,6 @@ CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
-CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
index ea319c0..1b7b468 100644 (file)
@@ -50,7 +50,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
                         unsigned int len, u8 *out)
 {
        struct sha1_ce_state *sctx = shash_desc_ctx(desc);
-       bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
+       bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
 
        /*
         * Allow the asm code to perform the finalization if there is no
index 0ed9486..356ca93 100644 (file)
@@ -52,7 +52,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
                           unsigned int len, u8 *out)
 {
        struct sha256_ce_state *sctx = shash_desc_ctx(desc);
-       bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
+       bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
 
        /*
         * Allow the asm code to perform the finalization if there is no
index d00e27e..bb71823 100644 (file)
@@ -49,9 +49,10 @@ extern const char *machine_name;
 
 /* CPU feature register tracking */
 enum ftr_type {
-       FTR_EXACT,      /* Use a predefined safe value */
-       FTR_LOWER_SAFE, /* Smaller value is safe */
-       FTR_HIGHER_SAFE,/* Bigger value is safe */
+       FTR_EXACT,                      /* Use a predefined safe value */
+       FTR_LOWER_SAFE,                 /* Smaller value is safe */
+       FTR_HIGHER_SAFE,                /* Bigger value is safe */
+       FTR_HIGHER_OR_ZERO_SAFE,        /* Bigger value is safe, but 0 is biggest */
 };
 
 #define FTR_STRICT     true    /* SANITY check strict matching required */
index d1ce8e2..4d0577d 100644 (file)
@@ -141,10 +141,14 @@ static int __init acpi_fadt_sanity_check(void)
         */
        if (table->revision < 5 ||
           (table->revision == 5 && fadt->minor_revision < 1)) {
-               pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
+               pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
                       table->revision, fadt->minor_revision);
-               ret = -EINVAL;
-               goto out;
+
+               if (!fadt->arm_boot_flags) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               pr_err("FADT has ARM boot flags set, assuming 5.1\n");
        }
 
        if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
index 98602e5..cb475b0 100644 (file)
@@ -139,10 +139,12 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 };
 
 static struct arm64_ftr_bits ftr_ctr[] = {
-       U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),      /* RAO */
-       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
-       U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),        /* CWG */
-       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),      /* RES1 */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 30, 1, 0),
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0),        /* CWG */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0),        /* ERG */
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
        /*
         * Linux can handle differing I-cache policies. Userspace JITs will
@@ -353,6 +355,10 @@ static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur)
        case FTR_LOWER_SAFE:
                ret = new < cur ? new : cur;
                break;
+       case FTR_HIGHER_OR_ZERO_SAFE:
+               if (!cur || !new)
+                       break;
+               /* Fallthrough */
        case FTR_HIGHER_SAFE:
                ret = new > cur ? new : cur;
                break;
index 1c694f3..bef4b65 100644 (file)
@@ -548,13 +548,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                        /* Aligned */
                        break;
                case 1:
-                       /* Allow single byte watchpoint. */
-                       if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
-                               break;
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;
+               case 3:
+                       /* Allow single byte watchpoint. */
+                       if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+                               break;
                default:
                        return -EINVAL;
                }
index f561e36..c6ae78b 100644 (file)
@@ -814,7 +814,6 @@ config SIBYTE_SWARM
        select SYS_SUPPORTS_HIGHMEM
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select ZONE_DMA32 if 64BIT
-       select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SIBYTE_LITTLESUR
        bool "Sibyte BCM91250C2-LittleSur"
@@ -837,7 +836,6 @@ config SIBYTE_SENTOSA
        select SYS_HAS_CPU_SB1
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
-       select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SIBYTE_BIGSUR
        bool "Sibyte BCM91480B-BigSur"
@@ -851,7 +849,6 @@ config SIBYTE_BIGSUR
        select SYS_SUPPORTS_HIGHMEM
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select ZONE_DMA32 if 64BIT
-       select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SNI_RM
        bool "SNI RM200/300/400"
index d5bdee1..d4918a2 100644 (file)
@@ -66,6 +66,8 @@ OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
 $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
        $(call if_changed,objcopy)
 
+HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE)
+
 # Calculate the load address of the compressed kernel image
 hostprogs-y := calc_vmlinuz_load_addr
 
index 542c3ed..d14f75e 100644 (file)
@@ -13,7 +13,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include "../../../../include/linux/sizes.h"
+#include <linux/sizes.h>
 
 int main(int argc, char *argv[])
 {
index c2917b3..bba2c88 100644 (file)
@@ -27,8 +27,8 @@
 #define AR933X_UART_CS_PARITY_S                0
 #define AR933X_UART_CS_PARITY_M                0x3
 #define          AR933X_UART_CS_PARITY_NONE    0
-#define          AR933X_UART_CS_PARITY_ODD     1
-#define          AR933X_UART_CS_PARITY_EVEN    2
+#define          AR933X_UART_CS_PARITY_ODD     2
+#define          AR933X_UART_CS_PARITY_EVEN    3
 #define AR933X_UART_CS_IF_MODE_S       2
 #define AR933X_UART_CS_IF_MODE_M       0x3
 #define          AR933X_UART_CS_IF_MODE_NONE   0
index 5604db3..d79c68f 100644 (file)
@@ -301,8 +301,6 @@ static inline int nlm_fmn_send(unsigned int size, unsigned int code,
        for (i = 0; i < 8; i++) {
                nlm_msgsnd(dest);
                status = nlm_read_c2_status0();
-               if ((status & 0x2) == 1)
-                       pr_info("Send pending fail!\n");
                if ((status & 0x4) == 0)
                        return 0;
        }
index 03722d4..82852df 100644 (file)
@@ -25,7 +25,17 @@ extern cpumask_t cpu_sibling_map[];
 extern cpumask_t cpu_core_map[];
 extern cpumask_t cpu_foreign_map;
 
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+       extern int vdso_smp_processor_id(void)
+               __compiletime_error("VDSO should not call smp_processor_id()");
+       return vdso_smp_processor_id();
+#else
+       return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
 
 /* Map from cpu id to sequential logical cpu number.  This will only
    not be idempotent when cpus failed to come on-line. */
index c5bc344..7303974 100644 (file)
@@ -31,7 +31,8 @@ void __init setup_pit_timer(void)
 
 static int __init init_pit_clocksource(void)
 {
-       if (num_possible_cpus() > 1) /* PIT does not scale! */
+       if (num_possible_cpus() > 1 || /* PIT does not scale! */
+           !clockevent_state_periodic(&i8253_clockevent))
                return 0;
 
        return clocksource_i8253_init();
index 2e7f60c..a7057a0 100644 (file)
@@ -160,8 +160,9 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
                        if (edge)
                                irq_set_handler(d->hwirq, handle_edge_irq);
 
-                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
-                               (val << (i * 4)), LTQ_EIU_EXIN_C);
+                       ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
+                                   (~(7 << (i * 4)))) | (val << (i * 4)),
+                                   LTQ_EIU_EXIN_C);
                }
        }
 
index 3ef3fb6..b3d6bf2 100644 (file)
@@ -1,5 +1,4 @@
 obj-y := cfe.o
-obj-$(CONFIG_SWIOTLB)                  += dma.o
 obj-$(CONFIG_SIBYTE_BUS_WATCHER)       += bus_watcher.o
 obj-$(CONFIG_SIBYTE_CFE_CONSOLE)       += cfe_console.o
 obj-$(CONFIG_SIBYTE_TBPROF)            += sb_tbprof.o
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
deleted file mode 100644 (file)
index eb47a94..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- *     DMA support for Broadcom SiByte platforms.
- *
- *     Copyright (c) 2018  Maciej W. Rozycki
- */
-
-#include <linux/swiotlb.h>
-#include <asm/bootinfo.h>
-
-void __init plat_swiotlb_setup(void)
-{
-       swiotlb_init(1);
-}
index 886005b..dfd082e 100644 (file)
@@ -6,7 +6,9 @@ ccflags-vdso := \
        $(filter -I%,$(KBUILD_CFLAGS)) \
        $(filter -E%,$(KBUILD_CFLAGS)) \
        $(filter -mmicromips,$(KBUILD_CFLAGS)) \
-       $(filter -march=%,$(KBUILD_CFLAGS))
+       $(filter -march=%,$(KBUILD_CFLAGS)) \
+       $(filter -m%-float,$(KBUILD_CFLAGS)) \
+       -D__VDSO__
 cflags-vdso := $(ccflags-vdso) \
        $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
        -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
index ce0b2b4..c62522b 100644 (file)
@@ -156,6 +156,9 @@ long arch_ptrace(struct task_struct *child, long request,
                if ((addr & (sizeof(unsigned long)-1)) ||
                     addr >= sizeof(struct pt_regs))
                        break;
+               if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
+                       data |= 3; /* ensure userspace privilege */
+               }
                if ((addr >= PT_GR1 && addr <= PT_GR31) ||
                                addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
                                (addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
@@ -189,16 +192,18 @@ long arch_ptrace(struct task_struct *child, long request,
 
 static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
 {
-       if (offset < 0)
-               return sizeof(struct pt_regs);
-       else if (offset <= 32*4)        /* gr[0..31] */
-               return offset * 2 + 4;
-       else if (offset <= 32*4+32*8)   /* gr[0..31] + fr[0..31] */
-               return offset + 32*4;
-       else if (offset < sizeof(struct pt_regs)/2 + 32*4)
-               return offset * 2 + 4 - 32*8;
+       compat_ulong_t pos;
+
+       if (offset < 32*4)      /* gr[0..31] */
+               pos = offset * 2 + 4;
+       else if (offset < 32*4+32*8)    /* fr[0] ... fr[31] */
+               pos = (offset - 32*4) + PT_FR0;
+       else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
+               pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
        else
-               return sizeof(struct pt_regs);
+               pos = sizeof(struct pt_regs);
+
+       return pos;
 }
 
 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
@@ -242,9 +247,12 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        addr = translate_usr_offset(addr);
                        if (addr >= sizeof(struct pt_regs))
                                break;
+                       if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
+                               data |= 3; /* ensure userspace privilege */
+                       }
                        if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
                                /* Special case, fp regs are 64 bits anyway */
-                               *(__u64 *) ((char *) task_regs(child) + addr) = data;
+                               *(__u32 *) ((char *) task_regs(child) + addr) = data;
                                ret = 0;
                        }
                        else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
index 6696c19..16193d7 100644 (file)
@@ -363,10 +363,19 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
                                           NULL, &hugepage_shift);
        if (!ptep)
                return token;
-       WARN_ON(hugepage_shift);
-       pa = pte_pfn(*ptep) << PAGE_SHIFT;
 
-       return pa | (token & (PAGE_SIZE-1));
+       pa = pte_pfn(*ptep);
+
+       /* On radix we can do hugepage mappings for io, so handle that */
+       if (hugepage_shift) {
+               pa <<= hugepage_shift;
+               pa |= token & ((1ul << hugepage_shift) - 1);
+       } else {
+               pa <<= PAGE_SHIFT;
+               pa |= token & (PAGE_SIZE - 1);
+       }
+
+       return pa;
 }
 
 /*
index 10e7cec..a44f175 100644 (file)
@@ -1719,7 +1719,7 @@ handle_page_fault:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_page_fault
        cmpdi   r3,0
-       beq+    12f
+       beq+    ret_from_except_lite
        bl      save_nvgprs
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
@@ -1734,7 +1734,12 @@ handle_dabr_fault:
        ld      r5,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_break
-12:    b       ret_from_except_lite
+       /*
+        * do_break() may have changed the NV GPRS while handling a breakpoint.
+        * If so, we need to restore them with their updated values. Don't use
+        * ret_from_except_lite here.
+        */
+       b       ret_from_except
 
 
 /* We have a page fault that hash_page could handle but HV refused
index 2e710c1..a38d729 100644 (file)
@@ -45,6 +45,8 @@ static unsigned int pci_parse_of_flags(u32 addr0, int bridge)
        if (addr0 & 0x02000000) {
                flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
                flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
+               if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+                       flags |= IORESOURCE_MEM_64;
                flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
                if (addr0 & 0x40000000)
                        flags |= IORESOURCE_PREFETCH
index ef7c24e..46f8292 100644 (file)
@@ -1261,6 +1261,9 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
                        goto bad;
 
                if (MSR_TM_ACTIVE(msr_hi<<32)) {
+                       /* Trying to start TM on non TM system */
+                       if (!cpu_has_feature(CPU_FTR_TM))
+                               goto bad;
                        /* We only recheckpoint on return if we're
                         * transaction.
                         */
index c676ece..8be659d 100644 (file)
@@ -695,6 +695,11 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
        if (MSR_TM_ACTIVE(msr)) {
                /* We recheckpoint on return. */
                struct ucontext __user *uc_transact;
+
+               /* Trying to start TM on non TM system */
+               if (!cpu_has_feature(CPU_FTR_TM))
+                       goto badframe;
+
                if (__get_user(uc_transact, &uc->uc_link))
                        goto badframe;
                if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
index ba4dee3..884d1c3 100644 (file)
 #define SL_IBAT2       0x48
 #define SL_DBAT3       0x50
 #define SL_IBAT3       0x58
-#define SL_TB          0x60
-#define SL_R2          0x68
-#define SL_CR          0x6c
-#define SL_LR          0x70
-#define SL_R12         0x74    /* r12 to r31 */
+#define SL_DBAT4       0x60
+#define SL_IBAT4       0x68
+#define SL_DBAT5       0x70
+#define SL_IBAT5       0x78
+#define SL_DBAT6       0x80
+#define SL_IBAT6       0x88
+#define SL_DBAT7       0x90
+#define SL_IBAT7       0x98
+#define SL_TB          0xa0
+#define SL_R2          0xa8
+#define SL_CR          0xac
+#define SL_LR          0xb0
+#define SL_R12         0xb4    /* r12 to r31 */
 #define SL_SIZE                (SL_R12 + 80)
 
        .section .data
@@ -112,6 +120,41 @@ _GLOBAL(swsusp_arch_suspend)
        mfibatl r4,3
        stw     r4,SL_IBAT3+4(r11)
 
+BEGIN_MMU_FTR_SECTION
+       mfspr   r4,SPRN_DBAT4U
+       stw     r4,SL_DBAT4(r11)
+       mfspr   r4,SPRN_DBAT4L
+       stw     r4,SL_DBAT4+4(r11)
+       mfspr   r4,SPRN_DBAT5U
+       stw     r4,SL_DBAT5(r11)
+       mfspr   r4,SPRN_DBAT5L
+       stw     r4,SL_DBAT5+4(r11)
+       mfspr   r4,SPRN_DBAT6U
+       stw     r4,SL_DBAT6(r11)
+       mfspr   r4,SPRN_DBAT6L
+       stw     r4,SL_DBAT6+4(r11)
+       mfspr   r4,SPRN_DBAT7U
+       stw     r4,SL_DBAT7(r11)
+       mfspr   r4,SPRN_DBAT7L
+       stw     r4,SL_DBAT7+4(r11)
+       mfspr   r4,SPRN_IBAT4U
+       stw     r4,SL_IBAT4(r11)
+       mfspr   r4,SPRN_IBAT4L
+       stw     r4,SL_IBAT4+4(r11)
+       mfspr   r4,SPRN_IBAT5U
+       stw     r4,SL_IBAT5(r11)
+       mfspr   r4,SPRN_IBAT5L
+       stw     r4,SL_IBAT5+4(r11)
+       mfspr   r4,SPRN_IBAT6U
+       stw     r4,SL_IBAT6(r11)
+       mfspr   r4,SPRN_IBAT6L
+       stw     r4,SL_IBAT6+4(r11)
+       mfspr   r4,SPRN_IBAT7U
+       stw     r4,SL_IBAT7(r11)
+       mfspr   r4,SPRN_IBAT7L
+       stw     r4,SL_IBAT7+4(r11)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
 #if  0
        /* Backup various CPU config stuffs */
        bl      __save_cpu_setup
@@ -277,27 +320,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        mtibatu 3,r4
        lwz     r4,SL_IBAT3+4(r11)
        mtibatl 3,r4
-#endif
-
 BEGIN_MMU_FTR_SECTION
-       li      r4,0
+       lwz     r4,SL_DBAT4(r11)
        mtspr   SPRN_DBAT4U,r4
+       lwz     r4,SL_DBAT4+4(r11)
        mtspr   SPRN_DBAT4L,r4
+       lwz     r4,SL_DBAT5(r11)
        mtspr   SPRN_DBAT5U,r4
+       lwz     r4,SL_DBAT5+4(r11)
        mtspr   SPRN_DBAT5L,r4
+       lwz     r4,SL_DBAT6(r11)
        mtspr   SPRN_DBAT6U,r4
+       lwz     r4,SL_DBAT6+4(r11)
        mtspr   SPRN_DBAT6L,r4
+       lwz     r4,SL_DBAT7(r11)
        mtspr   SPRN_DBAT7U,r4
+       lwz     r4,SL_DBAT7+4(r11)
        mtspr   SPRN_DBAT7L,r4
+       lwz     r4,SL_IBAT4(r11)
        mtspr   SPRN_IBAT4U,r4
+       lwz     r4,SL_IBAT4+4(r11)
        mtspr   SPRN_IBAT4L,r4
+       lwz     r4,SL_IBAT5(r11)
        mtspr   SPRN_IBAT5U,r4
+       lwz     r4,SL_IBAT5+4(r11)
        mtspr   SPRN_IBAT5L,r4
+       lwz     r4,SL_IBAT6(r11)
        mtspr   SPRN_IBAT6U,r4
+       lwz     r4,SL_IBAT6+4(r11)
        mtspr   SPRN_IBAT6L,r4
+       lwz     r4,SL_IBAT7(r11)
        mtspr   SPRN_IBAT7U,r4
+       lwz     r4,SL_IBAT7+4(r11)
        mtspr   SPRN_IBAT7L,r4
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+#endif
 
        /* Flush all TLBs */
        lis     r4,0x1000
index 1c2802f..c856cd7 100644 (file)
 #define SL_IBAT2       0x48
 #define SL_DBAT3       0x50
 #define SL_IBAT3       0x58
-#define SL_TB          0x60
-#define SL_R2          0x68
-#define SL_CR          0x6c
-#define SL_R12         0x70    /* r12 to r31 */
+#define SL_DBAT4       0x60
+#define SL_IBAT4       0x68
+#define SL_DBAT5       0x70
+#define SL_IBAT5       0x78
+#define SL_DBAT6       0x80
+#define SL_IBAT6       0x88
+#define SL_DBAT7       0x90
+#define SL_IBAT7       0x98
+#define SL_TB          0xa0
+#define SL_R2          0xa8
+#define SL_CR          0xac
+#define SL_R12         0xb0    /* r12 to r31 */
 #define SL_SIZE                (SL_R12 + 80)
 
        .section .text
@@ -125,6 +133,41 @@ _GLOBAL(low_sleep_handler)
        mfibatl r4,3
        stw     r4,SL_IBAT3+4(r1)
 
+BEGIN_MMU_FTR_SECTION
+       mfspr   r4,SPRN_DBAT4U
+       stw     r4,SL_DBAT4(r1)
+       mfspr   r4,SPRN_DBAT4L
+       stw     r4,SL_DBAT4+4(r1)
+       mfspr   r4,SPRN_DBAT5U
+       stw     r4,SL_DBAT5(r1)
+       mfspr   r4,SPRN_DBAT5L
+       stw     r4,SL_DBAT5+4(r1)
+       mfspr   r4,SPRN_DBAT6U
+       stw     r4,SL_DBAT6(r1)
+       mfspr   r4,SPRN_DBAT6L
+       stw     r4,SL_DBAT6+4(r1)
+       mfspr   r4,SPRN_DBAT7U
+       stw     r4,SL_DBAT7(r1)
+       mfspr   r4,SPRN_DBAT7L
+       stw     r4,SL_DBAT7+4(r1)
+       mfspr   r4,SPRN_IBAT4U
+       stw     r4,SL_IBAT4(r1)
+       mfspr   r4,SPRN_IBAT4L
+       stw     r4,SL_IBAT4+4(r1)
+       mfspr   r4,SPRN_IBAT5U
+       stw     r4,SL_IBAT5(r1)
+       mfspr   r4,SPRN_IBAT5L
+       stw     r4,SL_IBAT5+4(r1)
+       mfspr   r4,SPRN_IBAT6U
+       stw     r4,SL_IBAT6(r1)
+       mfspr   r4,SPRN_IBAT6L
+       stw     r4,SL_IBAT6+4(r1)
+       mfspr   r4,SPRN_IBAT7U
+       stw     r4,SL_IBAT7(r1)
+       mfspr   r4,SPRN_IBAT7L
+       stw     r4,SL_IBAT7+4(r1)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
        /* Backup various CPU config stuffs */
        bl      __save_cpu_setup
 
@@ -325,22 +368,37 @@ grackle_wake_up:
        mtibatl 3,r4
 
 BEGIN_MMU_FTR_SECTION
-       li      r4,0
+       lwz     r4,SL_DBAT4(r1)
        mtspr   SPRN_DBAT4U,r4
+       lwz     r4,SL_DBAT4+4(r1)
        mtspr   SPRN_DBAT4L,r4
+       lwz     r4,SL_DBAT5(r1)
        mtspr   SPRN_DBAT5U,r4
+       lwz     r4,SL_DBAT5+4(r1)
        mtspr   SPRN_DBAT5L,r4
+       lwz     r4,SL_DBAT6(r1)
        mtspr   SPRN_DBAT6U,r4
+       lwz     r4,SL_DBAT6+4(r1)
        mtspr   SPRN_DBAT6L,r4
+       lwz     r4,SL_DBAT7(r1)
        mtspr   SPRN_DBAT7U,r4
+       lwz     r4,SL_DBAT7+4(r1)
        mtspr   SPRN_DBAT7L,r4
+       lwz     r4,SL_IBAT4(r1)
        mtspr   SPRN_IBAT4U,r4
+       lwz     r4,SL_IBAT4+4(r1)
        mtspr   SPRN_IBAT4L,r4
+       lwz     r4,SL_IBAT5(r1)
        mtspr   SPRN_IBAT5U,r4
+       lwz     r4,SL_IBAT5+4(r1)
        mtspr   SPRN_IBAT5L,r4
+       lwz     r4,SL_IBAT6(r1)
        mtspr   SPRN_IBAT6U,r4
+       lwz     r4,SL_IBAT6+4(r1)
        mtspr   SPRN_IBAT6L,r4
+       lwz     r4,SL_IBAT7(r1)
        mtspr   SPRN_IBAT7U,r4
+       lwz     r4,SL_IBAT7+4(r1)
        mtspr   SPRN_IBAT7L,r4
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 
index 6893d8f..225346d 100644 (file)
@@ -158,6 +158,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 
        mtdcr(uic->dcrbase + UIC_PR, pr);
        mtdcr(uic->dcrbase + UIC_TR, tr);
+       mtdcr(uic->dcrbase + UIC_SR, ~mask);
 
        raw_spin_unlock_irqrestore(&uic->lock, flags);
 
index 6a75352..950b0c0 100644 (file)
@@ -1487,6 +1487,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
        case KVM_S390_MCHK:
                irq->u.mchk.mcic = s390int->parm64;
                break;
+       case KVM_S390_INT_PFAULT_INIT:
+               irq->u.ext.ext_params = s390int->parm;
+               irq->u.ext.ext_params2 = s390int->parm64;
+               break;
+       case KVM_S390_RESTART:
+       case KVM_S390_INT_CLOCK_COMP:
+       case KVM_S390_INT_CPU_TIMER:
+               break;
+       default:
+               return -EINVAL;
        }
        return 0;
 }
index 23911ec..14d2ca9 100644 (file)
@@ -2541,7 +2541,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        }
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;
-               struct kvm_s390_irq s390irq;
+               struct kvm_s390_irq s390irq = {};
 
                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
index 727693e..bcf4099 100644 (file)
@@ -886,7 +886,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                break;
        case BPF_ALU64 | BPF_NEG: /* dst = -dst */
                /* lcgr %dst,%dst */
-               EMIT4(0xb9130000, dst_reg, dst_reg);
+               EMIT4(0xb9030000, dst_reg, dst_reg);
                break;
        /*
         * BPF_FROM_BE/LE
@@ -1067,8 +1067,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                /* llgf %w1,map.max_entries(%b2) */
                EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
                              offsetof(struct bpf_array, map.max_entries));
-               /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
-               EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+               /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+               EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
                                  REG_W1, 0, 0xa);
 
                /*
@@ -1094,8 +1094,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                 *         goto out;
                 */
 
-               /* sllg %r1,%b3,3: %r1 = index * 8 */
-               EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+               /* llgfr %r1,%b3: %r1 = (u32) index */
+               EMIT4(0xb9160000, REG_1, BPF_REG_3);
+               /* sllg %r1,%r1,3: %r1 *= 8 */
+               EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
                /* lg %r1,prog(%b2,%r1) */
                EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
                              REG_1, offsetof(struct bpf_array, ptrs));
index 3280a6b..b2592c3 100644 (file)
@@ -370,7 +370,11 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
 
 #define ioremap_nocache        ioremap
 #define ioremap_uc     ioremap
-#define iounmap                __iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+       __iounmap(addr);
+}
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
index 2197fc5..000cc33 100644 (file)
@@ -160,6 +160,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
        switch (sh_type) {
        case SH_BREAKPOINT_READ:
                *gen_type = HW_BREAKPOINT_R;
+               break;
        case SH_BREAKPOINT_WRITE:
                *gen_type = HW_BREAKPOINT_W;
                break;
index 941527e..f618f45 100644 (file)
@@ -42,7 +42,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
         * when the new ->mm is used for the first time.
         */
        __switch_mm(&new->context.id);
-       down_write(&new->mmap_sem);
+       down_write_nested(&new->mmap_sem, 1);
        uml_setup_stubs(new);
        up_write(&new->mmap_sem);
 }
index 063de64..0a3081d 100644 (file)
@@ -38,6 +38,7 @@ REALMODE_CFLAGS       := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
 
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
 export REALMODE_CFLAGS
 
index 6862464..5993813 100644 (file)
@@ -33,6 +33,7 @@ KBUILD_CFLAGS += $(cflags-y)
 KBUILD_CFLAGS += -mno-mmx -mno-sse
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
index 16df89c..1e5b682 100644 (file)
@@ -11,6 +11,7 @@
 
 #include "misc.h"
 #include "../string.h"
+#include <asm/bootparam_utils.h>
 
 /* WARNING!!
  * This code is compiled with -fPIC and it is relocated dynamically
index 4abb284..bce1827 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/page.h>
 #include <asm/boot.h>
 #include <asm/bootparam.h>
-#include <asm/bootparam_utils.h>
 
 #define BOOT_BOOT_H
 #include "../ctype.h"
index 4c80a8b..deaecb9 100644 (file)
@@ -382,6 +382,7 @@ CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_TEST=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_INPUT=y
index 3c71dd9..5e24cd2 100644 (file)
@@ -1,3 +1,5 @@
+#include <asm/cpufeatures.h>
+
 /*
 
  x86 function call convention, 64-bit:
@@ -199,6 +201,23 @@ For 32-bit we have the following conventions - kernel is built with
        .byte 0xf1
        .endm
 
+/*
+ * Mitigate Spectre v1 for conditional swapgs code paths.
+ *
+ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
+ * prevent a speculative swapgs when coming from kernel space.
+ *
+ * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
+ * to prevent the swapgs from getting speculatively skipped when coming from
+ * user space.
+ */
+.macro FENCE_SWAPGS_USER_ENTRY
+       ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
+.endm
+.macro FENCE_SWAPGS_KERNEL_ENTRY
+       ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
+.endm
+
 #else /* CONFIG_X86_64 */
 
 /*
index 4ada644..30830cd 100644 (file)
@@ -578,6 +578,7 @@ END(irq_entries_start)
         * tracking that we're in kernel mode.
         */
        SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
        SWITCH_KERNEL_CR3
 
        /*
@@ -593,8 +594,10 @@ END(irq_entries_start)
 #ifdef CONFIG_CONTEXT_TRACKING
        call enter_from_user_mode
 #endif
-
+       jmp     2f
 1:
+       FENCE_SWAPGS_KERNEL_ENTRY
+2:
        /*
         * Save previous stack pointer, optionally switch to interrupt stack.
         * irq_count is used to check if a CPU is already on an interrupt stack
@@ -1110,6 +1113,13 @@ ENTRY(paranoid_entry)
        movq    %rax, %cr3
 2:
 #endif
+       /*
+        * The above doesn't do an unconditional CR3 write, even in the PTI
+        * case.  So do an lfence to prevent GS speculation, regardless of
+        * whether PTI is enabled.
+        */
+       FENCE_SWAPGS_KERNEL_ENTRY
+
        ret
 END(paranoid_entry)
 
@@ -1166,12 +1176,12 @@ ENTRY(error_entry)
        testb   $3, CS+8(%rsp)
        jz      .Lerror_kernelspace
 
-.Lerror_entry_from_usermode_swapgs:
        /*
         * We entered from user mode or we're pretending to have entered
         * from user mode due to an IRET fault.
         */
        SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
 
 .Lerror_entry_from_usermode_after_swapgs:
        /*
@@ -1185,6 +1195,8 @@ ENTRY(error_entry)
 #endif
        ret
 
+.Lerror_entry_done_lfence:
+       FENCE_SWAPGS_KERNEL_ENTRY
 .Lerror_entry_done:
        TRACE_IRQS_OFF
        ret
@@ -1203,14 +1215,16 @@ ENTRY(error_entry)
        cmpq    %rax, RIP+8(%rsp)
        je      .Lbstep_iret
        cmpq    $gs_change, RIP+8(%rsp)
-       jne     .Lerror_entry_done
+       jne     .Lerror_entry_done_lfence
 
        /*
         * hack: gs_change can fail with user gsbase.  If this happens, fix up
         * gsbase and proceed.  We'll fix up the exception and land in
         * gs_change's error handler with kernel gsbase.
         */
-       jmp     .Lerror_entry_from_usermode_swapgs
+       SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
+       jmp .Lerror_entry_done
 
 .Lbstep_iret:
        /* Fix truncated RIP */
@@ -1223,6 +1237,7 @@ ENTRY(error_entry)
         * Switch to kernel gsbase:
         */
        SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
 
        /*
         * Pretend that the exception came from user mode: set up pt_regs
@@ -1319,6 +1334,7 @@ ENTRY(nmi)
         * to switch CR3 here.
         */
        cld
+       FENCE_SWAPGS_USER_ENTRY
        movq    %rsp, %rdx
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
        pushq   5*8(%rdx)       /* pt_regs->ss */
@@ -1607,6 +1623,7 @@ end_repeat_nmi:
        movq    %rax, %cr3
 2:
 #endif
+       FENCE_SWAPGS_KERNEL_ENTRY
 
        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
        call    do_nmi
index 049327e..6badfe4 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <uapi/linux/time.h>
 #include <asm/vgtod.h>
-#include <asm/hpet.h>
 #include <asm/vvar.h>
 #include <asm/unistd.h>
 #include <asm/msr.h>
@@ -26,16 +25,6 @@ extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
 extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
 extern time_t __vdso_time(time_t *t);
 
-#ifdef CONFIG_HPET_TIMER
-extern u8 hpet_page
-       __attribute__((visibility("hidden")));
-
-static notrace cycle_t vread_hpet(void)
-{
-       return *(const volatile u32 *)(&hpet_page + HPET_COUNTER);
-}
-#endif
-
 #ifdef CONFIG_PARAVIRT_CLOCK
 extern u8 pvclock_page
        __attribute__((visibility("hidden")));
@@ -209,10 +198,6 @@ notrace static inline u64 vgetsns(int *mode)
 
        if (gtod->vclock_mode == VCLOCK_TSC)
                cycles = vread_tsc();
-#ifdef CONFIG_HPET_TIMER
-       else if (gtod->vclock_mode == VCLOCK_HPET)
-               cycles = vread_hpet();
-#endif
 #ifdef CONFIG_PARAVIRT_CLOCK
        else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
                cycles = vread_pvclock(mode);
index 4158acc..a708aa9 100644 (file)
@@ -25,7 +25,7 @@ SECTIONS
         * segment.
         */
 
-       vvar_start = . - 3 * PAGE_SIZE;
+       vvar_start = . - 2 * PAGE_SIZE;
        vvar_page = vvar_start;
 
        /* Place all vvars at the offsets in asm/vvar.h. */
@@ -35,8 +35,7 @@ SECTIONS
 #undef __VVAR_KERNEL_LDS
 #undef EMIT_VVAR
 
-       hpet_page = vvar_start + PAGE_SIZE;
-       pvclock_page = vvar_start + 2 * PAGE_SIZE;
+       pvclock_page = vvar_start + PAGE_SIZE;
 
        . = SIZEOF_HEADERS;
 
index fd810a5..3328a37 100644 (file)
@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-extern unsigned int apic_verbosity;
+extern int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
 extern int disable_apic;
index 4a8cb8d..588d8fb 100644 (file)
  * Note: efi_info is commonly left uninitialized, but that field has a
  * private magic, so it is better to leave it unchanged.
  */
+
+#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
+
+#define BOOT_PARAM_PRESERVE(struct_member)                             \
+       {                                                               \
+               .start = offsetof(struct boot_params, struct_member),   \
+               .len   = sizeof_mbr(struct boot_params, struct_member), \
+       }
+
+struct boot_params_to_save {
+       unsigned int start;
+       unsigned int len;
+};
+
 static void sanitize_boot_params(struct boot_params *boot_params)
 {
        /* 
@@ -35,19 +49,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
         */
        if (boot_params->sentinel) {
                /* fields in boot_params are left uninitialized, clear them */
-               memset(&boot_params->ext_ramdisk_image, 0,
-                      (char *)&boot_params->efi_info -
-                       (char *)&boot_params->ext_ramdisk_image);
-               memset(&boot_params->kbd_status, 0,
-                      (char *)&boot_params->hdr -
-                      (char *)&boot_params->kbd_status);
-               memset(&boot_params->_pad7[0], 0,
-                      (char *)&boot_params->edd_mbr_sig_buffer[0] -
-                       (char *)&boot_params->_pad7[0]);
-               memset(&boot_params->_pad8[0], 0,
-                      (char *)&boot_params->eddbuf[0] -
-                       (char *)&boot_params->_pad8[0]);
-               memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+               static struct boot_params scratch;
+               char *bp_base = (char *)boot_params;
+               char *save_base = (char *)&scratch;
+               int i;
+
+               const struct boot_params_to_save to_save[] = {
+                       BOOT_PARAM_PRESERVE(screen_info),
+                       BOOT_PARAM_PRESERVE(apm_bios_info),
+                       BOOT_PARAM_PRESERVE(tboot_addr),
+                       BOOT_PARAM_PRESERVE(ist_info),
+                       BOOT_PARAM_PRESERVE(hd0_info),
+                       BOOT_PARAM_PRESERVE(hd1_info),
+                       BOOT_PARAM_PRESERVE(sys_desc_table),
+                       BOOT_PARAM_PRESERVE(olpc_ofw_header),
+                       BOOT_PARAM_PRESERVE(efi_info),
+                       BOOT_PARAM_PRESERVE(alt_mem_k),
+                       BOOT_PARAM_PRESERVE(scratch),
+                       BOOT_PARAM_PRESERVE(e820_entries),
+                       BOOT_PARAM_PRESERVE(eddbuf_entries),
+                       BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+                       BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+                       BOOT_PARAM_PRESERVE(hdr),
+                       BOOT_PARAM_PRESERVE(e820_map),
+                       BOOT_PARAM_PRESERVE(eddbuf),
+               };
+
+               memset(&scratch, 0, sizeof(scratch));
+
+               for (i = 0; i < ARRAY_SIZE(to_save); i++) {
+                       memcpy(save_base + to_save[i].start,
+                              bp_base + to_save[i].start, to_save[i].len);
+               }
+
+               memcpy(boot_params, save_base, sizeof(*boot_params));
        }
 }
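For reference, a minimal user-space sketch of the whitelist-preservation pattern used in this hunk: fields worth keeping are described by (offset, length) pairs, copied into a zeroed scratch object, and the scratch is then copied back over the original so everything not on the list ends up cleared. The struct and field names below are illustrative, not the real boot_params layout.

#include <stddef.h>
#include <string.h>

struct params { int keep_a; int junk; char keep_b[8]; };

struct field_range { size_t start, len; };

#define PRESERVE(m) { offsetof(struct params, m), sizeof(((struct params *)0)->m) }

static void sanitize(struct params *p)
{
	static const struct field_range to_save[] = { PRESERVE(keep_a), PRESERVE(keep_b) };
	struct params scratch;
	size_t i;

	memset(&scratch, 0, sizeof(scratch));                    /* start from all zeroes */
	for (i = 0; i < sizeof(to_save) / sizeof(to_save[0]); i++)
		memcpy((char *)&scratch + to_save[i].start,      /* keep whitelisted fields */
		       (char *)p + to_save[i].start, to_save[i].len);
	memcpy(p, &scratch, sizeof(*p));                         /* everything else is now cleared */
}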
 
index eda81dc..c843fed 100644 (file)
@@ -5,8 +5,7 @@
 
 #define VCLOCK_NONE 0  /* No vDSO clock available.     */
 #define VCLOCK_TSC  1  /* vDSO should use vread_tsc.   */
-#define VCLOCK_HPET 2  /* vDSO should use vread_hpet.  */
-#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
+#define VCLOCK_PVCLOCK 2 /* vDSO should use vread_pvclock. */
 
 struct arch_clocksource_data {
        int vclock_mode;
index d9f7d17..113cb01 100644 (file)
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
+#define X86_FEATURE_FENCE_SWAPGS_USER  ( 7*32+10) /* "" LFENCE in user entry SWAPGS path */
+#define X86_FEATURE_FENCE_SWAPGS_KERNEL        ( 7*32+11) /* "" LFENCE in kernel entry SWAPGS path */
+
 #define X86_FEATURE_RETPOLINE  ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
 
 #define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_RSB_CTXSW  ( 7*32+19) /* "" Fill RSB on context switches */
-
 #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
 #define X86_FEATURE_SSBD       ( 7*32+17) /* Speculative Store Bypass Disable */
 
-/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
-#define X86_FEATURE_KAISER     ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
+#define X86_FEATURE_RSB_CTXSW  ( 7*32+19) /* "" Fill RSB on context switches */
 
 #define X86_FEATURE_USE_IBPB   ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled*/
 #define X86_FEATURE_USE_IBRS_FW        ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 #define X86_FEATURE_ZEN                ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
 #define X86_FEATURE_L1TF_PTEINV        ( 7*32+29) /* "" L1TF workaround PTE inversion */
 #define X86_FEATURE_IBRS_ENHANCED      ( 7*32+30) /* Enhanced IBRS */
+#define X86_FEATURE_KAISER     ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_BUG_L1TF           X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
 #define X86_BUG_MDS            X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY     X86_BUG(20) /* CPU is only affected by the  MSDBS variant of BUG_MDS */
+#define X86_BUG_SWAPGS         X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
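As a quick aside on the numbering used in this header, each feature constant packs a 32-bit word index and a bit position as word*32 + bit; the stand-alone snippet below (illustrative only) just decodes the value added above.

#include <stdio.h>

int main(void)
{
	unsigned int nr = 7 * 32 + 10;                 /* X86_FEATURE_FENCE_SWAPGS_USER from the hunk above */
	printf("word %u, bit %u\n", nr / 32, nr % 32); /* prints: word 7, bit 10 */
	return 0;
}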
index 2cb49ac..39f2024 100644 (file)
@@ -1184,25 +1184,29 @@ enum {
 #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
 #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
 
+asmlinkage void __noreturn kvm_spurious_fault(void);
+
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
- * Trap the fault and ignore the instruction if that happens.
+ * Usually after catching the fault we just panic; during reboot
+ * instead the instruction is ignored.
  */
-asmlinkage void kvm_spurious_fault(void);
-
-#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)     \
-       "666: " insn "\n\t" \
-       "668: \n\t"                           \
-       ".pushsection .fixup, \"ax\" \n" \
-       "667: \n\t" \
-       cleanup_insn "\n\t"                   \
-       "cmpb $0, kvm_rebooting \n\t"         \
-       "jne 668b \n\t"                       \
-       __ASM_SIZE(push) " $666b \n\t"        \
-       "jmp kvm_spurious_fault \n\t"         \
-       ".popsection \n\t" \
-       _ASM_EXTABLE(666b, 667b)
+#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)             \
+       "666: \n\t"                                                     \
+       insn "\n\t"                                                     \
+       "jmp    668f \n\t"                                              \
+       "667: \n\t"                                                     \
+       "call   kvm_spurious_fault \n\t"                                \
+       "668: \n\t"                                                     \
+       ".pushsection .fixup, \"ax\" \n\t"                              \
+       "700: \n\t"                                                     \
+       cleanup_insn "\n\t"                                             \
+       "cmpb   $0, kvm_rebooting\n\t"                                  \
+       "je     667b \n\t"                                              \
+       "jmp    668b \n\t"                                              \
+       ".popsection \n\t"                                              \
+       _ASM_EXTABLE(666b, 700b)
 
 #define __kvm_handle_fault_on_reboot(insn)             \
        ____kvm_handle_fault_on_reboot(insn, "")
index d4f5b82..3018377 100644 (file)
 #define MSR_AMD64_PATCH_LEVEL          0x0000008b
 #define MSR_AMD64_TSC_RATIO            0xc0000104
 #define MSR_AMD64_NB_CFG               0xc001001f
+#define MSR_AMD64_CPUID_FN_1           0xc0011004
 #define MSR_AMD64_PATCH_LOADER         0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH       0xc0010140
 #define MSR_AMD64_OSVW_STATUS          0xc0010141
index 5a10ac8..20f822f 100644 (file)
@@ -32,6 +32,16 @@ struct msr_regs_info {
        int err;
 };
 
+struct saved_msr {
+       bool valid;
+       struct msr_info info;
+};
+
+struct saved_msrs {
+       unsigned int num;
+       struct saved_msr *array;
+};
+
 static inline unsigned long long native_read_tscp(unsigned int *aux)
 {
        unsigned long low, high;
index e58c078..c3138ac 100644 (file)
        "       lfence;\n"                                      \
        "       jmp    902b;\n"                                 \
        "       .align 16\n"                                    \
-       "903:   addl   $4, %%esp;\n"                            \
+       "903:   lea    4(%%esp), %%esp;\n"                      \
        "       pushl  %[thunk_target];\n"                      \
        "       ret;\n"                                         \
        "       .align 16\n"                                    \
index 25479a1..1f95439 100644 (file)
@@ -117,9 +117,9 @@ static inline int v8086_mode(struct pt_regs *regs)
 #endif
 }
 
-#ifdef CONFIG_X86_64
 static inline bool user_64bit_mode(struct pt_regs *regs)
 {
+#ifdef CONFIG_X86_64
 #ifndef CONFIG_PARAVIRT
        /*
         * On non-paravirt systems, this is the only long mode CPL 3
@@ -130,8 +130,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
        /* Headers are too twisted for this to go in paravirt.h. */
        return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
 #endif
+#else /* !CONFIG_X86_64 */
+       return false;
+#endif
 }
 
+#ifdef CONFIG_X86_64
 #define current_user_stack_pointer()   current_pt_regs()->sp
 #define compat_user_stack_pointer()    current_pt_regs()->sp
 #endif
index d1793f0..8e9dbe7 100644 (file)
@@ -15,6 +15,7 @@ struct saved_context {
        unsigned long cr0, cr2, cr3, cr4;
        u64 misc_enable;
        bool misc_enable_saved;
+       struct saved_msrs saved_msrs;
        struct desc_ptr gdt_desc;
        struct desc_ptr idt;
        u16 ldt;
index 7ebf0eb..6136a18 100644 (file)
@@ -24,6 +24,7 @@ struct saved_context {
        unsigned long cr0, cr2, cr3, cr4, cr8;
        u64 misc_enable;
        bool misc_enable_saved;
+       struct saved_msrs saved_msrs;
        unsigned long efer;
        u16 gdt_pad; /* Unused */
        struct desc_ptr gdt_desc;
index deddc9b..834d1b5 100644 (file)
@@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
 /*
  * Debug level, exported for io_apic.c
  */
-unsigned int apic_verbosity;
+int apic_verbosity;
 
 int pic_mode;
 
@@ -593,7 +593,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
 
 /*
- * Temporary interrupt handler.
+ * Temporary interrupt handler and polled calibration function.
  */
 static void __init lapic_cal_handler(struct clock_event_device *dev)
 {
@@ -677,7 +677,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 static int __init calibrate_APIC_clock(void)
 {
        struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
-       void (*real_handler)(struct clock_event_device *dev);
+       u64 tsc_perj = 0, tsc_start = 0;
+       unsigned long jif_start;
        unsigned long deltaj;
        long delta, deltatsc;
        int pm_referenced = 0;
@@ -706,28 +707,64 @@ static int __init calibrate_APIC_clock(void)
        apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
                    "calibrating APIC timer ...\n");
 
+       /*
+        * There are platforms w/o global clockevent devices. Instead of
+        * making the calibration conditional on that, use a polling based
+        * approach everywhere.
+        */
        local_irq_disable();
 
-       /* Replace the global interrupt handler */
-       real_handler = global_clock_event->event_handler;
-       global_clock_event->event_handler = lapic_cal_handler;
-
        /*
         * Setup the APIC counter to maximum. There is no way the lapic
         * can underflow in the 100ms detection time frame
         */
        __setup_APIC_LVTT(0xffffffff, 0, 0);
 
-       /* Let the interrupts run */
+       /*
+        * Methods to terminate the calibration loop:
+        *  1) Global clockevent if available (jiffies)
+        *  2) TSC if available and frequency is known
+        */
+       jif_start = READ_ONCE(jiffies);
+
+       if (tsc_khz) {
+               tsc_start = rdtsc();
+               tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
+       }
+
+       /*
+        * Enable interrupts so the tick can fire, if a global
+        * clockevent device is available
+        */
        local_irq_enable();
 
-       while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
-               cpu_relax();
+       while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
+               /* Wait for a tick to elapse */
+               while (1) {
+                       if (tsc_khz) {
+                               u64 tsc_now = rdtsc();
+                               if ((tsc_now - tsc_start) >= tsc_perj) {
+                                       tsc_start += tsc_perj;
+                                       break;
+                               }
+                       } else {
+                               unsigned long jif_now = READ_ONCE(jiffies);
 
-       local_irq_disable();
+                               if (time_after(jif_now, jif_start)) {
+                                       jif_start = jif_now;
+                                       break;
+                               }
+                       }
+                       cpu_relax();
+               }
 
-       /* Restore the real event handler */
-       global_clock_event->event_handler = real_handler;
+               /* Invoke the calibration routine */
+               local_irq_disable();
+               lapic_cal_handler(NULL);
+               local_irq_enable();
+       }
+
+       local_irq_disable();
 
        /* Build delta t1-t2 as apic timer counts down */
        delta = lapic_cal_t1 - lapic_cal_t2;
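A minimal sketch of the polled wait that replaces the borrowed global clockevent handler above: spin until one tick's worth of time has passed, preferring a TSC delta when the TSC frequency is known and falling back to watching the tick counter otherwise. read_tsc() and read_jiffies() are assumed stand-ins for rdtsc() and READ_ONCE(jiffies), and the jiffies comparison is simplified relative to time_after().

#include <stdint.h>

extern uint64_t read_tsc(void);                /* assumed: TSC read helper */
extern unsigned long read_jiffies(void);       /* assumed: tick counter read helper */

static void wait_one_tick(uint64_t tsc_khz, unsigned long hz,
                          uint64_t *tsc_start, unsigned long *jif_start)
{
	uint64_t tsc_per_tick = tsc_khz ? (tsc_khz * 1000ULL) / hz : 0;

	for (;;) {
		if (tsc_khz) {
			if (read_tsc() - *tsc_start >= tsc_per_tick) {
				*tsc_start += tsc_per_tick;     /* advance to the next tick boundary */
				return;
			}
		} else {
			unsigned long now = read_jiffies();
			if (now != *jif_start) {
				*jif_start = now;               /* a tick elapsed */
				return;
			}
		}
		/* cpu_relax() equivalent would go here */
	}
}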
@@ -778,10 +815,11 @@ static int __init calibrate_APIC_clock(void)
        levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
 
        /*
-        * PM timer calibration failed or not turned on
-        * so lets try APIC timer based calibration
+        * PM timer calibration failed or not turned on so lets try APIC
+        * timer based calibration, if a global clockevent device is
+        * available.
         */
-       if (!pm_referenced) {
+       if (!pm_referenced && global_clock_event) {
                apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
 
                /*
index 971cf88..d75f665 100644 (file)
@@ -37,32 +37,12 @@ static int bigsmp_early_logical_apicid(int cpu)
        return early_per_cpu(x86_cpu_to_apicid, cpu);
 }
 
-static inline unsigned long calculate_ldr(int cpu)
-{
-       unsigned long val, id;
-
-       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-       id = per_cpu(x86_bios_cpu_apicid, cpu);
-       val |= SET_APIC_LOGICAL_ID(id);
-
-       return val;
-}
-
 /*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
+ * bigsmp enables physical destination mode
+ * and doesn't use LDR and DFR
  */
 static void bigsmp_init_apic_ldr(void)
 {
-       unsigned long val;
-       int cpu = smp_processor_id();
-
-       apic_write(APIC_DFR, APIC_DFR_FLAT);
-       val = calculate_ldr(cpu);
-       apic_write(APIC_LDR, val);
 }
 
 static void bigsmp_setup_apic_routing(void)
index fd94509..4d5e8ff 100644 (file)
@@ -2344,7 +2344,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
         * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
         * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
         */
-       return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
+       if (!ioapic_initialized)
+               return gsi_top;
+       /*
+        * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+        * updated. So simply return @from if ioapic_dynirq_base == 0.
+        */
+       return ioapic_dynirq_base ? : from;
 }
 
 #ifdef CONFIG_X86_32
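The "?:" form used above is the GNU elvis operator: the condition is evaluated once and returned if non-zero, otherwise the right-hand operand is used. A plain-C equivalent of that return, with illustrative parameter names:

unsigned int lower_bound(unsigned int dynirq_base, unsigned int from)
{
	return dynirq_base ? dynirq_base : from;   /* same as "dynirq_base ? : from" */
}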
index 6f24832..424d8a6 100644 (file)
@@ -684,6 +684,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
        msr_set_bit(MSR_AMD64_DE_CFG, 31);
 }
 
+static bool rdrand_force;
+
+static int __init rdrand_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "force"))
+               rdrand_force = true;
+       else
+               return -EINVAL;
+
+       return 0;
+}
+early_param("rdrand", rdrand_cmdline);
+
+static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
+{
+       /*
+        * Saving of the MSR used to hide the RDRAND support during
+        * suspend/resume is done by arch/x86/power/cpu.c, which is
+        * dependent on CONFIG_PM_SLEEP.
+        */
+       if (!IS_ENABLED(CONFIG_PM_SLEEP))
+               return;
+
+       /*
+        * The nordrand option can clear X86_FEATURE_RDRAND, so check for
+        * RDRAND support using the CPUID function directly.
+        */
+       if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
+               return;
+
+       msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
+
+       /*
+        * Verify that the CPUID change has occurred in case the kernel is
+        * running virtualized and the hypervisor doesn't support the MSR.
+        */
+       if (cpuid_ecx(1) & BIT(30)) {
+               pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
+               return;
+       }
+
+       clear_cpu_cap(c, X86_FEATURE_RDRAND);
+       pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
+}
+
+static void init_amd_jg(struct cpuinfo_x86 *c)
+{
+       /*
+        * Some BIOS implementations do not restore proper RDRAND support
+        * across suspend and resume. Check on whether to hide the RDRAND
+        * instruction support via CPUID.
+        */
+       clear_rdrand_cpuid_bit(c);
+}
+
 static void init_amd_bd(struct cpuinfo_x86 *c)
 {
        u64 value;
@@ -711,6 +769,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
                        wrmsrl_safe(0xc0011021, value);
                }
        }
+
+       /*
+        * Some BIOS implementations do not restore proper RDRAND support
+        * across suspend and resume. Check on whether to hide the RDRAND
+        * instruction support via CPUID.
+        */
+       clear_rdrand_cpuid_bit(c);
 }
 
 static void init_amd_zn(struct cpuinfo_x86 *c)
@@ -755,6 +820,7 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 0x10: init_amd_gh(c); break;
        case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
+       case 0x16: init_amd_jg(c); break;
        case 0x17: init_amd_zn(c); break;
        }
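The quirk above keys off CPUID leaf 1, ECX bit 30 (RDRAND) before and after clearing the vendor MSR bit. A stand-alone user-space probe of the same CPUID bit, using the compiler-provided cpuid helper, might look like this (illustrative only; it cannot observe the MSR write itself):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		printf("RDRAND advertised: %s\n", (ecx & (1u << 30)) ? "yes" : "no");
	return 0;
}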
 
index 310e279..917c63a 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/intel-family.h>
 #include <asm/e820.h>
 
+static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
@@ -87,17 +88,11 @@ void __init check_bugs(void)
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
 
-       /* Select the proper spectre mitigation before patching alternatives */
+       /* Select the proper CPU mitigations before patching alternatives: */
+       spectre_v1_select_mitigation();
        spectre_v2_select_mitigation();
-
-       /*
-        * Select proper mitigation for any exposure to the Speculative Store
-        * Bypass vulnerability.
-        */
        ssb_select_mitigation();
-
        l1tf_select_mitigation();
-
        mds_select_mitigation();
 
        arch_smt_update();
@@ -252,6 +247,98 @@ static int __init mds_cmdline(char *str)
 early_param("mds", mds_cmdline);
 
 #undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V1 : " fmt
+
+enum spectre_v1_mitigation {
+       SPECTRE_V1_MITIGATION_NONE,
+       SPECTRE_V1_MITIGATION_AUTO,
+};
+
+static enum spectre_v1_mitigation spectre_v1_mitigation =
+       SPECTRE_V1_MITIGATION_AUTO;
+
+static const char * const spectre_v1_strings[] = {
+       [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
+       [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
+};
+
+/*
+ * Does SMAP provide full mitigation against speculative kernel access to
+ * userspace?
+ */
+static bool smap_works_speculatively(void)
+{
+       if (!boot_cpu_has(X86_FEATURE_SMAP))
+               return false;
+
+       /*
+        * On CPUs which are vulnerable to Meltdown, SMAP does not
+        * prevent speculative access to user data in the L1 cache.
+        * Consider SMAP to be non-functional as a mitigation on these
+        * CPUs.
+        */
+       if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
+               return false;
+
+       return true;
+}
+
+static void __init spectre_v1_select_mitigation(void)
+{
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+               spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+               return;
+       }
+
+       if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
+               /*
+                * With Spectre v1, a user can speculatively control either
+                * path of a conditional swapgs with a user-controlled GS
+                * value.  The mitigation is to add lfences to both code paths.
+                *
+                * If FSGSBASE is enabled, the user can put a kernel address in
+                * GS, in which case SMAP provides no protection.
+                *
+                * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
+                *         FSGSBASE enablement patches have been merged. ]
+                *
+                * If FSGSBASE is disabled, the user can only put a user space
+                * address in GS.  That makes an attack harder, but still
+                * possible if there's no SMAP protection.
+                */
+               if (!smap_works_speculatively()) {
+                       /*
+                        * Mitigation can be provided from SWAPGS itself or
+                        * PTI as the CR3 write in the Meltdown mitigation
+                        * is serializing.
+                        *
+                        * If neither is there, mitigate with an LFENCE to
+                        * stop speculation through swapgs.
+                        */
+                       if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
+                           !boot_cpu_has(X86_FEATURE_KAISER))
+                               setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
+
+                       /*
+                        * Enable lfences in the kernel entry (non-swapgs)
+                        * paths, to prevent user entry from speculatively
+                        * skipping swapgs.
+                        */
+                       setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
+               }
+       }
+
+       pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+}
+
+static int __init nospectre_v1_cmdline(char *str)
+{
+       spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+       return 0;
+}
+early_param("nospectre_v1", nospectre_v1_cmdline);
+
+#undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
 static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
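A simplified sketch of the selection logic introduced above: a user-entry LFENCE is only forced when the CPU is affected by the SWAPGS issue, SMAP cannot be relied on (absent, or defeated by Meltdown), and PTI/KAISER is not already serializing the entry path; the kernel-entry fence is applied whenever SMAP does not help. The struct and predicates below stand in for the kernel's cpu-feature checks.

#include <stdbool.h>

struct cpu_state {
	bool bug_swapgs, bug_meltdown, has_smap, has_pti;
};

static bool smap_works_speculatively(const struct cpu_state *c)
{
	return c->has_smap && !c->bug_meltdown;
}

static void select_swapgs_fences(const struct cpu_state *c,
                                 bool *fence_user, bool *fence_kernel)
{
	*fence_user = *fence_kernel = false;
	if (smap_works_speculatively(c))
		return;                                 /* SMAP already blocks the access */
	if (c->bug_swapgs && !c->has_pti)
		*fence_user = true;                     /* LFENCE after SWAPGS on user entry */
	*fence_kernel = true;                           /* fence non-SWAPGS kernel entry paths */
}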
@@ -1094,7 +1181,7 @@ static void __init l1tf_select_mitigation(void)
 static ssize_t mds_show_state(char *buf)
 {
 #ifdef CONFIG_HYPERVISOR_GUEST
-       if (x86_hyper) {
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                return sprintf(buf, "%s; SMT Host state unknown\n",
                               mds_strings[mds_mitigation]);
        }
@@ -1154,7 +1241,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                break;
 
        case X86_BUG_SPECTRE_V1:
-               return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+               return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
index 4bce77b..3965235 100644 (file)
@@ -853,6 +853,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_L1TF                BIT(3)
 #define NO_MDS         BIT(4)
 #define MSBDS_ONLY     BIT(5)
+#define NO_SWAPGS      BIT(6)
 
 #define VULNWL(_vendor, _family, _model, _whitelist)   \
        { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -876,29 +877,37 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION),
        VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION),
 
-       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(ATOM_SILVERMONT_X,         NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY),
+       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_SILVERMONT_X,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
 
        VULNWL_INTEL(CORE_YONAH,                NO_SSB),
 
-       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY),
+       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
 
-       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF),
-       VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_MDS | NO_L1TF),
-       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF),
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_MDS | NO_L1TF | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS),
+
+       /*
+        * Technically, swapgs isn't serializing on AMD (despite it previously
+        * being documented as such in the APM).  But according to AMD, %gs is
+        * updated non-speculatively, and the issuing of %gs-relative memory
+        * operands will be blocked until the %gs update completes, which is
+        * good enough for our purposes.
+        */
 
        /* AMD Family 0xf - 0x12 */
-       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
 
        /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS),
+       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
        {}
 };
 
@@ -935,6 +944,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                        setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
        }
 
+       if (!cpu_matches(NO_SWAPGS))
+               setup_force_cpu_bug(X86_BUG_SWAPGS);
+
        if (cpu_matches(NO_MELTDOWN))
                return;
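A small sketch of the whitelist pattern extended above: each table entry carries a mask of issues the part is known not to have, and any CPU that does not match the NO_SWAPGS flag gets the SWAPGS bug bit forced on. The flag value matches the hunk; the helper types are illustrative.

#include <stdbool.h>

#define NO_SWAPGS   (1u << 6)

static bool cpu_matches(unsigned long whitelist_flags, unsigned long flag)
{
	return (whitelist_flags & flag) != 0;
}

static void set_bug_bits(unsigned long whitelist_flags, bool *bug_swapgs)
{
	if (!cpu_matches(whitelist_flags, NO_SWAPGS))
		*bug_swapgs = true;                     /* not whitelisted: assume affected */
}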
 
index 6988c74..711b74e 100644 (file)
@@ -3,6 +3,8 @@
 # Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
 #
 
+set -e
+
 IN=$1
 OUT=$2
 
index 9cce550..e8d2033 100644 (file)
@@ -774,7 +774,6 @@ static struct clocksource clocksource_hpet = {
        .mask           = HPET_MASK,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        .resume         = hpet_resume_counter,
-       .archdata       = { .vclock_mode = VCLOCK_HPET },
 };
 
 static int hpet_clocksource_register(void)
index 1ca9297..0b6d27d 100644 (file)
@@ -698,11 +698,10 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
 {
        struct thread_struct *thread = &tsk->thread;
        unsigned long val = 0;
-       int index = n;
 
        if (n < HBP_NUM) {
+               int index = array_index_nospec(n, HBP_NUM);
                struct perf_event *bp = thread->ptrace_bps[index];
-               index = array_index_nospec(index, HBP_NUM);
 
                if (bp)
                        val = bp->hw.info.address;
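The fix above moves the index clamp inside the bounds-checked branch and ahead of the array access, so a mispredicted branch cannot speculatively use an out-of-range index. A sketch of the pattern; the clamp helper here is a plain stand-in, not the kernel's barrier-based array_index_nospec():

#include <stddef.h>

static size_t index_nospec(size_t index, size_t size)
{
	return index < size ? index : 0;                /* illustrative clamp only */
}

static unsigned long get_slot(unsigned long *slots, size_t nslots, size_t n)
{
	if (n < nslots) {
		size_t idx = index_nospec(n, nslots);   /* clamp before the access */
		return slots[idx];
	}
	return 0;
}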
index 5da924b..7cd6101 100644 (file)
@@ -216,9 +216,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
        {},
 };
 
+/*
+ * Some devices have a portrait LCD but advertise a landscape resolution (and
+ * pitch). We simply swap width and height for these devices so that we can
+ * correctly deal with some of them coming with multiple resolutions.
+ */
+static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+       {
+               /*
+                * Lenovo MIIX310-10ICR, only some batches have the troublesome
+                * 800x1280 portrait screen. Luckily the portrait version has
+                * its own BIOS version, so we match on that.
+                */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+                       DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
+               },
+       },
+       {
+               /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+                                       "Lenovo MIIX 320-10ICR"),
+               },
+       },
+       {
+               /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+                                       "Lenovo ideapad D330-10IGM"),
+               },
+       },
+       {},
+};
+
 __init void sysfb_apply_efi_quirks(void)
 {
        if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
            !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
                dmi_check_system(efifb_dmi_system_table);
+
+       if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+           dmi_check_system(efifb_dmi_swap_width_height)) {
+               u16 temp = screen_info.lfb_width;
+
+               screen_info.lfb_width = screen_info.lfb_height;
+               screen_info.lfb_height = temp;
+               screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+       }
 }
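A sketch of what the quirk above does on a matched portrait panel: the advertised width and height are swapped and the line length recomputed for 4 bytes per pixel. The struct is illustrative; the 32 bpp assumption mirrors the hunk.

struct fb_geom { unsigned short width, height; unsigned int linelength; };

static void swap_to_portrait(struct fb_geom *g)
{
	unsigned short tmp = g->width;

	g->width = g->height;
	g->height = tmp;
	g->linelength = 4 * g->width;   /* assumes 32 bits per pixel */
}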
index b810528..178d63c 100644 (file)
@@ -514,9 +514,12 @@ struct uprobe_xol_ops {
        void    (*abort)(struct arch_uprobe *, struct pt_regs *);
 };
 
-static inline int sizeof_long(void)
+static inline int sizeof_long(struct pt_regs *regs)
 {
-       return is_ia32_task() ? 4 : 8;
+       /*
+        * Check registers for mode as in_xxx_syscall() does not apply here.
+        */
+       return user_64bit_mode(regs) ? 8 : 4;
 }
 
 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
@@ -527,9 +530,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 
 static int push_ret_address(struct pt_regs *regs, unsigned long ip)
 {
-       unsigned long new_sp = regs->sp - sizeof_long();
+       unsigned long new_sp = regs->sp - sizeof_long(regs);
 
-       if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
+       if (copy_to_user((void __user *)new_sp, &ip, sizeof_long(regs)))
                return -EFAULT;
 
        regs->sp = new_sp;
@@ -562,7 +565,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
                long correction = utask->vaddr - utask->xol_vaddr;
                regs->ip += correction;
        } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
-               regs->sp += sizeof_long(); /* Pop incorrect return address */
+               regs->sp += sizeof_long(regs); /* Pop incorrect return address */
                if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen))
                        return -ERESTART;
        }
@@ -671,7 +674,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
         * "call" insn was executed out-of-line. Just restore ->sp and restart.
         * We could also restore ->ip and try to call branch_emulate_op() again.
         */
-       regs->sp += sizeof_long();
+       regs->sp += sizeof_long(regs);
        return -ERESTART;
 }
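The change above derives the probed task's native word size from the register snapshot rather than from the task personality, since a 64-bit task can execute 32-bit code. A minimal sketch, with an illustrative stand-in for user_64bit_mode():

#include <stdbool.h>

struct regs_view { bool long_mode_cs; };        /* illustrative: "CS says 64-bit" */

static int sizeof_long(const struct regs_view *regs)
{
	return regs->long_mode_cs ? 8 : 4;      /* 8-byte longs in 64-bit mode, else 4 */
}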
 
@@ -962,7 +965,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 unsigned long
 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
 {
-       int rasize = sizeof_long(), nleft;
+       int rasize = sizeof_long(regs), nleft;
        unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
 
        if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
index 31aa2c8..f21d4df 100644 (file)
@@ -124,8 +124,8 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                                 intr ? kvm_perf_overflow_intr :
                                                 kvm_perf_overflow, pmc);
        if (IS_ERR(event)) {
-               printk_once("kvm_pmu: event creation failed %ld\n",
-                           PTR_ERR(event));
+               pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
+                           PTR_ERR(event), pmc->idx);
                return;
        }
 
index 0ec94c6..0a187b9 100644 (file)
@@ -809,8 +809,7 @@ TRACE_EVENT(kvm_write_tsc_offset,
 
 #define host_clocks                                    \
        {VCLOCK_NONE, "none"},                          \
-       {VCLOCK_TSC,  "tsc"},                           \
-       {VCLOCK_HPET, "hpet"}                           \
+       {VCLOCK_TSC,  "tsc"}                            \
 
 TRACE_EVENT(kvm_update_master_clock,
        TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
index 098be61..343c8dd 100644 (file)
@@ -7247,6 +7247,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
        gva_t gva = 0;
+       struct x86_exception e;
 
        if (!nested_vmx_check_permission(vcpu) ||
            !nested_vmx_check_vmcs12(vcpu))
@@ -7273,8 +7274,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                                vmx_instruction_info, true, &gva))
                        return 1;
                /* _system ok, as nested_vmx_check_permission verified cpl=0 */
-               kvm_write_guest_virt_system(vcpu, gva, &field_value,
-                                           (is_long_mode(vcpu) ? 8 : 4), NULL);
+               if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
+                                               (is_long_mode(vcpu) ? 8 : 4),
+                                               NULL))
+                       kvm_inject_page_fault(vcpu, &e);
        }
 
        nested_vmx_succeed(vcpu);
index 8613422..74674a6 100644 (file)
@@ -4337,6 +4337,13 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
        if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
                access |= PFERR_USER_MASK;
 
+       /*
+        * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+        * is returned, but our callers are not ready for that and they blindly
+        * call kvm_inject_page_fault.  Ensure that they at least do not leak
+        * uninitialized kernel stack memory into cr2 and error code.
+        */
+       memset(exception, 0, sizeof(*exception));
        return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
                                           access, exception);
 }
@@ -5545,12 +5552,13 @@ restart:
                unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
                toggle_interruptibility(vcpu, ctxt->interruptibility);
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
-               kvm_rip_write(vcpu, ctxt->eip);
-               if (r == EMULATE_DONE && ctxt->tf)
-                       kvm_vcpu_do_singlestep(vcpu, &r);
                if (!ctxt->have_exception ||
-                   exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+                   exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+                       kvm_rip_write(vcpu, ctxt->eip);
+                       if (r == EMULATE_DONE && ctxt->tf)
+                               kvm_vcpu_do_singlestep(vcpu, &r);
                        __kvm_set_rflags(vcpu, ctxt->eflags);
+               }
 
                /*
                 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
index afbc4d8..df5aee5 100644 (file)
@@ -176,7 +176,7 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
 #define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \
   ((y) + EXTENDED_Ebias) & 0x7fff; }
 #define exponent16(x)         (*(short *)&((x)->exp))
-#define setexponent16(x,y)  { (*(short *)&((x)->exp)) = (y); }
+#define setexponent16(x,y)  { (*(short *)&((x)->exp)) = (u16)(y); }
 #define addexponent(x,y)    { (*(short *)&((x)->exp)) += (y); }
 #define stdexp(x)           { (*(short *)&((x)->exp)) += EXTENDED_Ebias; }
 
index 0054835..382093c 100644 (file)
@@ -17,7 +17,7 @@
 #include "control_w.h"
 
 #define MAKE_REG(s, e, l, h) { l, h, \
-               ((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
+               (u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
 
 FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
 #if 0
index 462c5c3..0e49868 100644 (file)
@@ -216,13 +216,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 
        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
-       if (!pmd_present(*pmd_k))
-               return NULL;
 
-       if (!pmd_present(*pmd))
+       if (pmd_present(*pmd) != pmd_present(*pmd_k))
                set_pmd(pmd, *pmd_k);
+
+       if (!pmd_present(*pmd_k))
+               return NULL;
        else
-               BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+               BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 
        return pmd_k;
 }
@@ -242,17 +243,13 @@ void vmalloc_sync_all(void)
                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        spinlock_t *pgt_lock;
-                       pmd_t *ret;
 
                        /* the pgt_lock only for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
                        spin_lock(pgt_lock);
-                       ret = vmalloc_sync_one(page_address(page), address);
+                       vmalloc_sync_one(page_address(page), address);
                        spin_unlock(pgt_lock);
-
-                       if (!ret)
-                               break;
                }
                spin_unlock(&pgd_lock);
        }
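A rough sketch of the behavioural change above: every page-table copy is visited and its entry is made to agree with the reference entry's presence state, and the walk no longer bails out at the first copy that already looks in sync. Types here are illustrative, and the kernel additionally sanity-checks that already-present entries point at the same pfn.

#include <stdbool.h>
#include <stddef.h>

struct pt_entry { bool present; unsigned long pfn; };

static void sync_entry(struct pt_entry *e, const struct pt_entry *ref)
{
	if (e->present != ref->present)
		*e = *ref;                              /* propagate the reference mapping */
}

static void sync_all(struct pt_entry *copies, size_t n, const struct pt_entry *ref)
{
	for (size_t i = 0; i < n; i++)
		sync_entry(&copies[i], ref);            /* no early break any more */
}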
index 9ab5279..2e5052b 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/export.h>
 #include <linux/smp.h>
 #include <linux/perf_event.h>
+#include <linux/dmi.h>
 
 #include <asm/pgtable.h>
 #include <asm/proto.h>
@@ -23,6 +24,7 @@
 #include <asm/debugreg.h>
 #include <asm/cpu.h>
 #include <asm/mmu_context.h>
+#include <asm/cpu_device_id.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -32,6 +34,29 @@ __visible unsigned long saved_context_eflags;
 #endif
 struct saved_context saved_context;
 
+static void msr_save_context(struct saved_context *ctxt)
+{
+       struct saved_msr *msr = ctxt->saved_msrs.array;
+       struct saved_msr *end = msr + ctxt->saved_msrs.num;
+
+       while (msr < end) {
+               msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
+               msr++;
+       }
+}
+
+static void msr_restore_context(struct saved_context *ctxt)
+{
+       struct saved_msr *msr = ctxt->saved_msrs.array;
+       struct saved_msr *end = msr + ctxt->saved_msrs.num;
+
+       while (msr < end) {
+               if (msr->valid)
+                       wrmsrl(msr->info.msr_no, msr->info.reg.q);
+               msr++;
+       }
+}
+
 /**
  *     __save_processor_state - save CPU registers before creating a
  *             hibernation image and before restoring the memory state from it
@@ -111,6 +136,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 #endif
        ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
                                               &ctxt->misc_enable);
+       msr_save_context(ctxt);
 }
 
 /* Needed by apm.c */
@@ -229,6 +255,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        x86_platform.restore_sched_clock_state();
        mtrr_bp_restore();
        perf_restore_debug_store();
+       msr_restore_context(ctxt);
 }
 
 /* Needed by apm.c */
@@ -320,3 +347,128 @@ static int __init bsp_pm_check_init(void)
 }
 
 core_initcall(bsp_pm_check_init);
+
+static int msr_build_context(const u32 *msr_id, const int num)
+{
+       struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
+       struct saved_msr *msr_array;
+       int total_num;
+       int i, j;
+
+       total_num = saved_msrs->num + num;
+
+       msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
+       if (!msr_array) {
+               pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
+               return -ENOMEM;
+       }
+
+       if (saved_msrs->array) {
+               /*
+                * Multiple callbacks can invoke this function, so copy any
+                * MSR save requests from previous invocations.
+                */
+               memcpy(msr_array, saved_msrs->array,
+                      sizeof(struct saved_msr) * saved_msrs->num);
+
+               kfree(saved_msrs->array);
+       }
+
+       for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+               msr_array[i].info.msr_no        = msr_id[j];
+               msr_array[i].valid              = false;
+               msr_array[i].info.reg.q         = 0;
+       }
+       saved_msrs->num   = total_num;
+       saved_msrs->array = msr_array;
+
+       return 0;
+}
+
+/*
+ * The following sections are a quirk framework for problematic BIOSen:
+ * Sometimes MSRs are modified by the BIOSen after suspended to
+ * RAM, this might cause unexpected behavior after wakeup.
+ * Thus we save/restore these specified MSRs across suspend/resume
+ * in order to work around it.
+ *
+ * For any further problematic BIOSen/platforms,
+ * please add your own function similar to msr_initialize_bdw.
+ */
+static int msr_initialize_bdw(const struct dmi_system_id *d)
+{
+       /* Add any extra MSR ids into this array. */
+       u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
+
+       pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
+       return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+}
+
+static struct dmi_system_id msr_save_dmi_table[] = {
+       {
+        .callback = msr_initialize_bdw,
+        .ident = "BROADWELL BDX_EP",
+        .matches = {
+               DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
+               },
+       },
+       {}
+};
+
+static int msr_save_cpuid_features(const struct x86_cpu_id *c)
+{
+       u32 cpuid_msr_id[] = {
+               MSR_AMD64_CPUID_FN_1,
+       };
+
+       pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
+               c->family);
+
+       return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
+}
+
+static const struct x86_cpu_id msr_save_cpu_table[] = {
+       {
+               .vendor = X86_VENDOR_AMD,
+               .family = 0x15,
+               .model = X86_MODEL_ANY,
+               .feature = X86_FEATURE_ANY,
+               .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+       },
+       {
+               .vendor = X86_VENDOR_AMD,
+               .family = 0x16,
+               .model = X86_MODEL_ANY,
+               .feature = X86_FEATURE_ANY,
+               .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+       },
+       {}
+};
+
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
+static int pm_cpu_check(const struct x86_cpu_id *c)
+{
+       const struct x86_cpu_id *m;
+       int ret = 0;
+
+       m = x86_match_cpu(msr_save_cpu_table);
+       if (m) {
+               pm_cpu_match_t fn;
+
+               fn = (pm_cpu_match_t)m->driver_data;
+               ret = fn(m);
+       }
+
+       return ret;
+}
+
+static int pm_check_save_msr(void)
+{
+       dmi_check_system(msr_save_dmi_table);
+       pm_cpu_check(msr_save_cpu_table);
+
+       return 0;
+}
+
+device_initcall(pm_check_save_msr);
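A minimal sketch of the save/restore pairing added above: each entry records whether the MSR could be read at suspend time, and only entries that were read successfully are written back on resume. rd_msr() and wr_msr() are assumed stand-ins for rdmsrl_safe() and wrmsrl().

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct saved_msr_entry { uint32_t msr_no; uint64_t value; bool valid; };

extern int  rd_msr(uint32_t msr, uint64_t *val);   /* assumed: returns 0 on success */
extern void wr_msr(uint32_t msr, uint64_t val);    /* assumed */

static void save_msrs(struct saved_msr_entry *msrs, size_t num)
{
	for (size_t i = 0; i < num; i++)
		msrs[i].valid = (rd_msr(msrs[i].msr_no, &msrs[i].value) == 0);
}

static void restore_msrs(const struct saved_msr_entry *msrs, size_t num)
{
	for (size_t i = 0; i < num; i++)
		if (msrs[i].valid)
			wr_msr(msrs[i].msr_no, msrs[i].value);  /* skip MSRs that never read back */
}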
index e5f8a14..ea1f871 100644 (file)
@@ -879,6 +879,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 
 fail:
        blk_free_flush_queue(q->fq);
+       q->fq = NULL;
        return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
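The one-liner above follows the usual free-and-clear pattern: once a member has been released on an error path, the pointer is nulled so a later teardown of the same object cannot free it twice. A user-space sketch with illustrative names:

#include <stdlib.h>

struct queue { void *flush_queue; };

static void fail_init(struct queue *q)
{
	free(q->flush_queue);
	q->flush_queue = NULL;   /* a later cleanup now sees no flush queue to free */
}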
index f678c73..d2c4645 100644 (file)
@@ -4,7 +4,6 @@
 #include <linux/cdrom.h>
 #include <linux/compat.h>
 #include <linux/elevator.h>
-#include <linux/fd.h>
 #include <linux/hdreg.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
@@ -209,318 +208,6 @@ static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode,
 #define BLKBSZSET_32           _IOW(0x12, 113, int)
 #define BLKGETSIZE64_32                _IOR(0x12, 114, int)
 
-struct compat_floppy_drive_params {
-       char            cmos;
-       compat_ulong_t  max_dtr;
-       compat_ulong_t  hlt;
-       compat_ulong_t  hut;
-       compat_ulong_t  srt;
-       compat_ulong_t  spinup;
-       compat_ulong_t  spindown;
-       unsigned char   spindown_offset;
-       unsigned char   select_delay;
-       unsigned char   rps;
-       unsigned char   tracks;
-       compat_ulong_t  timeout;
-       unsigned char   interleave_sect;
-       struct floppy_max_errors max_errors;
-       char            flags;
-       char            read_track;
-       short           autodetect[8];
-       compat_int_t    checkfreq;
-       compat_int_t    native_format;
-};
-
-struct compat_floppy_drive_struct {
-       signed char     flags;
-       compat_ulong_t  spinup_date;
-       compat_ulong_t  select_date;
-       compat_ulong_t  first_read_date;
-       short           probed_format;
-       short           track;
-       short           maxblock;
-       short           maxtrack;
-       compat_int_t    generation;
-       compat_int_t    keep_data;
-       compat_int_t    fd_ref;
-       compat_int_t    fd_device;
-       compat_int_t    last_checked;
-       compat_caddr_t dmabuf;
-       compat_int_t    bufblocks;
-};
-
-struct compat_floppy_fdc_state {
-       compat_int_t    spec1;
-       compat_int_t    spec2;
-       compat_int_t    dtr;
-       unsigned char   version;
-       unsigned char   dor;
-       compat_ulong_t  address;
-       unsigned int    rawcmd:2;
-       unsigned int    reset:1;
-       unsigned int    need_configure:1;
-       unsigned int    perp_mode:2;
-       unsigned int    has_fifo:1;
-       unsigned int    driver_version;
-       unsigned char   track[4];
-};
-
-struct compat_floppy_write_errors {
-       unsigned int    write_errors;
-       compat_ulong_t  first_error_sector;
-       compat_int_t    first_error_generation;
-       compat_ulong_t  last_error_sector;
-       compat_int_t    last_error_generation;
-       compat_uint_t   badness;
-};
-
-#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
-#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
-#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
-#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
-#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
-#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
-#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
-#define FDWERRORGET32  _IOR(2, 0x17, struct compat_floppy_write_errors)
-
-static struct {
-       unsigned int    cmd32;
-       unsigned int    cmd;
-} fd_ioctl_trans_table[] = {
-       { FDSETPRM32, FDSETPRM },
-       { FDDEFPRM32, FDDEFPRM },
-       { FDGETPRM32, FDGETPRM },
-       { FDSETDRVPRM32, FDSETDRVPRM },
-       { FDGETDRVPRM32, FDGETDRVPRM },
-       { FDGETDRVSTAT32, FDGETDRVSTAT },
-       { FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
-       { FDGETFDCSTAT32, FDGETFDCSTAT },
-       { FDWERRORGET32, FDWERRORGET }
-};
-
-#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)
-
-static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
-               unsigned int cmd, unsigned long arg)
-{
-       mm_segment_t old_fs = get_fs();
-       void *karg = NULL;
-       unsigned int kcmd = 0;
-       int i, err;
-
-       for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
-               if (cmd == fd_ioctl_trans_table[i].cmd32) {
-                       kcmd = fd_ioctl_trans_table[i].cmd;
-                       break;
-               }
-       if (!kcmd)
-               return -EINVAL;
-
-       switch (cmd) {
-       case FDSETPRM32:
-       case FDDEFPRM32:
-       case FDGETPRM32:
-       {
-               compat_uptr_t name;
-               struct compat_floppy_struct __user *uf;
-               struct floppy_struct *f;
-
-               uf = compat_ptr(arg);
-               f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
-               if (!karg)
-                       return -ENOMEM;
-               if (cmd == FDGETPRM32)
-                       break;
-               err = __get_user(f->size, &uf->size);
-               err |= __get_user(f->sect, &uf->sect);
-               err |= __get_user(f->head, &uf->head);
-               err |= __get_user(f->track, &uf->track);
-               err |= __get_user(f->stretch, &uf->stretch);
-               err |= __get_user(f->gap, &uf->gap);
-               err |= __get_user(f->rate, &uf->rate);
-               err |= __get_user(f->spec1, &uf->spec1);
-               err |= __get_user(f->fmt_gap, &uf->fmt_gap);
-               err |= __get_user(name, &uf->name);
-               f->name = compat_ptr(name);
-               if (err) {
-                       err = -EFAULT;
-                       goto out;
-               }
-               break;
-       }
-       case FDSETDRVPRM32:
-       case FDGETDRVPRM32:
-       {
-               struct compat_floppy_drive_params __user *uf;
-               struct floppy_drive_params *f;
-
-               uf = compat_ptr(arg);
-               f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
-               if (!karg)
-                       return -ENOMEM;
-               if (cmd == FDGETDRVPRM32)
-                       break;
-               err = __get_user(f->cmos, &uf->cmos);
-               err |= __get_user(f->max_dtr, &uf->max_dtr);
-               err |= __get_user(f->hlt, &uf->hlt);
-               err |= __get_user(f->hut, &uf->hut);
-               err |= __get_user(f->srt, &uf->srt);
-               err |= __get_user(f->spinup, &uf->spinup);
-               err |= __get_user(f->spindown, &uf->spindown);
-               err |= __get_user(f->spindown_offset, &uf->spindown_offset);
-               err |= __get_user(f->select_delay, &uf->select_delay);
-               err |= __get_user(f->rps, &uf->rps);
-               err |= __get_user(f->tracks, &uf->tracks);
-               err |= __get_user(f->timeout, &uf->timeout);
-               err |= __get_user(f->interleave_sect, &uf->interleave_sect);
-               err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors));
-               err |= __get_user(f->flags, &uf->flags);
-               err |= __get_user(f->read_track, &uf->read_track);
-               err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect));
-               err |= __get_user(f->checkfreq, &uf->checkfreq);
-               err |= __get_user(f->native_format, &uf->native_format);
-               if (err) {
-                       err = -EFAULT;
-                       goto out;
-               }
-               break;
-       }
-       case FDGETDRVSTAT32:
-       case FDPOLLDRVSTAT32:
-               karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
-               if (!karg)
-                       return -ENOMEM;
-               break;
-       case FDGETFDCSTAT32:
-               karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
-               if (!karg)
-                       return -ENOMEM;
-               break;
-       case FDWERRORGET32:
-               karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
-               if (!karg)
-                       return -ENOMEM;
-               break;
-       default:
-               return -EINVAL;
-       }
-       set_fs(KERNEL_DS);
-       err = __blkdev_driver_ioctl(bdev, mode, kcmd, (unsigned long)karg);
-       set_fs(old_fs);
-       if (err)
-               goto out;
-       switch (cmd) {
-       case FDGETPRM32:
-       {
-               struct floppy_struct *f = karg;
-               struct compat_floppy_struct __user *uf = compat_ptr(arg);
-
-               err = __put_user(f->size, &uf->size);
-               err |= __put_user(f->sect, &uf->sect);
-               err |= __put_user(f->head, &uf->head);
-               err |= __put_user(f->track, &uf->track);
-               err |= __put_user(f->stretch, &uf->stretch);
-               err |= __put_user(f->gap, &uf->gap);
-               err |= __put_user(f->rate, &uf->rate);
-               err |= __put_user(f->spec1, &uf->spec1);
-               err |= __put_user(f->fmt_gap, &uf->fmt_gap);
-               err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name);
-               break;
-       }
-       case FDGETDRVPRM32:
-       {
-               struct compat_floppy_drive_params __user *uf;
-               struct floppy_drive_params *f = karg;
-
-               uf = compat_ptr(arg);
-               err = __put_user(f->cmos, &uf->cmos);
-               err |= __put_user(f->max_dtr, &uf->max_dtr);
-               err |= __put_user(f->hlt, &uf->hlt);
-               err |= __put_user(f->hut, &uf->hut);
-               err |= __put_user(f->srt, &uf->srt);
-               err |= __put_user(f->spinup, &uf->spinup);
-               err |= __put_user(f->spindown, &uf->spindown);
-               err |= __put_user(f->spindown_offset, &uf->spindown_offset);
-               err |= __put_user(f->select_delay, &uf->select_delay);
-               err |= __put_user(f->rps, &uf->rps);
-               err |= __put_user(f->tracks, &uf->tracks);
-               err |= __put_user(f->timeout, &uf->timeout);
-               err |= __put_user(f->interleave_sect, &uf->interleave_sect);
-               err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors));
-               err |= __put_user(f->flags, &uf->flags);
-               err |= __put_user(f->read_track, &uf->read_track);
-               err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect));
-               err |= __put_user(f->checkfreq, &uf->checkfreq);
-               err |= __put_user(f->native_format, &uf->native_format);
-               break;
-       }
-       case FDGETDRVSTAT32:
-       case FDPOLLDRVSTAT32:
-       {
-               struct compat_floppy_drive_struct __user *uf;
-               struct floppy_drive_struct *f = karg;
-
-               uf = compat_ptr(arg);
-               err = __put_user(f->flags, &uf->flags);
-               err |= __put_user(f->spinup_date, &uf->spinup_date);
-               err |= __put_user(f->select_date, &uf->select_date);
-               err |= __put_user(f->first_read_date, &uf->first_read_date);
-               err |= __put_user(f->probed_format, &uf->probed_format);
-               err |= __put_user(f->track, &uf->track);
-               err |= __put_user(f->maxblock, &uf->maxblock);
-               err |= __put_user(f->maxtrack, &uf->maxtrack);
-               err |= __put_user(f->generation, &uf->generation);
-               err |= __put_user(f->keep_data, &uf->keep_data);
-               err |= __put_user(f->fd_ref, &uf->fd_ref);
-               err |= __put_user(f->fd_device, &uf->fd_device);
-               err |= __put_user(f->last_checked, &uf->last_checked);
-               err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
-               err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
-               break;
-       }
-       case FDGETFDCSTAT32:
-       {
-               struct compat_floppy_fdc_state __user *uf;
-               struct floppy_fdc_state *f = karg;
-
-               uf = compat_ptr(arg);
-               err = __put_user(f->spec1, &uf->spec1);
-               err |= __put_user(f->spec2, &uf->spec2);
-               err |= __put_user(f->dtr, &uf->dtr);
-               err |= __put_user(f->version, &uf->version);
-               err |= __put_user(f->dor, &uf->dor);
-               err |= __put_user(f->address, &uf->address);
-               err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address),
-                                  (char *)&f->address + sizeof(f->address), sizeof(int));
-               err |= __put_user(f->driver_version, &uf->driver_version);
-               err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
-               break;
-       }
-       case FDWERRORGET32:
-       {
-               struct compat_floppy_write_errors __user *uf;
-               struct floppy_write_errors *f = karg;
-
-               uf = compat_ptr(arg);
-               err = __put_user(f->write_errors, &uf->write_errors);
-               err |= __put_user(f->first_error_sector, &uf->first_error_sector);
-               err |= __put_user(f->first_error_generation, &uf->first_error_generation);
-               err |= __put_user(f->last_error_sector, &uf->last_error_sector);
-               err |= __put_user(f->last_error_generation, &uf->last_error_generation);
-               err |= __put_user(f->badness, &uf->badness);
-               break;
-       }
-       default:
-               break;
-       }
-       if (err)
-               err = -EFAULT;
-
-out:
-       kfree(karg);
-       return err;
-}
-
 static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned cmd, unsigned long arg)
 {
@@ -537,16 +224,6 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
        case HDIO_GET_ADDRESS:
        case HDIO_GET_BUSSTATE:
                return compat_hdio_ioctl(bdev, mode, cmd, arg);
-       case FDSETPRM32:
-       case FDDEFPRM32:
-       case FDGETPRM32:
-       case FDSETDRVPRM32:
-       case FDGETDRVPRM32:
-       case FDGETDRVSTAT32:
-       case FDPOLLDRVSTAT32:
-       case FDGETFDCSTAT32:
-       case FDWERRORGET32:
-               return compat_fd_ioctl(bdev, mode, cmd, arg);
        case CDROMREADAUDIO:
                return compat_cdrom_read_audio(bdev, mode, cmd, arg);
        case CDROM_SEND_PACKET:
@@ -566,23 +243,6 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
        case HDIO_DRIVE_CMD:
        /* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
        case 0x330:
-       /* 0x02 -- Floppy ioctls */
-       case FDMSGON:
-       case FDMSGOFF:
-       case FDSETEMSGTRESH:
-       case FDFLUSH:
-       case FDWERRORCLR:
-       case FDSETMAXERRS:
-       case FDGETMAXERRS:
-       case FDGETDRVTYP:
-       case FDEJECT:
-       case FDCLRPRM:
-       case FDFMTBEG:
-       case FDFMTEND:
-       case FDRESET:
-       case FDTWADDLE:
-       case FDFMTTRK:
-       case FDRAWCMD:
        /* CDROM stuff */
        case CDROMPAUSE:
        case CDROMRESUME:
index 12ad3e3..73b56f2 100644 (file)
@@ -34,6 +34,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
                        const u8 *key, unsigned int keylen)
 {
        struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+       be128 k;
 
        if (keylen != GHASH_BLOCK_SIZE) {
                crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -42,7 +43,12 @@ static int ghash_setkey(struct crypto_shash *tfm,
 
        if (ctx->gf128)
                gf128mul_free_4k(ctx->gf128);
-       ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
+
+       BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
+       memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
+       ctx->gf128 = gf128mul_init_4k_lle(&k);
+       memzero_explicit(&k, GHASH_BLOCK_SIZE);
+
        if (!ctx->gf128)
                return -ENOMEM;
 
index 91fe6c3..d726b03 100644 (file)
@@ -3225,7 +3225,8 @@ static void binder_transaction(struct binder_proc *proc,
        }
        off_end = (void *)off_start + tr->offsets_size;
        sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
-       sg_buf_end = sg_bufp + extra_buffers_size;
+       sg_buf_end = sg_bufp + extra_buffers_size -
+               ALIGN(secctx_sz, sizeof(u64));
        off_min = 0;
        for (; offp < off_end; offp++) {
                struct binder_object_header *hdr;
index c8614f3..6bea284 100644 (file)
@@ -219,6 +219,11 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
        if (mm) {
                down_read(&mm->mmap_sem);
+               if (!mmget_still_valid(mm)) {
+                       if (allocate == 0)
+                               goto free_range;
+                       goto err_no_vma;
+               }
                vma = alloc->vma;
        }
 
index cd2eab6..65371e1 100644 (file)
@@ -300,6 +300,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
                hpriv->phys[port] = NULL;
                rc = 0;
                break;
+       case -EPROBE_DEFER:
+               /* Do not complain yet */
+               break;
 
        default:
                dev_err(dev,
index 18de4c4..1d8901f 100644 (file)
@@ -703,6 +703,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
        unsigned int offset;
        unsigned char *buf;
 
+       if (!qc->cursg) {
+               qc->curbytes = qc->nbytes;
+               return;
+       }
        if (qc->curbytes == qc->nbytes - qc->sect_size)
                ap->hsm_task_state = HSM_ST_LAST;
 
@@ -742,6 +746,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 
        if (qc->cursg_ofs == qc->cursg->length) {
                qc->cursg = sg_next(qc->cursg);
+               if (!qc->cursg)
+                       ap->hsm_task_state = HSM_ST_LAST;
                qc->cursg_ofs = 0;
        }
 }
index 7017a81..0838562 100644 (file)
@@ -55,7 +55,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
        unsigned int ret;
        struct rm_feature_desc *desc;
        struct ata_taskfile tf;
-       static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
+       static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_GET_CONFIGURATION,
                        2,      /* only 1 feature descriptor requested */
                        0, 3,   /* 3, removable medium feature */
                        0, 0, 0,/* reserved */
index 31c6010..7fa8401 100644 (file)
@@ -199,7 +199,7 @@ config ATM_NICSTAR_USE_SUNI
          make the card work).
 
 config ATM_NICSTAR_USE_IDT77105
-       bool "Use IDT77015 PHY driver (25Mbps)"
+       bool "Use IDT77105 PHY driver (25Mbps)"
        depends on ATM_NICSTAR
        help
          Support for the PHYsical layer chip in ForeRunner LE25 cards. In
index 7d00f29..860a33a 100644 (file)
@@ -63,6 +63,7 @@
 #include <asm/byteorder.h>  
 #include <linux/vmalloc.h>
 #include <linux/jiffies.h>
+#include <linux/nospec.h>
 #include "iphase.h"              
 #include "suni.h"                
 #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
@@ -2755,8 +2756,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
    }
    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
    board = ia_cmds.status;
-   if ((board < 0) || (board > iadev_count))
-         board = 0;    
+
+       if ((board < 0) || (board > iadev_count))
+               board = 0;
+       board = array_index_nospec(board, iadev_count + 1);
+
    iadev = ia_dev[board];
    switch (ia_cmds.cmd) {
    case MEMDUMP:
index cef0f5c..4e27b3a 100644 (file)
@@ -862,12 +862,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
  */
 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
 {
+       unsigned int ref;
+
        /* see if we live in a "glue" directory */
        if (!live_in_glue_dir(glue_dir, dev))
                return;
 
        mutex_lock(&gdp_mutex);
-       if (!kobject_has_children(glue_dir))
+       /**
+        * There is a race condition between removing a glue directory
+        * and adding a new device under that glue directory.
+        *
+        * CPU1:                                         CPU2:
+        *
+        * device_add()
+        *   get_device_parent()
+        *     class_dir_create_and_add()
+        *       kobject_add_internal()
+        *         create_dir()    // create glue_dir
+        *
+        *                                               device_add()
+        *                                                 get_device_parent()
+        *                                                   kobject_get() // get glue_dir
+        *
+        * device_del()
+        *   cleanup_glue_dir()
+        *     kobject_del(glue_dir)
+        *
+        *                                               kobject_add()
+        *                                                 kobject_add_internal()
+        *                                                   create_dir() // in glue_dir
+        *                                                     sysfs_create_dir_ns()
+        *                                                       kernfs_create_dir_ns(sd)
+        *
+        *       sysfs_remove_dir() // glue_dir->sd=NULL
+        *       sysfs_put()        // free glue_dir->sd
+        *
+        *                                                         // sd is freed
+        *                                                         kernfs_new_node(sd)
+        *                                                           kernfs_get(glue_dir)
+        *                                                           kernfs_add_one()
+        *                                                           kernfs_put()
+        *
+        * Before CPU1 removes the last child device under the glue dir, if
+        * CPU2 adds a new device under the glue dir, the glue_dir kobject
+        * reference count is increased to 2 in kobject_get(), and CPU2 has
+        * already called kernfs_create_dir_ns(). Meanwhile, CPU1 calls
+        * sysfs_remove_dir() and sysfs_put(), which frees glue_dir->sd.
+        *
+        * CPU2 then sees a stale "empty" but still potentially used glue dir
+        * in kernfs_new_node().
+        *
+        * To avoid this, make sure that the kernfs_node for glue_dir is
+        * released on CPU1 only when the refcount of the glue_dir kobject
+        * is 1.
+        */
+       ref = atomic_read(&glue_dir->kref.refcount);
+       if (!kobject_has_children(glue_dir) && !--ref)
                kobject_del(glue_dir);
        kobject_put(glue_dir);
        mutex_unlock(&gdp_mutex);
index b3a62e9..6b96ebb 100644 (file)
@@ -1358,6 +1358,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                                             map->format.reg_bytes +
                                             map->format.pad_bytes,
                                             val, val_len);
+       else
+               ret = -ENOTSUPP;
 
        /* If that didn't work fall back on linearising by hand. */
        if (ret == -ENOTSUPP) {
index 2daa5b8..a12a163 100644 (file)
@@ -192,6 +192,7 @@ static int print_unex = 1;
 #include <linux/io.h>
 #include <linux/uaccess.h>
 #include <linux/async.h>
+#include <linux/compat.h>
 
 /*
  * PS/2 floppies have much slower step rates than regular floppies.
@@ -2113,6 +2114,9 @@ static void setup_format_params(int track)
        raw_cmd->kernel_data = floppy_track_buffer;
        raw_cmd->length = 4 * F_SECT_PER_TRACK;
 
+       if (!F_SECT_PER_TRACK)
+               return;
+
        /* allow for about 30ms for data transport per track */
        head_shift = (F_SECT_PER_TRACK + 5) / 6;
 
@@ -3233,8 +3237,12 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
        int cnt;
 
        /* sanity checking for parameters. */
-       if (g->sect <= 0 ||
-           g->head <= 0 ||
+       if ((int)g->sect <= 0 ||
+           (int)g->head <= 0 ||
+           /* check for overflow in max_sector */
+           (int)(g->sect * g->head) <= 0 ||
+           /* check for zero in F_SECT_PER_TRACK */
+           (unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 ||
            g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
            /* check if reserved bits are set */
            (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
@@ -3378,6 +3386,24 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
        return 0;
 }
 
+static bool valid_floppy_drive_params(const short autodetect[8],
+               int native_format)
+{
+       size_t floppy_type_size = ARRAY_SIZE(floppy_type);
+       size_t i = 0;
+
+       for (i = 0; i < 8; ++i) {
+               if (autodetect[i] < 0 ||
+                   autodetect[i] >= floppy_type_size)
+                       return false;
+       }
+
+       if (native_format < 0 || native_format >= floppy_type_size)
+               return false;
+
+       return true;
+}
+
 static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
                    unsigned long param)
 {
@@ -3504,6 +3530,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
                SUPBOUND(size, strlen((const char *)outparam) + 1);
                break;
        case FDSETDRVPRM:
+               if (!valid_floppy_drive_params(inparam.dp.autodetect,
+                               inparam.dp.native_format))
+                       return -EINVAL;
                *UDP = inparam.dp;
                break;
        case FDGETDRVPRM:
@@ -3569,6 +3598,332 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
        return ret;
 }
 
+#ifdef CONFIG_COMPAT
+
+struct compat_floppy_drive_params {
+       char            cmos;
+       compat_ulong_t  max_dtr;
+       compat_ulong_t  hlt;
+       compat_ulong_t  hut;
+       compat_ulong_t  srt;
+       compat_ulong_t  spinup;
+       compat_ulong_t  spindown;
+       unsigned char   spindown_offset;
+       unsigned char   select_delay;
+       unsigned char   rps;
+       unsigned char   tracks;
+       compat_ulong_t  timeout;
+       unsigned char   interleave_sect;
+       struct floppy_max_errors max_errors;
+       char            flags;
+       char            read_track;
+       short           autodetect[8];
+       compat_int_t    checkfreq;
+       compat_int_t    native_format;
+};
+
+struct compat_floppy_drive_struct {
+       signed char     flags;
+       compat_ulong_t  spinup_date;
+       compat_ulong_t  select_date;
+       compat_ulong_t  first_read_date;
+       short           probed_format;
+       short           track;
+       short           maxblock;
+       short           maxtrack;
+       compat_int_t    generation;
+       compat_int_t    keep_data;
+       compat_int_t    fd_ref;
+       compat_int_t    fd_device;
+       compat_int_t    last_checked;
+       compat_caddr_t dmabuf;
+       compat_int_t    bufblocks;
+};
+
+struct compat_floppy_fdc_state {
+       compat_int_t    spec1;
+       compat_int_t    spec2;
+       compat_int_t    dtr;
+       unsigned char   version;
+       unsigned char   dor;
+       compat_ulong_t  address;
+       unsigned int    rawcmd:2;
+       unsigned int    reset:1;
+       unsigned int    need_configure:1;
+       unsigned int    perp_mode:2;
+       unsigned int    has_fifo:1;
+       unsigned int    driver_version;
+       unsigned char   track[4];
+};
+
+struct compat_floppy_write_errors {
+       unsigned int    write_errors;
+       compat_ulong_t  first_error_sector;
+       compat_int_t    first_error_generation;
+       compat_ulong_t  last_error_sector;
+       compat_int_t    last_error_generation;
+       compat_uint_t   badness;
+};
+
+#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
+#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
+#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
+#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
+#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
+#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
+#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
+#define FDWERRORGET32  _IOR(2, 0x17, struct compat_floppy_write_errors)
+
+static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+                   struct compat_floppy_struct __user *arg)
+{
+       struct floppy_struct v;
+       int drive, type;
+       int err;
+
+       BUILD_BUG_ON(offsetof(struct floppy_struct, name) !=
+                    offsetof(struct compat_floppy_struct, name));
+
+       if (!(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL)))
+               return -EPERM;
+
+       memset(&v, 0, sizeof(struct floppy_struct));
+       if (copy_from_user(&v, arg, offsetof(struct floppy_struct, name)))
+               return -EFAULT;
+
+       mutex_lock(&floppy_mutex);
+       drive = (long)bdev->bd_disk->private_data;
+       type = ITYPE(UDRS->fd_device);
+       err = set_geometry(cmd == FDSETPRM32 ? FDSETPRM : FDDEFPRM,
+                       &v, drive, type, bdev);
+       mutex_unlock(&floppy_mutex);
+       return err;
+}
+
+static int compat_get_prm(int drive,
+                         struct compat_floppy_struct __user *arg)
+{
+       struct compat_floppy_struct v;
+       struct floppy_struct *p;
+       int err;
+
+       memset(&v, 0, sizeof(v));
+       mutex_lock(&floppy_mutex);
+       err = get_floppy_geometry(drive, ITYPE(UDRS->fd_device), &p);
+       if (err) {
+               mutex_unlock(&floppy_mutex);
+               return err;
+       }
+       memcpy(&v, p, offsetof(struct floppy_struct, name));
+       mutex_unlock(&floppy_mutex);
+       if (copy_to_user(arg, &v, sizeof(struct compat_floppy_struct)))
+               return -EFAULT;
+       return 0;
+}
+
+static int compat_setdrvprm(int drive,
+                           struct compat_floppy_drive_params __user *arg)
+{
+       struct compat_floppy_drive_params v;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params)))
+               return -EFAULT;
+       if (!valid_floppy_drive_params(v.autodetect, v.native_format))
+               return -EINVAL;
+       mutex_lock(&floppy_mutex);
+       UDP->cmos = v.cmos;
+       UDP->max_dtr = v.max_dtr;
+       UDP->hlt = v.hlt;
+       UDP->hut = v.hut;
+       UDP->srt = v.srt;
+       UDP->spinup = v.spinup;
+       UDP->spindown = v.spindown;
+       UDP->spindown_offset = v.spindown_offset;
+       UDP->select_delay = v.select_delay;
+       UDP->rps = v.rps;
+       UDP->tracks = v.tracks;
+       UDP->timeout = v.timeout;
+       UDP->interleave_sect = v.interleave_sect;
+       UDP->max_errors = v.max_errors;
+       UDP->flags = v.flags;
+       UDP->read_track = v.read_track;
+       memcpy(UDP->autodetect, v.autodetect, sizeof(v.autodetect));
+       UDP->checkfreq = v.checkfreq;
+       UDP->native_format = v.native_format;
+       mutex_unlock(&floppy_mutex);
+       return 0;
+}
+
+static int compat_getdrvprm(int drive,
+                           struct compat_floppy_drive_params __user *arg)
+{
+       struct compat_floppy_drive_params v;
+
+       memset(&v, 0, sizeof(struct compat_floppy_drive_params));
+       mutex_lock(&floppy_mutex);
+       v.cmos = UDP->cmos;
+       v.max_dtr = UDP->max_dtr;
+       v.hlt = UDP->hlt;
+       v.hut = UDP->hut;
+       v.srt = UDP->srt;
+       v.spinup = UDP->spinup;
+       v.spindown = UDP->spindown;
+       v.spindown_offset = UDP->spindown_offset;
+       v.select_delay = UDP->select_delay;
+       v.rps = UDP->rps;
+       v.tracks = UDP->tracks;
+       v.timeout = UDP->timeout;
+       v.interleave_sect = UDP->interleave_sect;
+       v.max_errors = UDP->max_errors;
+       v.flags = UDP->flags;
+       v.read_track = UDP->read_track;
+       memcpy(v.autodetect, UDP->autodetect, sizeof(v.autodetect));
+       v.checkfreq = UDP->checkfreq;
+       v.native_format = UDP->native_format;
+       mutex_unlock(&floppy_mutex);
+
+       if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
+               return -EFAULT;
+       return 0;
+}
+
+static int compat_getdrvstat(int drive, bool poll,
+                           struct compat_floppy_drive_struct __user *arg)
+{
+       struct compat_floppy_drive_struct v;
+
+       memset(&v, 0, sizeof(struct compat_floppy_drive_struct));
+       mutex_lock(&floppy_mutex);
+
+       if (poll) {
+               if (lock_fdc(drive, true))
+                       goto Eintr;
+               if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
+                       goto Eintr;
+               process_fd_request();
+       }
+       v.spinup_date = UDRS->spinup_date;
+       v.select_date = UDRS->select_date;
+       v.first_read_date = UDRS->first_read_date;
+       v.probed_format = UDRS->probed_format;
+       v.track = UDRS->track;
+       v.maxblock = UDRS->maxblock;
+       v.maxtrack = UDRS->maxtrack;
+       v.generation = UDRS->generation;
+       v.keep_data = UDRS->keep_data;
+       v.fd_ref = UDRS->fd_ref;
+       v.fd_device = UDRS->fd_device;
+       v.last_checked = UDRS->last_checked;
+       v.dmabuf = (uintptr_t)UDRS->dmabuf;
+       v.bufblocks = UDRS->bufblocks;
+       mutex_unlock(&floppy_mutex);
+
+       if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
+               return -EFAULT;
+       return 0;
+Eintr:
+       mutex_unlock(&floppy_mutex);
+       return -EINTR;
+}
+
+static int compat_getfdcstat(int drive,
+                           struct compat_floppy_fdc_state __user *arg)
+{
+       struct compat_floppy_fdc_state v32;
+       struct floppy_fdc_state v;
+
+       mutex_lock(&floppy_mutex);
+       v = *UFDCS;
+       mutex_unlock(&floppy_mutex);
+
+       memset(&v32, 0, sizeof(struct compat_floppy_fdc_state));
+       v32.spec1 = v.spec1;
+       v32.spec2 = v.spec2;
+       v32.dtr = v.dtr;
+       v32.version = v.version;
+       v32.dor = v.dor;
+       v32.address = v.address;
+       v32.rawcmd = v.rawcmd;
+       v32.reset = v.reset;
+       v32.need_configure = v.need_configure;
+       v32.perp_mode = v.perp_mode;
+       v32.has_fifo = v.has_fifo;
+       v32.driver_version = v.driver_version;
+       memcpy(v32.track, v.track, 4);
+       if (copy_to_user(arg, &v32, sizeof(struct compat_floppy_fdc_state)))
+               return -EFAULT;
+       return 0;
+}
+
+static int compat_werrorget(int drive,
+                           struct compat_floppy_write_errors __user *arg)
+{
+       struct compat_floppy_write_errors v32;
+       struct floppy_write_errors v;
+
+       memset(&v32, 0, sizeof(struct compat_floppy_write_errors));
+       mutex_lock(&floppy_mutex);
+       v = *UDRWE;
+       mutex_unlock(&floppy_mutex);
+       v32.write_errors = v.write_errors;
+       v32.first_error_sector = v.first_error_sector;
+       v32.first_error_generation = v.first_error_generation;
+       v32.last_error_sector = v.last_error_sector;
+       v32.last_error_generation = v.last_error_generation;
+       v32.badness = v.badness;
+       if (copy_to_user(arg, &v32, sizeof(struct compat_floppy_write_errors)))
+               return -EFAULT;
+       return 0;
+}
+
+static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+                   unsigned long param)
+{
+       int drive = (long)bdev->bd_disk->private_data;
+       switch (cmd) {
+       case FDMSGON:
+       case FDMSGOFF:
+       case FDSETEMSGTRESH:
+       case FDFLUSH:
+       case FDWERRORCLR:
+       case FDEJECT:
+       case FDCLRPRM:
+       case FDFMTBEG:
+       case FDRESET:
+       case FDTWADDLE:
+               return fd_ioctl(bdev, mode, cmd, param);
+       case FDSETMAXERRS:
+       case FDGETMAXERRS:
+       case FDGETDRVTYP:
+       case FDFMTEND:
+       case FDFMTTRK:
+       case FDRAWCMD:
+               return fd_ioctl(bdev, mode, cmd,
+                               (unsigned long)compat_ptr(param));
+       case FDSETPRM32:
+       case FDDEFPRM32:
+               return compat_set_geometry(bdev, mode, cmd, compat_ptr(param));
+       case FDGETPRM32:
+               return compat_get_prm(drive, compat_ptr(param));
+       case FDSETDRVPRM32:
+               return compat_setdrvprm(drive, compat_ptr(param));
+       case FDGETDRVPRM32:
+               return compat_getdrvprm(drive, compat_ptr(param));
+       case FDPOLLDRVSTAT32:
+               return compat_getdrvstat(drive, true, compat_ptr(param));
+       case FDGETDRVSTAT32:
+               return compat_getdrvstat(drive, false, compat_ptr(param));
+       case FDGETFDCSTAT32:
+               return compat_getfdcstat(drive, compat_ptr(param));
+       case FDWERRORGET32:
+               return compat_werrorget(drive, compat_ptr(param));
+       }
+       return -EINVAL;
+}
+#endif
+
 static void __init config_types(void)
 {
        bool has_drive = false;
@@ -3885,6 +4240,9 @@ static const struct block_device_operations floppy_fops = {
        .getgeo                 = fd_getgeo,
        .check_events           = floppy_check_events,
        .revalidate_disk        = floppy_revalidate,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl           = fd_compat_ioctl,
+#endif
 };
 
 /*
index 0db4a00..adcc8f5 100644 (file)
@@ -399,6 +399,9 @@ int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
                return err;
        }
 
+       /* Give the controller some time to get ready to receive the NVM */
+       msleep(10);
+
        /* Download NVM configuration */
        config.type = TLV_TYPE_NVM;
        snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
index d776dfd..16f2131 100644 (file)
@@ -101,6 +101,9 @@ static int ath_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        ath = kzalloc(sizeof(*ath), GFP_KERNEL);
        if (!ath)
                return -ENOMEM;
index f9b569e..20a1b4d 100644 (file)
@@ -279,6 +279,9 @@ static int bcm_open(struct hci_uart *hu)
 
        bt_dev_dbg(hu->hdev, "hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
        if (!bcm)
                return -ENOMEM;
index d0b615a..9833b53 100644 (file)
@@ -729,6 +729,11 @@ static int bcsp_close(struct hci_uart *hu)
        skb_queue_purge(&bcsp->rel);
        skb_queue_purge(&bcsp->unrel);
 
+       if (bcsp->rx_skb) {
+               kfree_skb(bcsp->rx_skb);
+               bcsp->rx_skb = NULL;
+       }
+
        kfree(bcsp);
        return 0;
 }
index 0c63fce..929674e 100644 (file)
@@ -407,6 +407,9 @@ static int intel_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        intel = kzalloc(sizeof(*intel), GFP_KERNEL);
        if (!intel)
                return -ENOMEM;
index 85bb31b..dd7f811 100644 (file)
@@ -276,6 +276,15 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        return 0;
 }
 
+/* Check whether the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+       if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+               return true;
+
+       return false;
+}
+
 /* Flow control or un-flow control the device */
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
 {
index 74237af..b3a049a 100644 (file)
@@ -106,6 +106,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu);
 int hci_uart_init_ready(struct hci_uart *hu);
 void hci_uart_init_tty(struct hci_uart *hu);
 void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
 void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
                         unsigned int oper_speed);
index c5c4038..efdff5d 100644 (file)
@@ -633,5 +633,12 @@ config MSM_RDBG
          for a debugger running on a host PC to communicate with a remote
          stub running on peripheral subsystems such as the ADSP, MODEM etc.
 
+config QCOM_SDIO_CLIENT
+       bool "QCOM_SDIO_CLIENT support"
+       depends on SDIO_QCN
+       default y
+       help
+         Interface used for SDIO and Sahara user space applications.
+
 endmenu
 
index 77697b8..dc00ba4 100644 (file)
@@ -67,3 +67,4 @@ ifdef CONFIG_COMPAT
 obj-$(CONFIG_MSM_ADSPRPC)       += adsprpc_compat.o
 endif
 obj-$(CONFIG_MSM_RDBG)         += rdbg.o
+obj-$(CONFIG_QCOM_SDIO_CLIENT) += qti_sdio_client.o
index 0e7befd..b898007 100644 (file)
@@ -652,7 +652,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
        struct fastrpc_session_ctx *sess;
        struct fastrpc_apps *apps = fl->apps;
        int cid = fl->cid;
-       struct fastrpc_channel_ctx *chan = &apps->channel[cid];
+       struct fastrpc_channel_ctx *chan = NULL;
        struct fastrpc_mmap *map = NULL;
        struct dma_attrs attrs;
        dma_addr_t region_start = 0;
@@ -660,6 +660,11 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
        unsigned long flags;
        int err = 0, vmid;
 
+       VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
+       if (err)
+               goto bail;
+       chan = &apps->channel[cid];
+
        if (!fastrpc_mmap_find(fl, fd, va, len, mflags, ppmap))
                return 0;
        map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -2016,6 +2021,9 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
        ioctl.attrs = NULL;
        VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
                FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+       if (err)
+               pr_err("adsprpc: %s: releasing DSP process failed for %s, returned 0x%x",
+                                       __func__, current->comm, err);
 bail:
        return err;
 }
index ba629ab..1bcacb8 100644 (file)
@@ -25,7 +25,7 @@ endmenu
 menu "HSIC/SMUX support for DIAG"
 
 config DIAGFWD_BRIDGE_CODE
-       depends on USB_QCOM_DIAG_BRIDGE || MSM_MHI
+       depends on USB_QCOM_DIAG_BRIDGE || MSM_MHI || QCOM_SDIO_CLIENT
        default y
        bool "Enable QSC/9K DIAG traffic over SMUX/HSIC"
        help
index d57ebd8..40c5387 100644 (file)
@@ -1,5 +1,8 @@
 obj-$(CONFIG_DIAG_CHAR) := diagchar.o
 obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_bridge.o
+
 obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_hsic.o
 obj-$(CONFIG_MSM_MHI) += diagfwd_mhi.o
+obj-$(CONFIG_QCOM_SDIO_CLIENT) += diagfwd_sdio.o
+
 diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_glink.o diagfwd_peripheral.o diagfwd_smd.o diagfwd_socket.o diag_mux.o diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
index ad00b91..2301e1e 100644 (file)
@@ -2078,6 +2078,11 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
        if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
                return ret;
 
+       reg_entry.cmd_code = 0;
+       reg_entry.subsys_id = 0;
+       reg_entry.cmd_code_hi = 0;
+       reg_entry.cmd_code_lo = 0;
+
        if (header_len >= (sizeof(uint8_t)))
                reg_entry.cmd_code = header->cmd_code;
        if (header_len >= (2 * sizeof(uint8_t)))
index 519c01e..775a66d 100644 (file)
@@ -901,7 +901,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
                goto end;
        if (mask_size + write_len > dest_len)
                mask_size = dest_len - write_len;
-       memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
+       if (mask_size && src_len >= header_len + mask_size)
+               memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
        write_len += mask_size;
        for (i = 0; i < NUM_PERIPHERALS; i++) {
                if (!diag_check_update(i, pid))
@@ -1179,7 +1180,7 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
        int rsp_header_len = sizeof(struct diag_log_config_rsp_t);
        uint32_t mask_size = 0;
        struct diag_log_mask_t *log_item = NULL;
-       struct diag_log_config_req_t *req;
+       struct diag_log_config_get_req_t *req;
        struct diag_log_config_rsp_t rsp;
        struct diag_mask_info *mask_info = NULL;
        struct diag_md_session_t *info = NULL;
@@ -1189,7 +1190,7 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
 
        mask_info = (!info) ? &log_mask : info->log_mask;
        if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
-               src_len < sizeof(struct diag_log_config_req_t)) {
+               src_len < sizeof(struct diag_log_config_get_req_t)) {
                pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
                       __func__, src_buf, src_len, dest_buf, dest_len,
                       mask_info);
@@ -1208,7 +1209,7 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
                return 0;
        }
 
-       req = (struct diag_log_config_req_t *)src_buf;
+       req = (struct diag_log_config_get_req_t *)src_buf;
        read_len += req_header_len;
 
        rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
index a736ff2..5c76825 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, 2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2015, 2018-2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -40,6 +40,13 @@ struct diag_msg_mask_t {
        uint32_t *ptr;
 };
 
+struct diag_log_config_get_req_t {
+       uint8_t cmd_code;
+       uint8_t padding[3];
+       uint32_t sub_cmd;
+       uint32_t equip_id;
+} __packed;
+
 struct diag_log_config_req_t {
        uint8_t cmd_code;
        uint8_t padding[3];
index bed9ef1..47e0dab 100644 (file)
@@ -1161,7 +1161,7 @@ static void diag_remote_exit(void)
        return;
 }
 
-int diagfwd_bridge_init(bool use_mhi)
+int diagfwd_bridge_init(int xprt)
 {
        return 0;
 }
@@ -3788,7 +3788,7 @@ static int diag_mhi_probe(struct platform_device *pdev)
                diag_remote_exit();
                return ret;
        }
-       ret = diagfwd_bridge_init(true);
+       ret = diagfwd_bridge_init(1);
        if (ret) {
                diagfwd_bridge_exit();
                return ret;
@@ -3821,7 +3821,7 @@ static int diagfwd_usb_probe(struct platform_device *pdev)
                diag_remote_exit();
                return ret;
        }
-       ret = diagfwd_bridge_init(false);
+       ret = diagfwd_bridge_init(0);
        if (ret) {
                diagfwd_bridge_exit();
                return ret;
@@ -3844,6 +3844,39 @@ static struct platform_driver diagfwd_usb_driver = {
        },
 };
 
+static int diagfwd_sdio_probe(struct platform_device *pdev)
+{
+       int ret;
+
+       driver->pdev = pdev;
+       ret = diag_remote_init();
+       if (ret) {
+               diag_remote_exit();
+               return ret;
+       }
+       ret = diagfwd_bridge_init(2);
+       if (ret) {
+               diagfwd_bridge_exit();
+               return ret;
+       }
+       pr_debug("diag: usb device is ready\n");
+       return 0;
+}
+
+static const struct of_device_id diagfwd_sdio_table[] = {
+       {.compatible = "qcom,diagfwd-sdio"},
+       {},
+};
+
+static struct platform_driver diagfwd_sdio_driver = {
+       .probe = diagfwd_sdio_probe,
+       .driver = {
+               .name = "DIAGFWD SDIO Platform",
+               .owner = THIS_MODULE,
+               .of_match_table = diagfwd_sdio_table,
+       },
+};
+
 static int __init diagchar_init(void)
 {
        dev_t dev;
@@ -3971,6 +4004,7 @@ static int __init diagchar_init(void)
        pr_debug("diagchar initialized now");
        platform_driver_register(&diag_mhi_driver);
        platform_driver_register(&diagfwd_usb_driver);
+       platform_driver_register(&diagfwd_sdio_driver);
        return 0;
 
 fail:
index da24a11..bfe85d9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,7 @@
 #include "diagfwd_bridge.h"
 #ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
 #include "diagfwd_hsic.h"
+#include "diagfwd_sdio.h"
 #endif
 #ifdef CONFIG_MSM_MHI
 #include "diagfwd_mhi.h"
@@ -42,6 +43,13 @@ static int diag_mhi_init(void)
 }
 #endif
 
+#ifndef CONFIG_QCOM_SDIO_CLIENT
+static int diag_sdio_init(void)
+{
+       return -EINVAL;
+}
+#endif
+
 #define BRIDGE_TO_MUX(x)       (x + DIAG_MUX_BRIDGE_BASE)
 
 struct diagfwd_bridge_info bridge_info[NUM_REMOTE_DEV] = {
@@ -271,14 +279,17 @@ int diag_remote_dev_write_done(int id, unsigned char *buf, int len, int ctxt)
        return err;
 }
 
-int diagfwd_bridge_init(bool use_mhi)
+int diagfwd_bridge_init(int xprt)
 {
        int err = 0;
 
-       if (use_mhi)
+       if (xprt == 1)
                err = diag_mhi_init();
+       else if (xprt == 2)
+               err = diag_sdio_init();
        else
                err = diag_hsic_init();
+
        if (err)
                goto fail;
        return 0;
index 250ef07..b595e53 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, 2018-2019, The Linux Foundation.
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -51,7 +52,7 @@ struct diagfwd_bridge_info {
 };
 
 extern struct diagfwd_bridge_info bridge_info[NUM_REMOTE_DEV];
-int diagfwd_bridge_init(bool use_mhi);
+int diagfwd_bridge_init(int xprt);
 void diagfwd_bridge_exit(void);
 int diagfwd_bridge_close(int id);
 int diagfwd_bridge_write(int id, unsigned char *buf, int len);
diff --git a/drivers/char/diag/diagfwd_sdio.c b/drivers/char/diag/diagfwd_sdio.c
new file mode 100644 (file)
index 0000000..d50c916
--- /dev/null
@@ -0,0 +1,428 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <asm/current.h>
+#include "diagmem.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_sdio.h"
+
+#define DIAG_SDIO_STRING_SZ    11
+
+struct diag_sdio_info diag_sdio[NUM_SDIO_DEV] = {
+       {
+               .id = SDIO_1,
+               .dev_id = DIAGFWD_MDM,
+               .name = "MDM",
+               .mempool = POOL_TYPE_MDM,
+               .opened = 0,
+               .enabled = 0,
+               .suspended = 0,
+               .sdio_wq = NULL
+       },
+};
+
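+/*
+ * Read-completion callback from the SDIO bridge: forward the buffer to the
+ * diag remote-dev layer, or free it back to the mempool and requeue a read
+ * when the channel is closed or the transfer failed.
+ */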
+static void diag_sdio_read_complete(void *ctxt, char *buf, int len,
+                                   int actual_size)
+{
+       int err = 0;
+       int index = (int)(uintptr_t)ctxt;
+       struct diag_sdio_info *ch = NULL;
+
+       if (index < 0 || index >= NUM_SDIO_DEV) {
+               pr_err("diag: Invalid index %d in %s\n", index, __func__);
+               return;
+       }
+       ch = &diag_sdio[index];
+
+       /*
+        * Don't pass on the buffer if the channel was closed while a read was
+        * pending. The actual size can also be a negative error code, in
+        * which case the buffer must not be passed on either.
+        */
+       if (!ch->opened || actual_size <= 0)
+               goto fail;
+       err = diag_remote_dev_read_done(ch->dev_id, buf, actual_size);
+       if (err)
+               goto fail;
+       return;
+
+fail:
+       diagmem_free(driver, buf, ch->mempool);
+       queue_work(ch->sdio_wq, &ch->read_work);
+       return;
+}
+
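+/*
+ * Write-completion callback from the SDIO bridge: report the result to the
+ * diag remote-dev layer so the forwarded buffer can be reclaimed.
+ */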
+static void diag_sdio_write_complete(void *ctxt, char *buf, int len,
+                                    int actual_size)
+{
+       int index = (int)(uintptr_t)ctxt;
+       struct diag_sdio_info *ch = NULL;
+
+       if (index < 0 || index >= NUM_SDIO_DEV) {
+               pr_err("diag: Invalid index %d in %s\n", index, __func__);
+               return;
+       }
+
+       ch = &diag_sdio[index];
+       diag_remote_dev_write_done(ch->dev_id, buf, actual_size, ch->id);
+       return;
+}
+
+static int diag_sdio_suspend(void *ctxt)
+{
+       int index = (int)(uintptr_t)ctxt;
+       unsigned long flags;
+       struct diag_sdio_info *ch = NULL;
+
+       if (index < 0 || index >= NUM_SDIO_DEV) {
+               pr_err("diag: Invalid index %d in %s\n", index, __func__);
+               return -EINVAL;
+       }
+
+       ch = &diag_sdio[index];
+       spin_lock_irqsave(&ch->lock, flags);
+       ch->suspended = 1;
+       spin_unlock_irqrestore(&ch->lock, flags);
+       return 0;
+}
+
+static void diag_sdio_resume(void *ctxt)
+{
+       int index = (int)(uintptr_t)ctxt;
+       unsigned long flags;
+       struct diag_sdio_info *ch = NULL;
+
+       if (index < 0 || index >= NUM_SDIO_DEV) {
+               pr_err("diag: Invalid index %d in %s\n", index, __func__);
+               return;
+       }
+       ch = &diag_sdio[index];
+       spin_lock_irqsave(&ch->lock, flags);
+       ch->suspended = 0;
+       spin_unlock_irqrestore(&ch->lock, flags);
+       queue_work(ch->sdio_wq, &(ch->read_work));
+}
+
+static struct diag_bridge_ops diag_sdio_ops[NUM_SDIO_DEV] = {
+       {
+               .ctxt = (void *)SDIO_1,
+               .read_complete_cb = diag_sdio_read_complete,
+               .write_complete_cb = diag_sdio_write_complete,
+               .suspend = diag_sdio_suspend,
+               .resume = diag_sdio_resume,
+       },
+};
+
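+/*
+ * Open the SDIO client channel, initialize its mempool, notify the diag
+ * bridge that the channel is up and kick off the first read.
+ */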
+static int sdio_open(int id)
+{
+       int err = 0;
+       unsigned long flags;
+       struct diag_sdio_info *ch = NULL;
+
+       if (id < 0 || id >= NUM_SDIO_DEV) {
+               pr_err("diag: Invalid index %d in %s\n", id, __func__);
+               return -EINVAL;
+       }
+
+       ch = &diag_sdio[id];
+       if (!ch->enabled)
+               return -ENODEV;
+
+       if (ch->opened) {
+               pr_debug("diag: SDIO channel %d is already opened\n", ch->id);
+               return -ENODEV;
+       }
+
+       err = qti_client_open(ch->ch_num, &diag_sdio_ops[ch->id]);
+       if (err) {
+               pr_err("diag: Unable to open SDIO channel %d, err: %d",
+                      ch->id, err);
+               return err;
+       }
+       spin_lock_irqsave(&ch->lock, flags);
+       ch->opened = 1;
+       spin_unlock_irqrestore(&ch->lock, flags);
+       diagmem_init(driver, ch->mempool);
+       /* Notify the bridge that the channel is open */
+       diag_remote_dev_open(ch->dev_id);
+       queue_work(ch->sdio_wq, &(ch->read_work));
+       return 0;
+}
+
+static void sdio_open_work_fn(struct work_struct *work)
+{
+       struct diag_sdio_info *ch = container_of(work, struct diag_sdio_info,
+                                                open_work);
+       if (ch)
+               sdio_open(ch->id);
+}
+
+static int sdio_close(int id)
+{
+       unsigned long flags;
+       struct diag_sdio_info *ch = NULL;
+
+       if (id < 0 || id >= NUM_SDIO_DEV) {
+               pr_err("diag: Invalid index %d in %s\n", id, __func__);
+               return -EINVAL;
+       }
+
+       ch = &diag_sdio[id];
+       if (!ch->enabled)
+               return -ENODEV;
+
+       if (!ch->opened) {
+               pr_debug("diag: SDIO channel %d is already closed\n", ch->id);
+               return -ENODEV;
+       }
+
+       spin_lock_irqsave(&ch->lock, flags);
+       ch->opened = 0;
+       spin_unlock_irqrestore(&ch->lock, flags);
+       qti_client_close(ch->ch_num);
+       diagmem_exit(driver, ch->mempool);
+       diag_remote_dev_close(ch->dev_id);
+       return 0;
+}
+
+static void sdio_close_work_fn(struct work_struct *work)
+{
+       struct diag_sdio_info *ch = container_of(work, struct diag_sdio_info,
+                                                close_work);
+       if (ch)
+               sdio_close(ch->id);
+}
+
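+/*
+ * Read work: keep submitting reads into fresh mempool buffers until the
+ * pool is exhausted or a read fails; the completion callbacks requeue this
+ * work once buffers are returned.
+ */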
+static void sdio_read_work_fn(struct work_struct *work)
+{
+       int err = 0;
+       unsigned char *buf = NULL;
+       struct diag_sdio_info *ch = container_of(work, struct diag_sdio_info,
+                                                read_work);
+       if (!ch || !ch->enabled || !ch->opened)
+               return;
+
+       do {
+               buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE, ch->mempool);
+               if (!buf) {
+                       err = -ENOMEM;
+                       break;
+               }
+
+               err = qti_client_read(ch->ch_num, buf, DIAG_MDM_BUF_SIZE);
+               if (err < 0) {
+                       diagmem_free(driver, buf, ch->mempool);
+                       break;
+               }
+       } while (buf);
+
+       /* Read from the SDIO channel continuously if the channel is present */
+       if (!err)
+               queue_work(ch->sdio_wq, &ch->read_work);
+}
+
+static int diag_sdio_probe(struct platform_device *pdev)
+{
+       unsigned long flags;
+       struct diag_sdio_info *ch = NULL;
+
+       if (!pdev)
+               return -EIO;
+
+       pr_debug("diag: sdio probe pdev: %d\n", pdev->id);
+       if (pdev->id >= NUM_SDIO_DEV) {
+               pr_err("diag: No support for SDIO device %d\n", pdev->id);
+               return -EIO;
+       }
+
+       ch = &diag_sdio[pdev->id];
+       ch->ch_num = *((int *)pdev->dev.platform_data);
+
+       if (!ch->enabled) {
+               spin_lock_irqsave(&ch->lock, flags);
+               ch->enabled = 1;
+               spin_unlock_irqrestore(&ch->lock, flags);
+       }
+       queue_work(ch->sdio_wq, &(ch->open_work));
+       return 0;
+}
+
+static int diag_sdio_remove(struct platform_device *pdev)
+{
+       struct diag_sdio_info *ch = NULL;
+
+       if (!pdev)
+               return -EIO;
+
+       pr_debug("diag: sdio close pdev: %d\n", pdev->id);
+       if (pdev->id >= NUM_SDIO_DEV) {
+               pr_err("diag: No support for SDIO device %d\n", pdev->id);
+               return -EIO;
+       }
+
+       ch = &diag_sdio[pdev->id];
+       queue_work(ch->sdio_wq, &(ch->close_work));
+       return 0;
+}
+
+static int diagfwd_sdio_runtime_suspend(struct device *dev)
+{
+       dev_dbg(dev, "pm_runtime: suspending...\n");
+       return 0;
+}
+
+static int diagfwd_sdio_runtime_resume(struct device *dev)
+{
+       dev_dbg(dev, "pm_runtime: resuming...\n");
+       return 0;
+}
+
+static const struct dev_pm_ops diagfwd_sdio_dev_pm_ops = {
+       .runtime_suspend = diagfwd_sdio_runtime_suspend,
+       .runtime_resume = diagfwd_sdio_runtime_resume,
+};
+
+static struct platform_driver msm_sdio_ch_driver = {
+       .probe = diag_sdio_probe,
+       .remove = diag_sdio_remove,
+       .driver = {
+                  .name = "diag_bridge_sdio",
+                  .owner = THIS_MODULE,
+                  .pm   = &diagfwd_sdio_dev_pm_ops,
+                  },
+};
+
+static int sdio_queue_read(int id)
+{
+       if (id < 0 || id >= NUM_SDIO_DEV) {
+               pr_err("diag: Invalid index %d in %s\n", id, __func__);
+               return -EINVAL;
+       }
+       queue_work(diag_sdio[id].sdio_wq, &(diag_sdio[id].read_work));
+       return 0;
+}
+
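+/*
+ * Write a forwarded buffer to the SDIO client channel on behalf of the diag
+ * bridge; fails if the channel is not open and enabled.
+ */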
+static int sdio_write(int id, unsigned char *buf, int len, int ctxt)
+{
+       int err = 0;
+       struct diag_sdio_info *ch = NULL;
+
+       if (id < 0 || id >= NUM_SDIO_DEV) {
+               pr_err("diag: Invalid index %d in %s\n", id, __func__);
+               return -EINVAL;
+       }
+       if (!buf || len <= 0)
+               return -EINVAL;
+
+       ch = &diag_sdio[id];
+       if (!ch->opened || !ch->enabled) {
+               pr_debug("diag: %s, ch %d is disabled. opened %d enabled %d\n",
+                                    __func__, id, ch->opened, ch->enabled);
+               return -EIO;
+       }
+
+       err = qti_client_write(ch->ch_num, buf, len);
+       if (err < 0) {
+               pr_err("diag: failed to write to ch[%d] in %s\n", ch->ch_num,
+                                                               __func__);
+               return err;
+       }
+       return 0;
+}
+
+static int sdio_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+{
+       if (id < 0 || id >= NUM_SDIO_DEV) {
+               pr_err("diag: Invalid index %d in %s\n", id, __func__);
+               return -EINVAL;
+       }
+       if (!buf)
+               return -EIO;
+       diagmem_free(driver, buf, diag_sdio[id].mempool);
+       queue_work(diag_sdio[id].sdio_wq, &(diag_sdio[id].read_work));
+       return 0;
+}
+
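+/* Operations exposed to the diag bridge for the remote SDIO device */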
+static struct diag_remote_dev_ops diag_sdio_fwd_ops = {
+       .open = sdio_open,
+       .close = sdio_close,
+       .queue_read = sdio_queue_read,
+       .write = sdio_write,
+       .fwd_complete = sdio_fwd_complete,
+};
+
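+/*
+ * Set up per-channel locks, work items and workqueues, register each channel
+ * with the diag bridge and finally register the SDIO platform driver.
+ */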
+int diag_sdio_init(void)
+{
+       int i;
+       int err = 0;
+       struct diag_sdio_info *ch = NULL;
+       char wq_name[DIAG_SDIO_NAME_SZ + DIAG_SDIO_STRING_SZ];
+
+       for (i = 0; i < NUM_SDIO_DEV; i++) {
+               ch = &diag_sdio[i];
+               spin_lock_init(&ch->lock);
+               INIT_WORK(&(ch->read_work), sdio_read_work_fn);
+               INIT_WORK(&(ch->open_work), sdio_open_work_fn);
+               INIT_WORK(&(ch->close_work), sdio_close_work_fn);
+               strlcpy(wq_name, "DIAG_SDIO_", DIAG_SDIO_STRING_SZ);
+               strlcat(wq_name, ch->name, sizeof(ch->name));
+               ch->sdio_wq = create_singlethread_workqueue(wq_name);
+               if (!ch->sdio_wq)
+                       goto fail;
+               err = diagfwd_bridge_register(ch->dev_id, ch->id,
+                                             &diag_sdio_fwd_ops);
+               if (err) {
+                       pr_err("diag: Unable to register SDIO channel %d with bridge, err: %d\n",
+                              i, err);
+                       goto fail;
+               }
+       }
+
+       err = platform_driver_register(&msm_sdio_ch_driver);
+       if (err) {
+               pr_err("diag: could not register SDIO device, err: %d\n", err);
+               goto fail;
+       }
+
+       return 0;
+fail:
+       diag_sdio_exit();
+       return -ENOMEM;
+}
+
+void diag_sdio_exit(void)
+{
+       int i;
+       struct diag_sdio_info *ch = NULL;
+
+       for (i = 0; i < NUM_SDIO_DEV; i++) {
+               ch = &diag_sdio[i];
+               ch->enabled = 0;
+               ch->opened = 0;
+               ch->suspended = 0;
+               if (ch->sdio_wq)
+                       destroy_workqueue(ch->sdio_wq);
+       }
+       platform_driver_unregister(&msm_sdio_ch_driver);
+}
diff --git a/drivers/char/diag/diagfwd_sdio.h b/drivers/char/diag/diagfwd_sdio.h
new file mode 100644 (file)
index 0000000..1fa258e
--- /dev/null
@@ -0,0 +1,55 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SDIO_H
+#define DIAGFWD_SDIO_H
+
+#ifdef CONFIG_QCOM_SDIO_CLIENT
+
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <linux/usb/diag_bridge.h>
+
+#define SDIO_1                 0
+#define NUM_SDIO_DEV           1
+
+#define DIAG_SDIO_NAME_SZ      24
+
+struct diag_sdio_info {
+       int id;
+       int dev_id;
+       int ch_num;
+       int mempool;
+       uint8_t opened;
+       uint8_t enabled;
+       uint8_t suspended;
+       char name[DIAG_SDIO_NAME_SZ];
+       struct work_struct read_work;
+       struct work_struct open_work;
+       struct work_struct close_work;
+       struct workqueue_struct *sdio_wq;
+       spinlock_t lock;
+};
+
+extern struct diag_sdio_info diag_sdio[NUM_SDIO_DEV];
+extern int qti_client_open(int id, struct diag_bridge_ops *ops);
+extern int qti_client_close(int id);
+extern int qti_client_read(int id, char *buf, size_t count);
+extern int qti_client_write(int id, char *buf, size_t count);
+
+
+int diag_sdio_init(void);
+void diag_sdio_exit(void);
+
+#endif /* CONFIG_QCOM_SDIO_CLIENT */
+#endif /* DIAGFWD_SDIO_H */
index 72e0738..5b38d7a 100644 (file)
@@ -569,8 +569,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
        unsigned long long m;
 
        m = hpets->hp_tick_freq + (dis >> 1);
-       do_div(m, dis);
-       return (unsigned long)m;
+       return div64_ul(m, dis);
 }
 
 static int
diff --git a/drivers/char/qti_sdio_client.c b/drivers/char/qti_sdio_client.c
new file mode 100644 (file)
index 0000000..5cc1079
--- /dev/null
@@ -0,0 +1,1184 @@
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* add additional information to our printk's */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/cache.h>
+#include <linux/qcn_sdio_al.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <uapi/linux/major.h>
+#include <linux/ipc_logging.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+
+#define        DATA_ALIGNMENT                  4
+#define        MAX_CLIENTS                     5
+#define        TX_BUF_SIZE                     0x4000
+#define        RX_BUF_SIZE                     0x4000
+
+#define        TTY_RX_BYTE_COUNT               0x21
+#define        TTY_RX_BYTE_COUNT_TRANS         0x22
+#define        TTY_RX_BYTE_TRANS_MODE          0x23
+#define        TTY_RX_BYTE_TX_READY            0x24
+
+#define        TTY_SAHARA_DOORBELL_EVENT       0x20
+#define        TTY_TX_BUF_SZ_EVENT             0x21
+#define        TTY_TX_BUF_SZ_TRANS_EVENT       0x22
+
+#define        QMI_RX_BYTE_COUNT               0x61
+#define        QMI_RX_BYTE_COUNT_TRANS         0x62
+#define        QMI_RX_BYTE_TRANS_MODE          0x63
+#define        QMI_RX_BYTE_TX_READY            0x64
+
+#define        QMI_DOORBELL_EVENT              0x60
+#define        QMI_TX_BUF_SZ_EVENT             0x61
+#define        QMI_TX_BUF_SZ_TRANS_EVENT       0x62
+
+#define        DIAG_RX_BYTE_COUNT              0x81
+#define        DIAG_RX_BYTE_COUNT_TRANS        0x82
+#define        DIAG_RX_BYTE_TRANS_MODE         0x83
+#define        DIAG_RX_BYTE_TX_READY           0x84
+
+#define        DIAG_DOORBELL_EVENT             0x80
+#define        DIAG_TX_BUF_SZ_EVENT            0x81
+#define        DIAG_TX_BUF_SZ_TRANS_EVENT      0x82
+
+#define        QCN_IPC_LOG_PAGES               32
+
+#define        IPC_BRIDGE_MAX_READ_SZ          (8 * 1024)
+#define        IPC_BRIDGE_MAX_WRITE_SZ         (8 * 1024)
+
+static bool to_console;
+module_param(to_console, bool, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static bool ipc_log;
+module_param(ipc_log, bool, S_IRUGO | S_IWUSR | S_IWGRP);
+
+
+static DEFINE_MUTEX(work_lock);
+static spinlock_t list_lock;
+
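+/*
+ * Log helper: messages without a bridge context always go to the kernel log;
+ * otherwise the to_console and ipc_log module parameters select the kernel
+ * log and/or the per-client IPC log buffer.
+ */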
+#define        qlog(qsb, _msg, ...) do {                                            \
+       if (!qsb ? 1 : to_console)                                           \
+               pr_err("[%s] " _msg, __func__, ##__VA_ARGS__);               \
+       if (qsb && ipc_log)                                                  \
+               ipc_log_string(qsb->ipc_log_ctxt, "[%s] " _msg, __func__,    \
+                                                       ##__VA_ARGS__);      \
+} while (0)
+
+enum qcn_sdio_cli_id {
+       QCN_SDIO_CLI_ID_INVALID = 0,
+       QCN_SDIO_CLI_ID_TTY,
+       QCN_SDIO_CLI_ID_WLAN,
+       QCN_SDIO_CLI_ID_QMI,
+       QCN_SDIO_CLI_ID_DIAG,
+       QCN_SDIO_CLI_ID_MAX
+};
+
+struct ipc_bridge_platform_data {
+       unsigned int max_read_size;
+       unsigned int max_write_size;
+       int (*open)(int id, void *ops);
+       int (*read)(int id, char *buf, size_t count);
+       int (*write)(int id, char *buf, size_t count);
+       int (*close)(int id);
+};
+
+struct diag_bridge_ops {
+       void *ctxt;
+       void (*read_complete_cb)(void *ctxt, char *buf,
+                       int buf_size, int actual);
+       void (*write_complete_cb)(void *ctxt, char *buf,
+                       int buf_size, int actual);
+       int (*suspend)(void *ctxt);
+       void (*resume)(void *ctxt);
+};
+
+struct qti_sdio_bridge {
+       const char *name;
+       const char *ch_name;
+       uint8_t id;
+       uint8_t mode;
+       struct sdio_al_channel_handle *channel_handle;
+       struct sdio_al_client_handle *client_handle;
+       wait_queue_head_t wait_q;
+       u8 *tx_dma_buf;
+       u8 *rx_dma_buf;
+       int data_avail;
+       int data_remain;
+       int blk_trans_mode;
+       int tx_ready;
+       struct diag_bridge_ops *ops;
+       void *ipc_log_ctxt;
+       void *priv_dev_info;
+       unsigned int mdata_count;
+       unsigned int tx_ready_count;
+       unsigned int data_avail_count;
+       atomic_t is_client_closing;
+};
+
+struct data_avail_node {
+       int id;
+       int data_avail;
+       u8 *rx_dma_buf;
+       struct list_head list;
+};
+
+struct tty_device {
+       struct device *qsb_device;
+       struct class *qsb_class;
+};
+
+static struct qti_sdio_bridge *qsbdev[MAX_CLIENTS];
+
+static int kworker_refs_count;
+struct kthread_work kwork;
+struct kthread_worker kworker;
+struct task_struct *task;
+struct list_head data_avail_list;
+static struct completion read_complete;
+
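+/*
+ * Park a received buffer on data_avail_list and kick the worker thread that
+ * hands it to the owning client's reader.
+ */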
+void qti_client_queue_rx(int id, u8 *buf, unsigned int bytes)
+{
+       struct data_avail_node *data_node;
+
+       if ((id < QCN_SDIO_CLI_ID_TTY) || (id > QCN_SDIO_CLI_ID_DIAG)) {
+               pr_err("%s invalid client ID %d\n", __func__, id);
+               return;
+       }
+
+       data_node = kzalloc(sizeof(struct data_avail_node), GFP_ATOMIC);
+       if (!data_node) {
+               to_console = 1;
+               qlog(qsbdev[id], "client %d dnode allocation failed\n", id);
+               to_console = 0;
+               return;
+       }
+
+       qlog(qsbdev[id], "%s Queuing to work %d %p\n", qsbdev[id]->name, bytes,
+                                                                       buf);
+       data_node->data_avail = bytes;
+       data_node->id = id;
+       data_node->rx_dma_buf = buf;
+
+       spin_lock(&list_lock);
+       list_add_tail(&data_node->list, &data_avail_list);
+       spin_unlock(&list_lock);
+
+       queue_kthread_work(&kworker, &kwork);
+}
+
+void qti_client_ul_xfer_cb(struct sdio_al_channel_handle *ch_handle,
+                               struct sdio_al_xfer_result *xfer, void *ctxt)
+{
+       struct qti_sdio_bridge *qsb = NULL;
+       struct completion *tx_complete = (struct completion *)ctxt;
+       struct sdio_al_client_data *cl_data =
+                                       ch_handle->channel_data->client_data;
+
+       if (!xfer || xfer->xfer_status || !cl_data ||
+               (cl_data->id < QCN_SDIO_CLI_ID_TTY) ||
+               (cl_data->id > QCN_SDIO_CLI_ID_DIAG)) {
+               pr_err("%s invalid client ID\n", __func__);
+               return;
+       }
+
+       qsb = qsbdev[cl_data->id];
+       complete(tx_complete);
+}
+
+void qti_client_dl_xfer_cb(struct sdio_al_channel_handle *ch_handle,
+                               struct sdio_al_xfer_result *xfer, void *ctxt)
+{
+       struct qti_sdio_bridge *qsb = NULL;
+       struct sdio_al_client_data *cl_data =
+                                       ch_handle->channel_data->client_data;
+
+       if (!xfer || xfer->xfer_status || !cl_data ||
+               (cl_data->id < QCN_SDIO_CLI_ID_TTY) ||
+               (cl_data->id > QCN_SDIO_CLI_ID_DIAG)) {
+               pr_err("%s invalid client ID\n", __func__);
+               return;
+       }
+
+       qsb = qsbdev[cl_data->id];
+       qti_client_queue_rx(cl_data->id, xfer->buf_addr, (int)(uintptr_t)ctxt);
+}
+
+void qti_client_data_avail_cb(struct sdio_al_channel_handle *ch_handle,
+                                                       unsigned int bytes)
+{
+       int ret = 0;
+       int padded_len = 0;
+       u8 *rx_dma_buf = NULL;
+       struct qti_sdio_bridge *qsb = NULL;
+       struct sdio_al_client_data *cl_data =
+                                       ch_handle->channel_data->client_data;
+
+       if (!cl_data || (cl_data->id < QCN_SDIO_CLI_ID_TTY) ||
+                       (cl_data->id > QCN_SDIO_CLI_ID_DIAG)) {
+               pr_err("%s invalid client ID\n", __func__);
+               return;
+       }
+
+       qsb = qsbdev[cl_data->id];
+       qsb->data_avail_count++;
+       rx_dma_buf = kzalloc(RX_BUF_SIZE, GFP_ATOMIC);
+       if (!rx_dma_buf) {
+               to_console = 1;
+               qlog(qsb, "Unable to allocate rx_dma_buf\n");
+               to_console = 0;
+               return;
+       }
+
+       if (qsb->blk_trans_mode &&
+                       (bytes % qsb->client_handle->block_size)) {
+               padded_len = (((bytes /
+                               qsb->client_handle->block_size) + 1) *
+                               (qsb->client_handle->block_size));
+       } else {
+               padded_len = bytes;
+       }
+
+       if (qsb->mode) {
+               ret = sdio_al_queue_transfer_async(qsb->channel_handle,
+                                               SDIO_AL_RX, rx_dma_buf,
+                                               padded_len, 0,
+                                               (void *)(uintptr_t)bytes);
+               if (ret) {
+                       to_console = 1;
+                       qlog(qsb, "%s: data queueing failed %d\n", qsb->name,
+                                                                       ret);
+                       to_console = 0;
+                       return;
+               }
+       } else {
+               ret = sdio_al_queue_transfer(qsb->channel_handle,
+                               SDIO_AL_RX, rx_dma_buf, padded_len, 0);
+               if (ret == 1) {
+                       pr_debug("operating in async mode now\n");
+                       goto out;
+               }
+               if (ret) {
+                       to_console = 1;
+                       qlog(qsb, "%s: data transfer failed %d\n", qsb->name,
+                                                                       ret);
+                       to_console = 0;
+                       return;
+               }
+               qti_client_queue_rx(cl_data->id, rx_dma_buf, bytes);
+       }
+out:
+       qlog(qsb, "%s: data %s success\n", qsb->name,
+                                       qsb->mode ? "queueing" : "transfer");
+}
+
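+/*
+ * Decode a meta-data doorbell from the device: the top byte of 'data' is the
+ * event code and the low 14 bits carry a byte count.  RX_BYTE_COUNT reports
+ * pending data (and starts the download for the TTY client), TRANS_MODE
+ * selects block transfer mode, and TX_READY unblocks a pending write.
+ */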
+static void sdio_dl_meta_data_cb(struct sdio_al_channel_handle *ch_handle,
+               unsigned int data)
+{
+       u8 event = 0;
+       struct qti_sdio_bridge *qsb = NULL;
+       struct sdio_al_client_data *cl_data =
+                                       ch_handle->channel_data->client_data;
+
+       if (!cl_data || (cl_data->id < QCN_SDIO_CLI_ID_TTY) ||
+                       (cl_data->id > QCN_SDIO_CLI_ID_DIAG)) {
+               pr_err("%s invalid client ID\n", __func__);
+               return;
+       }
+
+       qsb = qsbdev[cl_data->id];
+
+       event = (u8)((data & 0xFF000000) >> 24);
+
+       switch (event) {
+       case TTY_RX_BYTE_COUNT:
+               qlog(qsb, "client %s data_avail %d\n", qsb->name,
+                                                       data & 0x00003FFF);
+               qti_client_data_avail_cb(ch_handle, (data & 0x00003FFF));
+               break;
+       case QMI_RX_BYTE_COUNT:
+       case DIAG_RX_BYTE_COUNT:
+               qlog(qsb, "client %s meta_data %x\n", qsb->name, data);
+               break;
+       case TTY_RX_BYTE_COUNT_TRANS:
+       case QMI_RX_BYTE_COUNT_TRANS:
+       case DIAG_RX_BYTE_COUNT_TRANS:
+               break;
+       case TTY_RX_BYTE_TRANS_MODE:
+       case QMI_RX_BYTE_TRANS_MODE:
+       case DIAG_RX_BYTE_TRANS_MODE:
+               qsb->blk_trans_mode = (data & 0x00000001);
+               qlog(qsb, "client %s mode = %d data %x\n", qsb->name,
+                                               qsb->blk_trans_mode, data);
+               break;
+       case TTY_RX_BYTE_TX_READY:
+       case QMI_RX_BYTE_TX_READY:
+       case DIAG_RX_BYTE_TX_READY:
+               qsb->tx_ready = 1;
+               wake_up(&qsb->wait_q);
+               qlog(qsb, "client %s tx_ready data = %x\n", qsb->name, data);
+               qsb->tx_ready_count++;
+               break;
+       default:
+               to_console = 1;
+               qlog(qsb, "client %s INVALID_DATA\n", qsb->name);
+               to_console = 0;
+       }
+}
+
+int qti_client_open(int id, void *ops)
+{
+       int ret = -ENODEV;
+       unsigned int mdata = 0;
+       unsigned int event = 0;
+       struct qti_sdio_bridge *qsb = NULL;
+
+       switch (id) {
+       case QCN_SDIO_CLI_ID_TTY:
+               event = TTY_SAHARA_DOORBELL_EVENT;
+               break;
+       case QCN_SDIO_CLI_ID_QMI:
+               event = QMI_DOORBELL_EVENT;
+               break;
+       case QCN_SDIO_CLI_ID_DIAG:
+               event = DIAG_DOORBELL_EVENT;
+               break;
+       default:
+               to_console = 1;
+               qlog(qsb, "Invalid client\n");
+               to_console = 0;
+               return ret;
+       }
+
+       qsb = qsbdev[id];
+
+       qlog(qsb, "client %s\n", qsb->name);
+
+       qsb->ops = (struct diag_bridge_ops *)ops;
+
+       mdata = (event << 24);
+       ret = sdio_al_meta_transfer(qsb->channel_handle, mdata, 0);
+
+       return ret;
+}
+EXPORT_SYMBOL(qti_client_open);
+
+int qti_client_close(int id)
+{
+       int ret = -ENODEV;
+       struct qti_sdio_bridge *qsb = NULL;
+
+       if ((id < QCN_SDIO_CLI_ID_TTY) || (id > QCN_SDIO_CLI_ID_DIAG)) {
+               pr_err("%s invalid client ID %d\n", __func__, id);
+               return ret;
+       }
+
+       qsb = qsbdev[id];
+
+       qlog(qsb, "client %s\n", qsb->name);
+
+       qsb->ops = NULL;
+
+       return 0;
+}
+EXPORT_SYMBOL(qti_client_close);
+
+int qti_client_read(int id, char *buf, size_t count)
+{
+       int ret = 0;
+       int bytes = 0;
+       struct qti_sdio_bridge *qsb = NULL;
+
+       if ((id < QCN_SDIO_CLI_ID_TTY) || (id > QCN_SDIO_CLI_ID_DIAG) ||
+                               atomic_read(&qsbdev[id]->is_client_closing)) {
+               pr_err("%s invalid client ID %d\n", __func__, id);
+               return -ENODEV;
+       }
+
+       qsb = qsbdev[id];
+       qlog(qsb, "client %s\n", qsb->name);
+
+       if (id == QCN_SDIO_CLI_ID_DIAG && !qsb->ops) {
+               to_console = 1;
+               qlog(qsb, "%s: no diag operations assigned\n", qsb->name);
+               to_console = 0;
+               ret = -ENODEV;
+               goto out;
+       }
+
+       wait_event(qsb->wait_q, qsb->data_avail ||
+                                       atomic_read(&qsb->is_client_closing));
+       if (atomic_read(&qsb->is_client_closing)) {
+               ret = -ENODEV;
+               goto out;
+       }
+
+       bytes = qsb->data_avail;
+
+       if (!qsb->data_remain) {
+               if (count > bytes)
+                       count = bytes;
+
+               if (id == QCN_SDIO_CLI_ID_TTY) {
+                       ret = copy_to_user(buf, qsb->rx_dma_buf, count);
+                       if (ret) {
+                               to_console = 1;
+                               qlog(qsb, "%s: failed to copy to user buffer\n",
+                                                               qsb->name);
+                               to_console = 0;
+                               return -EIO;
+                       }
+               } else {
+                       memcpy(buf, qsb->rx_dma_buf, count);
+               }
+               qsb->data_remain = bytes - count;
+       } else {
+               if (count > qsb->data_remain)
+                       count = qsb->data_remain;
+
+               if (id == QCN_SDIO_CLI_ID_TTY) {
+                       ret = copy_to_user(buf, qsb->rx_dma_buf +
+                               (bytes - qsb->data_remain), count);
+                       if (ret) {
+                               to_console = 1;
+                               qlog(qsb, "%s: failed to copy to user buffer\n",
+                                               qsb->name);
+                               to_console = 0;
+                               return -EIO;
+                       }
+               } else {
+                       memcpy(buf, qsb->rx_dma_buf +
+                               (bytes - qsb->data_remain), count);
+               }
+               qsb->data_remain -= count;
+       }
+out:
+       if (id == QCN_SDIO_CLI_ID_DIAG && qsb->ops &&
+                                               qsb->ops->read_complete_cb) {
+               qsb->ops->read_complete_cb((void *)(uintptr_t)0, buf, count,
+                               ret < 0 ? ret : count);
+       }
+
+       if (!qsb->data_remain) {
+               qsb->data_avail = 0;
+               bytes = 0;
+               complete(&read_complete);
+       }
+
+       return ret < 0 ? ret : count;
+}
+EXPORT_SYMBOL(qti_client_read);
+
+int qti_client_write(int id, char *buf, size_t count)
+{
+       int ret = 0;
+       int remaining = 0;
+       int padded_len = 0;
+       int temp_count = 0;
+       u8 *buffer = NULL;
+       unsigned int mdata = 0;
+       unsigned int event = 0;
+       struct qti_sdio_bridge *qsb = NULL;
+       DECLARE_COMPLETION_ONSTACK(tx_complete);
+
+       if ((id < QCN_SDIO_CLI_ID_TTY) || (id > QCN_SDIO_CLI_ID_DIAG) ||
+                       !qsbdev[id] ||
+                       atomic_read(&qsbdev[id]->is_client_closing)) {
+               pr_err("%s invalid client ID %d\n", __func__, id);
+               return -ENODEV;
+       }
+
+       switch (id) {
+       case QCN_SDIO_CLI_ID_TTY:
+               event = TTY_TX_BUF_SZ_EVENT;
+               break;
+       case QCN_SDIO_CLI_ID_QMI:
+               event = QMI_TX_BUF_SZ_EVENT;
+               break;
+       case QCN_SDIO_CLI_ID_DIAG:
+               event = DIAG_TX_BUF_SZ_EVENT;
+               break;
+       default:
+               to_console = 1;
+               qlog(qsb, "Invalid client\n");
+               to_console = 0;
+               return -ENODEV;
+       }
+
+       qsb = qsbdev[id];
+       qsb->tx_ready = 0;
+
+       qlog(qsb, "client %s\n", qsb->name);
+
+       if (id == QCN_SDIO_CLI_ID_DIAG && !qsb->ops) {
+               to_console = 1;
+               qlog(qsb, "%s: no diag operations assigned\n", qsb->name);
+               to_console = 0;
+               ret = -ENODEV;
+               return ret;
+       }
+
+       if (id == QCN_SDIO_CLI_ID_TTY) {
+               ret = copy_from_user(qsb->tx_dma_buf, buf, count);
+               if (ret) {
+                       qlog(qsb, "%s: failed to copy from user buffer\n",
+                                                               qsb->name);
+                       return ret;
+               }
+       } else {
+               memcpy(qsb->tx_dma_buf, buf, count);
+       }
+
+       remaining = count;
+       temp_count = count;
+       buffer = qsb->tx_dma_buf;
+
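+       /*
+        * Send the payload in chunks: non-TTY clients are limited to 1KB per
+        * transfer.  Each chunk is announced to the device through a meta-data
+        * doorbell and we wait for the tx_ready ack before queuing the actual
+        * SDIO transfer.
+        */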
+       while (remaining) {
+               qsb->tx_ready = 0;
+               if (id != QCN_SDIO_CLI_ID_TTY && remaining > 1024) {
+                       temp_count = remaining;
+                       remaining = remaining - 1024;
+                       temp_count = temp_count - remaining;
+               } else if (id == QCN_SDIO_CLI_ID_TTY) {
+                       remaining = 0;
+               } else {
+                       temp_count = remaining;
+                       remaining = 0;
+               }
+
+               if (qsb->blk_trans_mode &&
+                               (temp_count % qsb->client_handle->block_size))
+                       padded_len =
+                               (((temp_count / qsb->client_handle->block_size)
+                               + 1) * (qsb->client_handle->block_size));
+               else
+                       padded_len = temp_count;
+
+               mdata = ((event << 24) | (temp_count & 0x3FFF));
+               ret = sdio_al_meta_transfer(qsb->channel_handle, mdata, 0);
+               if (ret) {
+                       to_console = 1;
+                       qlog(qsb, "%s: meta data transfer failed %d\n",
+                                                               qsb->name, ret);
+                       to_console = 0;
+                       return ret;
+               }
+
+               qlog(qsb, "MDATA: %x\n", mdata);
+               qsb->mdata_count++;
+
+               wait_event(qsb->wait_q, qsb->tx_ready ||
+                                       atomic_read(&qsb->is_client_closing));
+               if (atomic_read(&qsb->is_client_closing)) {
+                       ret = -ENODEV;
+                       goto out;
+               }
+
+               if (qsb->mode) {
+                       reinit_completion(&tx_complete);
+                       ret = sdio_al_queue_transfer_async(qsb->channel_handle,
+                                                       SDIO_AL_TX, buffer,
+                                                       padded_len, 0,
+                                                       (void *)&tx_complete);
+                       if (ret) {
+                               to_console = 1;
+                               qlog(qsb, "%s: data transfer failed %d\n",
+                                                               qsb->name, ret);
+                               to_console = 0;
+                               return ret;
+                       }
+
+                       wait_for_completion(&tx_complete);
+               } else {
+
+                       ret = sdio_al_queue_transfer(qsb->channel_handle,
+                                       SDIO_AL_TX, buffer, padded_len, 0);
+                       if (ret) {
+                               to_console = 1;
+                               qlog(qsb, "%s: data transfer failed %d\n",
+                                                               qsb->name, ret);
+                               to_console = 0;
+                               return ret;
+                       }
+               }
+               buffer = buffer + temp_count;
+       }
+
+out:
+       if (id == QCN_SDIO_CLI_ID_DIAG && qsb->ops &&
+                       qsb->ops->write_complete_cb) {
+               qsb->ops->write_complete_cb((void *)(uintptr_t)0, buf, count,
+                               ret < 0 ? ret : count);
+       }
+
+       return ret < 0 ? ret : count;
+}
+EXPORT_SYMBOL(qti_client_write);
+
+static int qsb_dev_open(struct inode *inode, struct file *file)
+{
+       if (atomic_read(&inode->i_count) != 1)
+               return -EBUSY;
+
+       return qti_client_open(QCN_SDIO_CLI_ID_TTY, NULL);
+}
+
+static ssize_t qsb_dev_read(struct file *file, char __user *buf, size_t count,
+               loff_t *ppos)
+{
+       return qti_client_read(QCN_SDIO_CLI_ID_TTY, (char *)buf, count);
+}
+
+static ssize_t qsb_dev_write(struct file *file, const char __user *buf,
+               size_t count, loff_t *ppos)
+{
+       return qti_client_write(QCN_SDIO_CLI_ID_TTY, (char *)buf, count);
+}
+
+static int qsb_dev_release(struct inode *inode, struct file *file)
+{
+       return qti_client_close(QCN_SDIO_CLI_ID_TTY);
+}
+
+static
+unsigned int qsb_dev_poll(struct file *file, struct poll_table_struct *wait)
+{
+       int ret = 0;
+       struct qti_sdio_bridge *qsb = NULL;
+
+       qsb = qsbdev[QCN_SDIO_CLI_ID_TTY];
+
+       if (!qsb->data_avail)
+               poll_wait(file, &qsb->wait_q, wait);
+
+       if (qsb->data_avail)
+               ret = POLLIN | POLLRDNORM;
+
+       return ret;
+}
+
+static const struct ipc_bridge_platform_data ipc_bridge_pdata = {
+       .max_read_size = IPC_BRIDGE_MAX_READ_SZ,
+       .max_write_size = IPC_BRIDGE_MAX_WRITE_SZ,
+       .open = qti_client_open,
+       .read = qti_client_read,
+       .write = qti_client_write,
+       .close = qti_client_close,
+};
+
+static const struct file_operations qsb_dev_ops = {
+       .open = qsb_dev_open,
+       .read = qsb_dev_read,
+       .write = qsb_dev_write,
+       .release = qsb_dev_release,
+       .poll = qsb_dev_poll,
+};
+
+int qti_client_debug_init(int id)
+{
+       int ret = -EINVAL;
+       char name[32] = {0};
+       struct qti_sdio_bridge *qsb = NULL;
+
+       if ((id < QCN_SDIO_CLI_ID_TTY) || (id > QCN_SDIO_CLI_ID_DIAG)) {
+               pr_err("%s : invalid client ID %d\n", __func__, id);
+               return ret;
+       }
+
+       qsb = qsbdev[id];
+
+       snprintf(name, sizeof(name), "%s_%s", "qcn_client",
+                       (char *)(qsb->name + 15));
+
+       qsb->ipc_log_ctxt = ipc_log_context_create(QCN_IPC_LOG_PAGES, name, 0);
+       if (!qsb->ipc_log_ctxt) {
+               pr_err("failed to initialize ipc logging for client_%d\n", id);
+               goto out;
+       }
+
+       return 0;
+out:
+       return ret;
+}
+
+void qti_client_debug_deinit(int id)
+{
+       struct qti_sdio_bridge *qsb = NULL;
+
+       if ((id < QCN_SDIO_CLI_ID_TTY) || (id > QCN_SDIO_CLI_ID_DIAG)) {
+               pr_err("%s : invalid client ID %d\n", __func__, id);
+               return;
+       }
+
+       qsb = qsbdev[id];
+
+       if (qsb && qsb->ipc_log_ctxt) {
+               ipc_log_context_destroy(qsb->ipc_log_ctxt);
+               qsb->ipc_log_ctxt = NULL;
+               qsb = NULL;
+       }
+}
+
+static int qti_client_probe(struct sdio_al_client_handle *client_handle)
+{
+       int ret = -EINVAL;
+       int major_no = 0;
+       int diag_ch = QCN_SDIO_CLI_ID_DIAG;
+       struct tty_device *tty_dev = NULL;
+       struct platform_device *ipc_pdev = NULL;
+       struct platform_device *diag_pdev = NULL;
+       struct qti_sdio_bridge *qsb = NULL;
+       struct sdio_al_channel_handle *channel_handle = NULL;
+       struct sdio_al_channel_data *channel_data = NULL;
+
+       if ((client_handle->id < QCN_SDIO_CLI_ID_TTY) ||
+                       (client_handle->id > QCN_SDIO_CLI_ID_DIAG)) {
+               pr_err("%s : invalid client ID %d\n", __func__,
+                                                       client_handle->id);
+               goto err;
+       }
+
+       qti_client_debug_init(client_handle->id);
+
+       qsb = qsbdev[client_handle->id];
+
+       qlog(qsb, "probing client %s\n", qsb->name);
+
+       channel_data = kzalloc(sizeof(struct sdio_al_channel_data), GFP_KERNEL);
+       if (!channel_data) {
+               to_console = 1;
+               qlog(qsb, "client %s failed to allocate channel_data\n",
+                                                               qsb->name);
+               to_console = 0;
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       channel_data->name = kasprintf(GFP_KERNEL, "%s", qsb->ch_name);
+       channel_data->client_data = client_handle->client_data;
+       channel_data->dl_meta_data_cb = sdio_dl_meta_data_cb;
+
+       if (client_handle->id != QCN_SDIO_CLI_ID_TTY)
+               channel_data->dl_data_avail_cb = qti_client_data_avail_cb;
+       else
+               channel_data->dl_data_avail_cb = NULL;
+
+       channel_data->ul_xfer_cb = qti_client_ul_xfer_cb;
+       channel_data->dl_xfer_cb = qti_client_dl_xfer_cb;
+
+       channel_handle = sdio_al_register_channel(client_handle, channel_data);
+       if (IS_ERR(channel_handle)) {
+               ret = PTR_ERR(channel_handle);
+               to_console = 1;
+               qlog(qsb,
+                      "client %s failed to register channel_handle ret = %d\n",
+                                                               qsb->name, ret);
+               to_console = 0;
+               goto channel_data_err;
+       }
+
+       qsb->channel_handle = channel_handle;
+
+       qsb->tx_dma_buf = kzalloc(TX_BUF_SIZE, GFP_KERNEL);
+       if (!qsb->tx_dma_buf) {
+               to_console = 1;
+               qlog(qsb, "client %s failed to allocate tx_buf\n", qsb->name);
+               to_console = 0;
+               ret = -ENOMEM;
+               goto channel_handle_err;
+       }
+
+       init_waitqueue_head(&qsb->wait_q);
+
+       if (client_handle->id == QCN_SDIO_CLI_ID_TTY) {
+               tty_dev = kmalloc(sizeof(struct tty_device), GFP_KERNEL);
+               if (!tty_dev) {
+                       to_console = 1;
+                       qlog(qsb, "unable to allocate tty device\n");
+                       to_console = 0;
+                       ret = -ENOMEM;
+                       goto tx_err;
+               }
+
+               major_no = register_chrdev(UNNAMED_MAJOR, "QCN", &qsb_dev_ops);
+               if (major_no < 0) {
+                       to_console = 1;
+                       qlog(qsb, "client %s failed to allocate major_no\n",
+                                                               qsb->name);
+                       to_console = 0;
+                       ret = major_no;
+                       goto tx_err;
+               }
+
+               tty_dev->qsb_class = class_create(THIS_MODULE, "qsahara");
+               if (IS_ERR(tty_dev->qsb_class)) {
+                       to_console = 1;
+                       qlog(qsb, "client %s failed to create class\n",
+                                                               qsb->name);
+                       to_console = 0;
+                       ret = PTR_ERR(tty_dev->qsb_class);
+                       goto reg_err;
+               }
+
+               tty_dev->qsb_device = device_create(tty_dev->qsb_class, NULL,
+                                       MKDEV(major_no, 0), NULL, "qcn_sdio");
+               if (IS_ERR(tty_dev->qsb_device)) {
+                       to_console = 1;
+                       qlog(qsb, "client %s failed to create device node\n",
+                                                               qsb->name);
+                       to_console = 0;
+                       ret = PTR_ERR(tty_dev->qsb_device);
+
+                       goto dev_err;
+               }
+               qsb->priv_dev_info = tty_dev;
+       }
+
+       if (client_handle->id == QCN_SDIO_CLI_ID_QMI) {
+               ipc_pdev = platform_device_alloc("ipc_bridge_sdio",
+                                                       QCN_SDIO_CLI_ID_QMI);
+               if (!ipc_pdev) {
+                       to_console = 1;
+                       qlog(qsb, "unable to allocate platform device\n");
+                       to_console = 0;
+                       ret = -ENOMEM;
+                       goto tx_err;
+               }
+
+               ret = platform_device_add_data(ipc_pdev, &ipc_bridge_pdata,
+                               sizeof(struct ipc_bridge_platform_data));
+               if (ret) {
+                       to_console = 1;
+                       qlog(qsb, "failed to add pdata\n");
+                       to_console = 0;
+                       goto put_pdev;
+               }
+
+               ret = platform_device_add(ipc_pdev);
+               if (ret) {
+                       to_console = 1;
+                       qlog(qsb, "failed to add ipc_pdev\n");
+                       to_console = 0;
+                       goto put_pdev;
+               }
+               qsb->priv_dev_info = ipc_pdev;
+       }
+
+       if (client_handle->id == QCN_SDIO_CLI_ID_DIAG) {
+               diag_pdev = platform_device_register_data(NULL,
+                               "diag_bridge_sdio", 0, &diag_ch, sizeof(int));
+               if (IS_ERR(diag_pdev)) {
+                       to_console = 1;
+                       qlog(qsb, "%s: unable to allocate platform device\n",
+                                                               __func__);
+                       to_console = 0;
+                       ret = PTR_ERR(diag_pdev);
+                       goto put_pdev;
+               }
+               qsb->priv_dev_info = diag_pdev;
+       }
+
+       atomic_set(&qsb->is_client_closing, 0);
+       qlog(qsb, "probed client %s\n", qsb->name);
+       return 0;
+
+put_pdev:
+       if (client_handle->id == QCN_SDIO_CLI_ID_QMI)
+               platform_device_put(ipc_pdev);
+
+       if (client_handle->id == QCN_SDIO_CLI_ID_TTY) {
+dev_err:
+               class_destroy(tty_dev->qsb_class);
+reg_err:
+               unregister_chrdev(major_no, "qsahara");
+       }
+tx_err:
+       kfree(qsb->tx_dma_buf);
+channel_handle_err:
+       sdio_al_deregister_channel(channel_handle);
+channel_data_err:
+       kfree(channel_data);
+err:
+       pr_err("probe failed for client %d\n", client_handle->id);
+       return ret;
+}
+
+static int qti_client_remove(struct sdio_al_client_handle *client_handle)
+{
+       int ret = -EINVAL;
+       int minor_no = 0;
+       int major_no = 0;
+       struct tty_device *tty_dev = NULL;
+       struct qti_sdio_bridge *qsb = NULL;
+
+       if ((client_handle->id < QCN_SDIO_CLI_ID_TTY) ||
+                       (client_handle->id > QCN_SDIO_CLI_ID_DIAG)) {
+               pr_err("%s : invalid client ID %d\n", __func__,
+                                                       client_handle->id);
+               goto err;
+       }
+
+       qsb = qsbdev[client_handle->id];
+
+       atomic_set(&qsb->is_client_closing, 1);
+       wake_up(&qsb->wait_q);
+
+       tty_dev = (struct tty_device *)qsb->priv_dev_info;
+       if (client_handle->id == QCN_SDIO_CLI_ID_TTY && tty_dev->qsb_device) {
+               minor_no = MINOR(tty_dev->qsb_device->devt);
+               major_no = MAJOR(tty_dev->qsb_device->devt);
+               device_destroy(tty_dev->qsb_class, MKDEV(major_no, minor_no));
+               class_destroy(tty_dev->qsb_class);
+               unregister_chrdev(major_no, "qsahara");
+               tty_dev->qsb_class = NULL;
+               tty_dev->qsb_device = NULL;
+               major_no = 0;
+       }
+
+       if (client_handle->id == QCN_SDIO_CLI_ID_QMI)
+               platform_device_unregister(qsb->priv_dev_info);
+
+       if (client_handle->id == QCN_SDIO_CLI_ID_DIAG)
+               platform_device_unregister(qsb->priv_dev_info);
+
+       qlog(qsb, "removed client %s\n", qsb->name);
+       kfree(qsb->tx_dma_buf);
+       qti_client_debug_deinit(client_handle->id);
+       return 0;
+
+err:
+       pr_err("%s : failed to remove client %d\n", __func__,
+                                                       client_handle->id);
+       return ret;
+}
+
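+/*
+ * Worker thread: drain data_avail_list, publish each buffer to its owning
+ * client, wake any blocked reader and wait for read_complete before freeing
+ * the buffer and moving on to the next node.
+ */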
+static void data_avail_worker(struct kthread_work *work)
+{
+       struct data_avail_node *data_node = NULL;
+       struct qti_sdio_bridge *qsb = NULL;
+
+       mutex_lock(&work_lock);
+       spin_lock(&list_lock);
+       while (!list_empty(&data_avail_list)) {
+               reinit_completion(&read_complete);
+               data_node = list_first_entry(&data_avail_list,
+                                               struct data_avail_node, list);
+               list_del(&data_node->list);
+               spin_unlock(&list_lock);
+
+               qsb = qsbdev[data_node->id];
+               qsb->data_avail = data_node->data_avail;
+
+               qsb->rx_dma_buf = data_node->rx_dma_buf;
+
+               qlog(qsb, "%s Queuing to read %d %p\n", qsb->name,
+                                       qsb->data_avail, qsb->rx_dma_buf);
+
+               if (qsb->data_avail)
+                       wake_up(&qsb->wait_q);
+
+               wait_for_completion(&read_complete);
+               kfree(data_node->rx_dma_buf);
+               kfree(data_node);
+               spin_lock(&list_lock);
+       }
+       spin_unlock(&list_lock);
+       mutex_unlock(&work_lock);
+}
+
+static int qti_bridge_probe(struct platform_device *pdev)
+{
+       int id = 0;
+       int ret = -EPROBE_DEFER;
+       struct sdio_al_client_data *client_data = NULL;
+       struct sdio_al_client_handle *client_handle = NULL;
+
+       ret = sdio_al_is_ready();
+       if (ret) {
+               ret = -EPROBE_DEFER;
+               goto out;
+       }
+
+       ret = of_property_read_u32(pdev->dev.of_node, "qcom,client-id", &id);
+       if (ret) {
+               pr_err("qcom,client-id not found\n");
+               goto out;
+       }
+
+       qsbdev[id] = kzalloc(sizeof(struct qti_sdio_bridge), GFP_KERNEL);
+       if (!qsbdev[id]) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       qsbdev[id]->id = id;
+
+       ret = of_property_read_string(pdev->dev.of_node, "qcom,ch-name",
+                       &(qsbdev[id]->ch_name));
+       if (ret) {
+               pr_err("qcom,ch-name not found\n");
+               goto out;
+       }
+
+       client_data = kzalloc(sizeof(struct sdio_al_client_data), GFP_KERNEL);
+       if (!client_data) {
+               ret = -ENOMEM;
+               goto bridge_alloc_error;
+       }
+
+       ret = of_property_read_string(pdev->dev.of_node, "qcom,client-name",
+                       &client_data->name);
+       if (ret) {
+               pr_err("qcom,client-name not found\n");
+               goto bridge_alloc_error;
+       }
+
+       qsbdev[id]->name = kasprintf(GFP_KERNEL, "%s", client_data->name);
+
+       ret = of_property_read_u32(pdev->dev.of_node, "qcom,client-mode",
+                                                       &client_data->mode);
+       if (ret)
+               pr_err("qcom,client-mode not set, mode[async] as default\n");
+
+       client_data->probe = qti_client_probe;
+       client_data->remove = qti_client_remove;
+       client_data->id = id;
+
+       qsbdev[id]->mode = client_data->mode;
+
+       client_handle = sdio_al_register_client(client_data);
+       if (IS_ERR(client_handle)) {
+               ret = PTR_ERR(client_handle);
+               goto client_error;
+       }
+
+       if (qsbdev[client_handle->id]->id != client_handle->id) {
+               pr_err("probed client %d doesn't match registered client %d\n",
+                       qsbdev[client_handle->id]->id, client_handle->id);
+               goto client_reg_error;
+       }
+
+       qsbdev[client_handle->id]->client_handle = client_handle;
+
+       if (!kworker_refs_count) {
+               init_kthread_work(&kwork, data_avail_worker);
+               init_kthread_worker(&kworker);
+               init_completion(&read_complete);
+
+               INIT_LIST_HEAD(&data_avail_list);
+
+               task = kthread_run(kthread_worker_fn, &kworker, "qcn_worker");
+               if (IS_ERR(task)) {
+                       pr_err("Failed to run qcn_worker thread\n");
+                       goto client_reg_error;
+               }
+               qcn_sdio_client_probe_complete(client_handle->id);
+               spin_lock_init(&list_lock);
+       }
+       ++kworker_refs_count;
+
+       return 0;
+
+client_reg_error:
+       sdio_al_deregister_client(client_handle);
+client_error:
+       kfree(client_data);
+bridge_alloc_error:
+       kfree(qsbdev[id]);
+out:
+       return ret;
+}
+
+static int qti_bridge_remove(struct platform_device *pdev)
+{
+       int ret = -EBUSY;
+       int id = 0;
+       struct qti_sdio_bridge *qsb = NULL;
+
+       ret = of_property_read_u32(pdev->dev.of_node, "qcom,client-id", &id);
+       if (ret) {
+               pr_err("%s: qcom,client-id not found\n", __func__);
+               goto out;
+       }
+
+       qsb = qsbdev[id];
+
+       --kworker_refs_count;
+       if (!kworker_refs_count)
+               kthread_stop(task);
+
+       sdio_al_deregister_client(qsb->client_handle);
+       kfree(qsb);
+
+out:
+       return ret;
+}
+
+static const struct of_device_id qti_sdio_bridge_of_match[] = {
+       {.compatible    = "qcom,sdio-bridge"},
+       {}
+};
+MODULE_DEVICE_TABLE(of, qti_sdio_bridge_of_match);
+
+static struct platform_driver qti_sdio_bridge_driver = {
+       .probe  = qti_bridge_probe,
+       .remove = qti_bridge_remove,
+       .driver = {
+               .name   = "sdio_bridge",
+               .owner  = THIS_MODULE,
+               .of_match_table = qti_sdio_bridge_of_match,
+       },
+};
+
+static int __init qti_bridge_init(void)
+{
+       int ret = -EBUSY;
+
+       ret = platform_driver_register(&qti_sdio_bridge_driver);
+       if (ret) {
+               pr_err("%s: platform_driver registration failed\n", __func__);
+               goto out;
+       }
+
+       return 0;
+out:
+       return ret;
+}
+
+static void __exit qti_bridge_exit(void)
+{
+       platform_driver_unregister(&qti_sdio_bridge_driver);
+}
+
+module_init(qti_bridge_init);
+module_exit(qti_bridge_exit);
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc.");
+MODULE_LICENSE("GPL v2");
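
The exported qti_client_open/read/write/close hooks are published to the QMI client through platform_device_add_data() above, so a peer "ipc_bridge_sdio" platform driver can pick them up from its platform data. The sketch below is only illustrative: it assumes the ipc_bridge_platform_data layout is shared through a common header, and the driver name handling and "ping" buffer are invented for the example, not part of this patch.

#include <linux/module.h>
#include <linux/platform_device.h>

/* Assumed to be shared (e.g. via a header) with qti_sdio_client.c above. */
struct ipc_bridge_platform_data {
	unsigned int max_read_size;
	unsigned int max_write_size;
	int (*open)(int id, void *ops);
	int (*read)(int id, char *buf, size_t count);
	int (*write)(int id, char *buf, size_t count);
	int (*close)(int id);
};

static int example_ipc_bridge_probe(struct platform_device *pdev)
{
	struct ipc_bridge_platform_data *pdata = dev_get_platdata(&pdev->dev);
	char ping[] = "hello";
	int ret;

	if (!pdata || !pdata->open || !pdata->write || !pdata->close)
		return -EINVAL;

	/* pdev->id carries the SDIO client id (QCN_SDIO_CLI_ID_QMI above). */
	ret = pdata->open(pdev->id, NULL);
	if (ret)
		return ret;

	/* Blocks until the device acks tx_ready, then returns bytes sent. */
	ret = pdata->write(pdev->id, ping, sizeof(ping));
	dev_info(&pdev->dev, "bridge write returned %d\n", ret);

	return pdata->close(pdev->id);
}

static struct platform_driver example_ipc_bridge_driver = {
	.probe	= example_ipc_bridge_probe,
	.driver	= {
		.name = "ipc_bridge_sdio",
	},
};
module_platform_driver(example_ipc_bridge_driver);

MODULE_DESCRIPTION("Illustrative consumer of the QTI SDIO bridge pdata");
MODULE_LICENSE("GPL v2");
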
index 7858648..14af5c9 100644 (file)
@@ -307,7 +307,7 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
  * This requires of_device_id table.  In the same time this will not change the
  * actual *device* matching so do not add .of_match_table.
  */
-static const struct of_device_id s2mps11_dt_match[] = {
+static const struct of_device_id s2mps11_dt_match[] __used = {
        {
                .compatible = "samsung,s2mps11-clk",
                .data = (void *)S2MPS11X,
index b840e4a..2b28958 100644 (file)
@@ -61,10 +61,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
        u32 delay_num = 0;
 
        /* See the comment for rockchip_mmc_set_phase below */
-       if (!rate) {
-               pr_err("%s: invalid clk rate\n", __func__);
+       if (!rate)
                return -EINVAL;
-       }
 
        raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
 
index e7a8b63..ceddc4f 100644 (file)
@@ -32,11 +32,17 @@ DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
 static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
 static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
 
+struct concurrent_times {
+       atomic64_t active[NR_CPUS];
+       atomic64_t policy[NR_CPUS];
+};
+
 struct uid_entry {
        uid_t uid;
        unsigned int max_state;
        struct hlist_node hash;
        struct rcu_head rcu;
+       struct concurrent_times *concurrent_times;
        u64 time_in_state[0];
 };
 
@@ -87,6 +93,7 @@ static struct uid_entry *find_uid_entry_locked(uid_t uid)
 static struct uid_entry *find_or_register_uid_locked(uid_t uid)
 {
        struct uid_entry *uid_entry, *temp;
+       struct concurrent_times *times;
        unsigned int max_state = READ_ONCE(next_offset);
        size_t alloc_size = sizeof(*uid_entry) + max_state *
                sizeof(uid_entry->time_in_state[0]);
@@ -115,9 +122,15 @@ static struct uid_entry *find_or_register_uid_locked(uid_t uid)
        uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
        if (!uid_entry)
                return NULL;
+       times = kzalloc(sizeof(*times), GFP_ATOMIC);
+       if (!times) {
+               kfree(uid_entry);
+               return NULL;
+       }
 
        uid_entry->uid = uid;
        uid_entry->max_state = max_state;
+       uid_entry->concurrent_times = times;
 
        hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
 
@@ -232,6 +245,86 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
        return 0;
 }
 
+static int concurrent_time_seq_show(struct seq_file *m, void *v,
+       atomic64_t *(*get_times)(struct concurrent_times *))
+{
+       struct uid_entry *uid_entry;
+       int i, num_possible_cpus = num_possible_cpus();
+
+       rcu_read_lock();
+
+       hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
+               atomic64_t *times = get_times(uid_entry->concurrent_times);
+
+               seq_put_decimal_ull(m, 0, (u64)uid_entry->uid);
+               seq_putc(m, ':');
+
+               for (i = 0; i < num_possible_cpus; ++i) {
+                       u64 time = cputime_to_clock_t(atomic64_read(&times[i]));
+
+                       seq_put_decimal_ull(m, ' ', time);
+               }
+               seq_putc(m, '\n');
+       }
+
+       rcu_read_unlock();
+
+       return 0;
+}
+
+static inline atomic64_t *get_active_times(struct concurrent_times *times)
+{
+       return times->active;
+}
+
+static int concurrent_active_time_seq_show(struct seq_file *m, void *v)
+{
+       if (v == uid_hash_table) {
+               seq_printf(m, "cpus: %d", num_possible_cpus());
+               seq_putc(m, '\n');
+       }
+
+       return concurrent_time_seq_show(m, v, get_active_times);
+}
+
+static inline atomic64_t *get_policy_times(struct concurrent_times *times)
+{
+       return times->policy;
+}
+
+static int concurrent_policy_time_seq_show(struct seq_file *m, void *v)
+{
+       int i;
+       struct cpu_freqs *freqs, *last_freqs = NULL;
+
+       if (v == uid_hash_table) {
+               int cnt = 0;
+
+               for_each_possible_cpu(i) {
+                       freqs = all_freqs[i];
+                       if (!freqs)
+                               continue;
+                       if (freqs != last_freqs) {
+                               if (last_freqs) {
+                                       seq_printf(m, ": %d", cnt);
+                                       seq_putc(m, ' ');
+                                       cnt = 0;
+                               }
+                               seq_printf(m, "policy%d", i);
+
+                               last_freqs = freqs;
+                       }
+                       cnt++;
+               }
+               if (last_freqs) {
+                       seq_printf(m, ": %d", cnt);
+                       seq_putc(m, '\n');
+               }
+       }
+
+       return concurrent_time_seq_show(m, v, get_policy_times);
+}
+
 void cpufreq_task_times_init(struct task_struct *p)
 {
        unsigned long flags;
@@ -326,11 +419,16 @@ void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime)
 {
        unsigned long flags;
        unsigned int state;
+       unsigned int active_cpu_cnt = 0;
+       unsigned int policy_cpu_cnt = 0;
+       unsigned int policy_first_cpu;
        struct uid_entry *uid_entry;
        struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+       struct cpufreq_policy *policy;
        uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
+       int cpu = 0;
 
-       if (!freqs || p->flags & PF_EXITING)
+       if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
                return;
 
        state = freqs->offset + READ_ONCE(freqs->last_index);
@@ -346,6 +444,42 @@ void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime)
        if (uid_entry && state < uid_entry->max_state)
                uid_entry->time_in_state[state] += cputime;
        spin_unlock_irqrestore(&uid_lock, flags);
+
+       rcu_read_lock();
+       uid_entry = find_uid_entry_rcu(uid);
+       if (!uid_entry) {
+               rcu_read_unlock();
+               return;
+       }
+
+       for_each_possible_cpu(cpu)
+               if (!idle_cpu(cpu))
+                       ++active_cpu_cnt;
+
+       atomic64_add(cputime,
+                    &uid_entry->concurrent_times->active[active_cpu_cnt - 1]);
+
+       policy = cpufreq_cpu_get(task_cpu(p));
+       if (!policy) {
+               /*
+                * This CPU may have just come up and not have a cpufreq policy
+                * yet.
+                */
+               rcu_read_unlock();
+               return;
+       }
+
+       for_each_cpu(cpu, policy->related_cpus)
+               if (!idle_cpu(cpu))
+                       ++policy_cpu_cnt;
+
+       policy_first_cpu = cpumask_first(policy->related_cpus);
+       cpufreq_cpu_put(policy);
+
+       atomic64_add(cputime,
+                    &uid_entry->concurrent_times->policy[policy_first_cpu +
+                                                         policy_cpu_cnt - 1]);
+       rcu_read_unlock();
 }
 
 void cpufreq_times_create_policy(struct cpufreq_policy *policy)
@@ -387,6 +521,14 @@ void cpufreq_times_create_policy(struct cpufreq_policy *policy)
                all_freqs[cpu] = freqs;
 }
 
+static void uid_entry_reclaim(struct rcu_head *rcu)
+{
+       struct uid_entry *uid_entry = container_of(rcu, struct uid_entry, rcu);
+
+       kfree(uid_entry->concurrent_times);
+       kfree(uid_entry);
+}
+
 void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
 {
        struct uid_entry *uid_entry;
@@ -400,7 +542,7 @@ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
                        hash, uid_start) {
                        if (uid_start == uid_entry->uid) {
                                hash_del_rcu(&uid_entry->hash);
-                               kfree_rcu(uid_entry, rcu);
+                               call_rcu(&uid_entry->rcu, uid_entry_reclaim);
                        }
                }
        }
@@ -453,11 +595,55 @@ static const struct file_operations uid_time_in_state_fops = {
        .release        = seq_release,
 };
 
+static const struct seq_operations concurrent_active_time_seq_ops = {
+       .start = uid_seq_start,
+       .next = uid_seq_next,
+       .stop = uid_seq_stop,
+       .show = concurrent_active_time_seq_show,
+};
+
+static int concurrent_active_time_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &concurrent_active_time_seq_ops);
+}
+
+static const struct file_operations concurrent_active_time_fops = {
+       .open           = concurrent_active_time_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
+static const struct seq_operations concurrent_policy_time_seq_ops = {
+       .start = uid_seq_start,
+       .next = uid_seq_next,
+       .stop = uid_seq_stop,
+       .show = concurrent_policy_time_seq_show,
+};
+
+static int concurrent_policy_time_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &concurrent_policy_time_seq_ops);
+}
+
+static const struct file_operations concurrent_policy_time_fops = {
+       .open           = concurrent_policy_time_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
 static int __init cpufreq_times_init(void)
 {
        proc_create_data("uid_time_in_state", 0444, NULL,
                         &uid_time_in_state_fops, NULL);
 
+       proc_create_data("uid_concurrent_active_time", 0444, NULL,
+                        &concurrent_active_time_fops, NULL);
+
+       proc_create_data("uid_concurrent_policy_time", 0444, NULL,
+                        &concurrent_policy_time_fops, NULL);
+
        return 0;
 }
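
For reference, concurrent_active_time_seq_show() above emits a "cpus: <n>" header followed by one "<uid>: <t0> <t1> ... <t(n-1)>" line per uid, where bucket i counts the clock ticks that uid's tasks ran while exactly i+1 CPUs were non-idle. A minimal userspace sketch of parsing that file follows; the proc path comes from cpufreq_times_init() above, everything else is illustrative.

#include <stdio.h>

/* Sum the per-concurrency buckets for each uid and print the totals. */
int main(void)
{
	FILE *fp = fopen("/proc/uid_concurrent_active_time", "r");
	char line[4096];
	int ncpus = 0;

	if (!fp)
		return 1;

	if (fscanf(fp, "cpus: %d\n", &ncpus) != 1) {
		fclose(fp);
		return 1;
	}

	while (fgets(line, sizeof(line), fp)) {
		unsigned long long uid, ticks, total = 0;
		char *p = line;
		int n = 0;

		if (sscanf(p, "%llu:%n", &uid, &n) != 1 || n == 0)
			continue;
		p += n;
		while (sscanf(p, " %llu%n", &ticks, &n) == 1) {
			total += ticks;
			p += n;
		}
		printf("uid %llu: %llu ticks across %d buckets\n",
		       uid, total, ncpus);
	}

	fclose(fp);
	return 0;
}
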
 
index 58c933f..991b6a3 100644 (file)
@@ -145,10 +145,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
        int err = -ENODEV;
 
        cpu = of_get_cpu_node(policy->cpu, NULL);
+       if (!cpu)
+               goto out;
 
+       max_freqp = of_get_property(cpu, "clock-frequency", NULL);
        of_node_put(cpu);
-       if (!cpu)
+       if (!max_freqp) {
+               err = -EINVAL;
                goto out;
+       }
+
+       /* we need the freq in kHz */
+       max_freq = *max_freqp / 1000;
 
        dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
        if (!dn)
@@ -185,16 +193,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
        }
 
        pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
-       max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-       if (!max_freqp) {
-               err = -EINVAL;
-               goto out_unmap_sdcpwr;
-       }
-
-       /* we need the freq in kHz */
-       max_freq = *max_freqp / 1000;
-
        pr_debug("max clock-frequency is at %u kHz\n", max_freq);
        pr_debug("initializing frequency table\n");
 
@@ -212,9 +210,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
 
-out_unmap_sdcpwr:
-       iounmap(sdcpwr_mapbase);
-
 out_unmap_sdcasr:
        iounmap(sdcasr_mapbase);
 out:
index 62ce935..0147452 100644 (file)
@@ -1426,6 +1426,18 @@ static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
        }
 }
 
+static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+                                 const u8 *key, unsigned int keylen)
+{
+       if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
+           keylen == AES_KEYSIZE_256)
+               return ablkcipher_setkey(cipher, key, keylen);
+
+       crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+       return -EINVAL;
+}
+
 static void common_nonsnoop_unmap(struct device *dev,
                                  struct talitos_edesc *edesc,
                                  struct ablkcipher_request *areq)
@@ -1446,11 +1458,15 @@ static void ablkcipher_done(struct device *dev,
                            int err)
 {
        struct ablkcipher_request *areq = context;
+       struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+       unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
        struct talitos_edesc *edesc;
 
        edesc = container_of(desc, struct talitos_edesc, desc);
 
        common_nonsnoop_unmap(dev, edesc, areq);
+       memcpy(areq->info, ctx->iv, ivsize);
 
        kfree(edesc);
 
@@ -1625,6 +1641,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;
+       unsigned int blocksize =
+                       crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+       if (!areq->nbytes)
+               return 0;
+
+       if (areq->nbytes % blocksize)
+               return -EINVAL;
 
        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq, true);
@@ -1642,6 +1666,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;
+       unsigned int blocksize =
+                       crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+       if (!areq->nbytes)
+               return 0;
+
+       if (areq->nbytes % blocksize)
+               return -EINVAL;
 
        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq, false);
@@ -2375,6 +2407,7 @@ static struct talitos_alg_template driver_algs[] = {
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
                                .ivsize = AES_BLOCK_SIZE,
+                               .setkey = ablkcipher_aes_setkey,
                        }
                },
                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
index 8234d30..e121657 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016,2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -213,10 +213,11 @@ static int devfreq_gpubw_event_handler(struct devfreq *devfreq,
        case DEVFREQ_GOV_SUSPEND:
                {
                        struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
-
-                       priv->bus.total_time = 0;
-                       priv->bus.gpu_time = 0;
-                       priv->bus.ram_time = 0;
+                       if (priv) {
+                               priv->bus.total_time = 0;
+                               priv->bus.gpu_time = 0;
+                               priv->bus.ram_time = 0;
+                       }
                }
                break;
        default:
index 4054747..dd97dbf 100644 (file)
@@ -1786,27 +1786,6 @@ static int sdma_probe(struct platform_device *pdev)
        if (pdata && pdata->script_addrs)
                sdma_add_scripts(sdma, pdata->script_addrs);
 
-       if (pdata) {
-               ret = sdma_get_firmware(sdma, pdata->fw_name);
-               if (ret)
-                       dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
-       } else {
-               /*
-                * Because that device tree does not encode ROM script address,
-                * the RAM script in firmware is mandatory for device tree
-                * probe, otherwise it fails.
-                */
-               ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
-                                             &fw_name);
-               if (ret)
-                       dev_warn(&pdev->dev, "failed to get firmware name\n");
-               else {
-                       ret = sdma_get_firmware(sdma, fw_name);
-                       if (ret)
-                               dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
-               }
-       }
-
        sdma->dma_device.dev = &pdev->dev;
 
        sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
@@ -1848,6 +1827,33 @@ static int sdma_probe(struct platform_device *pdev)
                of_node_put(spba_bus);
        }
 
+       /*
+        * Kick off firmware loading as the very last step:
+        * attempt to load firmware only if we're not on the error path, because
+        * the firmware callback requires a fully functional and allocated sdma
+        * instance.
+        */
+       if (pdata) {
+               ret = sdma_get_firmware(sdma, pdata->fw_name);
+               if (ret)
+                       dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+       } else {
+               /*
+                * Because that device tree does not encode ROM script address,
+                * the RAM script in firmware is mandatory for device tree
+                * probe, otherwise it fails.
+                */
+               ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+                                             &fw_name);
+               if (ret) {
+                       dev_warn(&pdev->dev, "failed to get firmware name\n");
+               } else {
+                       ret = sdma_get_firmware(sdma, fw_name);
+                       if (ret)
+                               dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
+               }
+       }
+
        return 0;
 
 err_register:
index 1dfc71c..57b6e6c 100644 (file)
@@ -1199,8 +1199,10 @@ static int omap_dma_probe(struct platform_device *pdev)
 
                rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
                                      IRQF_SHARED, "omap-dma-engine", od);
-               if (rc)
+               if (rc) {
+                       omap_dma_free(od);
                        return rc;
+               }
        }
 
        rc = dma_async_device_register(&od->ddev);
index 2b36d1c..956189a 100644 (file)
@@ -1030,7 +1030,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        dma_addr_t dev_addr;
 
        /* Someone calling slave DMA on a generic channel? */
-       if (rchan->mid_rid < 0 || !sg_len) {
+       if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
                dev_warn(chan->device->dev,
                         "%s: bad parameter: len=%d, id=%d\n",
                         __func__, sg_len, rchan->mid_rid);
index dd3e7ba..0fede05 100644 (file)
@@ -142,7 +142,7 @@ enum d40_events {
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
  */
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
        D40_DREG_LCPA,
        D40_DREG_LCLA,
        D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
 
 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
 
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
        D40_CHAN_REG_SSCFG,
        D40_CHAN_REG_SSELT,
        D40_CHAN_REG_SSPTR,
index 3c8f19f..d459cf4 100644 (file)
@@ -26,7 +26,7 @@
 static int edac_mc_log_ue = 1;
 static int edac_mc_log_ce = 1;
 static int edac_mc_panic_on_ue;
-static int edac_mc_poll_msec = 1000;
+static unsigned int edac_mc_poll_msec = 1000;
 
 /* Getter functions for above */
 int edac_mc_get_log_ue(void)
@@ -45,30 +45,30 @@ int edac_mc_get_panic_on_ue(void)
 }
 
 /* this is temporary */
-int edac_mc_get_poll_msec(void)
+unsigned int edac_mc_get_poll_msec(void)
 {
        return edac_mc_poll_msec;
 }
 
 static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
 {
-       unsigned long l;
+       unsigned int i;
        int ret;
 
        if (!val)
                return -EINVAL;
 
-       ret = kstrtoul(val, 0, &l);
+       ret = kstrtouint(val, 0, &i);
        if (ret)
                return ret;
 
-       if (l < 1000)
+       if (i < 1000)
                return -EINVAL;
 
-       *((unsigned long *)kp->arg) = l;
+       *((unsigned int *)kp->arg) = i;
 
        /* notify edac_mc engine to reset the poll period */
-       edac_mc_reset_delay_period(l);
+       edac_mc_reset_delay_period(i);
 
        return 0;
 }
@@ -82,7 +82,7 @@ MODULE_PARM_DESC(edac_mc_log_ue,
 module_param(edac_mc_log_ce, int, 0644);
 MODULE_PARM_DESC(edac_mc_log_ce,
                 "Log correctable error to console: 0=off 1=on");
-module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
+module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint,
                  &edac_mc_poll_msec, 0644);
 MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
 
@@ -426,6 +426,8 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
 static int edac_create_csrow_object(struct mem_ctl_info *mci,
                                    struct csrow_info *csrow, int index)
 {
+       int err;
+
        csrow->dev.type = &csrow_attr_type;
        csrow->dev.bus = mci->bus;
        csrow->dev.groups = csrow_dev_groups;
@@ -438,7 +440,11 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
        edac_dbg(0, "creating (virtual) csrow node %s\n",
                 dev_name(&csrow->dev));
 
-       return device_add(&csrow->dev);
+       err = device_add(&csrow->dev);
+       if (err)
+               put_device(&csrow->dev);
+
+       return err;
 }
 
 /* Create a CSROW object under specified edac_mc_device */
index b95a48f..c7a7a08 100644 (file)
@@ -33,7 +33,7 @@ extern int edac_mc_get_log_ue(void);
 extern int edac_mc_get_log_ce(void);
 extern int edac_mc_get_panic_on_ue(void);
 extern int edac_get_poll_msec(void);
-extern int edac_mc_get_poll_msec(void);
+extern unsigned int edac_mc_get_poll_msec(void);
 
 unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
                                 unsigned len);
index 68489ef..3c06be1 100644 (file)
@@ -135,7 +135,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
 
 config ISCSI_IBFT_FIND
        bool "iSCSI Boot Firmware Table Attributes"
-       depends on X86 && ACPI
+       depends on X86 && ISCSI_IBFT
        default n
        help
          This option enables the kernel to find the region of memory
@@ -146,7 +146,8 @@ config ISCSI_IBFT_FIND
 config ISCSI_IBFT
        tristate "iSCSI Boot Firmware Table Attributes module"
        select ISCSI_BOOT_SYSFS
-       depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+       select ISCSI_IBFT_FIND if X86
+       depends on ACPI && SCSI && SCSI_LOWLEVEL
        default n
        help
          This option enables support for detection and exposing of iSCSI
index 437c8ef..30d67fb 100644 (file)
@@ -93,6 +93,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IBFT_ISCSI_VERSION);
 
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
 struct ibft_hdr {
        u8 id;
        u8 version;
index c8c49b1..6e65c02 100644 (file)
@@ -821,9 +821,9 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
 
        raw_spin_lock_irqsave(&bank->lock, flags);
        bank->irq_usage &= ~(BIT(offset));
-       omap_set_gpio_irqenable(bank, offset, 0);
-       omap_clear_gpio_irqstatus(bank, offset);
        omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+       omap_clear_gpio_irqstatus(bank, offset);
+       omap_set_gpio_irqenable(bank, offset, 0);
        if (!LINE_USED(bank->mod_usage, offset))
                omap_clear_gpio_debounce(bank, offset);
        omap_disable_gpio_module(bank, offset);
@@ -865,8 +865,8 @@ static void omap_gpio_mask_irq(struct irq_data *d)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&bank->lock, flags);
-       omap_set_gpio_irqenable(bank, offset, 0);
        omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+       omap_set_gpio_irqenable(bank, offset, 0);
        raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
@@ -878,9 +878,6 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&bank->lock, flags);
-       if (trigger)
-               omap_set_gpio_triggering(bank, offset, trigger);
-
        omap_set_gpio_irqenable(bank, offset, 1);
 
        /*
@@ -888,9 +885,13 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
         * is cleared, thus after the handler has run. OMAP4 needs this done
         * after enabling the interrupt to clear the wakeup status.
         */
-       if (bank->level_mask & BIT(offset))
+       if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
+           trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
                omap_clear_gpio_irqstatus(bank, offset);
 
+       if (trigger)
+               omap_set_gpio_triggering(bank, offset, trigger);
+
        raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
@@ -1611,6 +1612,8 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
        .clr_dataout =          OMAP4_GPIO_CLEARDATAOUT,
        .irqstatus =            OMAP4_GPIO_IRQSTATUS0,
        .irqstatus2 =           OMAP4_GPIO_IRQSTATUS1,
+       .irqstatus_raw0 =       OMAP4_GPIO_IRQSTATUSRAW0,
+       .irqstatus_raw1 =       OMAP4_GPIO_IRQSTATUSRAW1,
        .irqenable =            OMAP4_GPIO_IRQSTATUSSET0,
        .irqenable2 =           OMAP4_GPIO_IRQSTATUSSET1,
        .set_irqenable =        OMAP4_GPIO_IRQSTATUSSET0,
index 2acc5cb..30864de 100644 (file)
@@ -185,6 +185,25 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
 }
 
 static int
+nvkm_i2c_preinit(struct nvkm_subdev *subdev)
+{
+       struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+       struct nvkm_i2c_bus *bus;
+       struct nvkm_i2c_pad *pad;
+
+       /*
+        * We init our i2c busses as early as possible, since they may be
+        * needed by the vbios init scripts on some cards
+        */
+       list_for_each_entry(pad, &i2c->pad, head)
+               nvkm_i2c_pad_init(pad);
+       list_for_each_entry(bus, &i2c->bus, head)
+               nvkm_i2c_bus_init(bus);
+
+       return 0;
+}
+
+static int
 nvkm_i2c_init(struct nvkm_subdev *subdev)
 {
        struct nvkm_i2c *i2c = nvkm_i2c(subdev);
@@ -238,6 +257,7 @@ nvkm_i2c_dtor(struct nvkm_subdev *subdev)
 static const struct nvkm_subdev_func
 nvkm_i2c = {
        .dtor = nvkm_i2c_dtor,
+       .preinit = nvkm_i2c_preinit,
        .init = nvkm_i2c_init,
        .fini = nvkm_i2c_fini,
        .intr = nvkm_i2c_intr,
index f418c00..ecad4d7 100644 (file)
@@ -1389,7 +1389,14 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
        dsi->format = desc->format;
        dsi->lanes = desc->lanes;
 
-       return mipi_dsi_attach(dsi);
+       err = mipi_dsi_attach(dsi);
+       if (err) {
+               struct panel_simple *panel = dev_get_drvdata(&dsi->dev);
+
+               drm_panel_remove(&panel->base);
+       }
+
+       return err;
 }
 
 static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
index 6296e9f..0b8f8c1 100644 (file)
@@ -535,6 +535,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
        ret = wait_event_timeout(vgdev->resp_wq,
                                 atomic_read(&cache_ent->is_valid), 5 * HZ);
 
+       /* is_valid check must precede the copy of the cache entry. */
+       smp_rmb();
+
        ptr = cache_ent->caps_cache;
 
 copy_exit:
index 52436b3..a1b3ea1 100644 (file)
@@ -618,6 +618,8 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
+                       /* Copy must occur before is_valid is signalled. */
+                       smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
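
The pair of virtio-gpu hunks above order the capset cache handoff: the writer copies the response data and only then sets is_valid behind an smp_wmb(), while the woken reader issues an smp_rmb() before touching caps_cache, so the flag is never observed without the data. A minimal user-space sketch of the same publish/consume pattern, using C11 fences in place of the kernel's smp_wmb()/smp_rmb(); struct and field names are illustrative, not the driver's, and it builds with -pthread:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <string.h>

    struct cache_ent {
            char data[64];          /* payload filled by the producer */
            atomic_int is_valid;    /* published last */
    };

    static struct cache_ent ent;

    static void *producer(void *arg)
    {
            strcpy(ent.data, "capset response");
            /* Copy must be visible before is_valid is signalled. */
            atomic_thread_fence(memory_order_release);
            atomic_store_explicit(&ent.is_valid, 1, memory_order_relaxed);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, producer, NULL);

            /* Consumer: wait for the flag, then order the data read after it. */
            while (!atomic_load_explicit(&ent.is_valid, memory_order_relaxed))
                    ;
            atomic_thread_fence(memory_order_acquire);
            printf("%s\n", ent.data);

            pthread_join(t, NULL);
            return 0;
    }
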
index 1dcb96c..1f7b602 100644 (file)
@@ -255,7 +255,7 @@ static int init_csc(struct ipu_ic *ic,
        writel(param, base++);
 
        param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) |
-               (params->sat << 9);
+               (params->sat << 10);
        writel(param, base++);
 
        param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
index 65e7335..3a37778 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017,2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -259,6 +259,11 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev,
                PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED, "pagetable_desc");
        if (ret)
                return ret;
+
+       /* allocate a chunk of memory to create user profiling IB1s */
+       kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->profile_desc,
+               PAGE_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0, "profile_desc");
+
        return kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->buffer_desc,
                        KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY,
                        0, "ringbuffer");
@@ -272,7 +277,7 @@ int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt)
 
        if (!adreno_is_a3xx(adreno_dev)) {
                status = kgsl_allocate_global(device, &device->scratch,
-                               PAGE_SIZE, 0, 0, "scratch");
+                               PAGE_SIZE, 0, KGSL_MEMDESC_RANDOM, "scratch");
                if (status != 0)
                        return status;
        }
@@ -303,7 +308,7 @@ static void _adreno_ringbuffer_close(struct adreno_device *adreno_dev,
 
        kgsl_free_global(device, &rb->pagetable_desc);
        kgsl_free_global(device, &rb->preemption_desc);
-
+       kgsl_free_global(device, &rb->profile_desc);
        kgsl_free_global(device, &rb->buffer_desc);
        kgsl_del_event_group(&rb->events);
        memset(rb, 0, sizeof(struct adreno_ringbuffer));
@@ -737,6 +742,37 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
        return (unsigned int)(p - cmds);
 }
 
+/* This is the maximum possible size for 64 bit targets */
+#define PROFILE_IB_DWORDS 4
+#define PROFILE_IB_SLOTS (PAGE_SIZE / (PROFILE_IB_DWORDS << 2))
+
+static int set_user_profiling(struct adreno_device *adreno_dev,
+               struct adreno_ringbuffer *rb, u32 *cmds, u64 gpuaddr)
+{
+       int dwords, index = 0;
+       u64 ib_gpuaddr;
+       u32 *ib;
+
+       if (!rb->profile_desc.hostptr)
+               return 0;
+
+       ib = ((u32 *) rb->profile_desc.hostptr) +
+               (rb->profile_index * PROFILE_IB_DWORDS);
+       ib_gpuaddr = rb->profile_desc.gpuaddr +
+               (rb->profile_index * (PROFILE_IB_DWORDS << 2));
+
+       dwords = _get_alwayson_counter(adreno_dev, ib, gpuaddr);
+
+       /* Make an indirect buffer for the request */
+       cmds[index++] = cp_mem_packet(adreno_dev, CP_INDIRECT_BUFFER_PFE, 2, 1);
+       index += cp_gpuaddr(adreno_dev, &cmds[index], ib_gpuaddr);
+       cmds[index++] = dwords;
+
+       rb->profile_index = (rb->profile_index + 1) % PROFILE_IB_SLOTS;
+
+       return index;
+}
+
 /* adreno_rindbuffer_submitcmd - submit userspace IBs to the GPU */
 int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
                struct kgsl_drawobj_cmd *cmdobj,
@@ -836,14 +872,12 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
        if (drawobj->flags & KGSL_DRAWOBJ_PROFILING &&
                !adreno_is_a3xx(adreno_dev) && profile_buffer) {
                user_profiling = true;
-               dwords += 6;
 
                /*
-                * REG_TO_MEM packet on A5xx and above needs another ordinal.
-                * Add 2 more dwords since we do profiling before and after.
+                * User side profiling uses two IB1s, one before and one after
+                * the command, each taking 4 dwords for the INDIRECT_BUFFER_PFE
+                * call
                 */
-               if (!ADRENO_LEGACY_PM4(adreno_dev))
-                       dwords += 2;
+               dwords += 8;
 
                /*
                 * we want to use an adreno_submit_time struct to get the
@@ -886,11 +920,11 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
        }
 
        /*
-        * Add cmds to read the GPU ticks at the start of command obj and
+        * Add IB1 to read the GPU ticks at the start of command obj and
         * write it into the appropriate command obj profiling buffer offset
         */
        if (user_profiling) {
-               cmds += _get_alwayson_counter(adreno_dev, cmds,
+               cmds += set_user_profiling(adreno_dev, rb, cmds,
                        cmdobj->profiling_buffer_gpuaddr +
                        offsetof(struct kgsl_drawobj_profiling_buffer,
                        gpu_ticks_submitted));
@@ -929,11 +963,11 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
        }
 
        /*
-        * Add cmds to read the GPU ticks at the end of command obj and
+        * Add IB1 to read the GPU ticks at the end of command obj and
         * write it into the appropriate command obj profiling buffer offset
         */
        if (user_profiling) {
-               cmds += _get_alwayson_counter(adreno_dev, cmds,
+               cmds += set_user_profiling(adreno_dev, rb, cmds,
                        cmdobj->profiling_buffer_gpuaddr +
                        offsetof(struct kgsl_drawobj_profiling_buffer,
                        gpu_ticks_retired));
index 63374af..d64ccbd 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2016,2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -127,6 +127,18 @@ struct adreno_ringbuffer {
        unsigned long sched_timer;
        enum adreno_dispatcher_starve_timer_states starve_timer_state;
        spinlock_t preempt_lock;
+       /**
+        * @profile_desc: global memory to construct IB1s to do user side
+        * profiling
+        */
+       struct kgsl_memdesc profile_desc;
+       /**
+        * @profile_index: Index of the next free slot in profile_desc for a
+        * user profiling IB1. This allows for PAGE_SIZE / 16 = 256 simultaneous
+        * commands per ringbuffer with user profiling enabled, which should be
+        * plenty.
+        */
+       u32 profile_index;
 };
 
 /* Returns the current ringbuffer */
index 417d8c6..90d9bd2 100644 (file)
@@ -3342,11 +3342,15 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
        unsigned int cmd, void *data)
 {
        struct kgsl_process_private *process = dev_priv->process_priv;
+       struct kgsl_device *device = dev_priv->device;
        struct kgsl_sparse_phys_alloc *param = data;
        struct kgsl_mem_entry *entry;
        int ret;
        int id;
 
+       if (!(device->flags & KGSL_FLAG_SPARSE))
+               return -ENOTSUPP;
+
        ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
        if (ret)
                return ret;
@@ -3425,9 +3429,13 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
        unsigned int cmd, void *data)
 {
        struct kgsl_process_private *process = dev_priv->process_priv;
+       struct kgsl_device *device = dev_priv->device;
        struct kgsl_sparse_phys_free *param = data;
        struct kgsl_mem_entry *entry;
 
+       if (!(device->flags & KGSL_FLAG_SPARSE))
+               return -ENOTSUPP;
+
        entry = kgsl_sharedmem_find_id_flags(process, param->id,
                        KGSL_MEMFLAGS_SPARSE_PHYS);
        if (entry == NULL)
@@ -3457,10 +3465,14 @@ long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
        unsigned int cmd, void *data)
 {
        struct kgsl_process_private *private = dev_priv->process_priv;
+       struct kgsl_device *device = dev_priv->device;
        struct kgsl_sparse_virt_alloc *param = data;
        struct kgsl_mem_entry *entry;
        int ret;
 
+       if (!(device->flags & KGSL_FLAG_SPARSE))
+               return -ENOTSUPP;
+
        ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
        if (ret)
                return ret;
@@ -3500,9 +3512,13 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
        unsigned int cmd, void *data)
 {
        struct kgsl_process_private *process = dev_priv->process_priv;
+       struct kgsl_device *device = dev_priv->device;
        struct kgsl_sparse_virt_free *param = data;
        struct kgsl_mem_entry *entry = NULL;
 
+       if (!(device->flags & KGSL_FLAG_SPARSE))
+               return -ENOTSUPP;
+
        entry = kgsl_sharedmem_find_id_flags(process, param->id,
                        KGSL_MEMFLAGS_SPARSE_VIRT);
        if (entry == NULL)
@@ -3849,6 +3865,7 @@ long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data)
 {
        struct kgsl_process_private *private = dev_priv->process_priv;
+       struct kgsl_device *device = dev_priv->device;
        struct kgsl_sparse_bind *param = data;
        struct kgsl_sparse_binding_object obj;
        struct kgsl_mem_entry *virt_entry;
@@ -3857,6 +3874,9 @@ long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
        int ret = 0;
        int i = 0;
 
+       if (!(device->flags & KGSL_FLAG_SPARSE))
+               return -ENOTSUPP;
+
        ptr = (void __user *) (uintptr_t) param->list;
 
        if (param->size > sizeof(struct kgsl_sparse_binding_object) ||
@@ -3912,6 +3932,9 @@ long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
        long result;
        unsigned int i = 0;
 
+       if (!(device->flags & KGSL_FLAG_SPARSE))
+               return -ENOTSUPP;
+
        /* Make sure sparse and syncpoint count isn't too big */
        if (param->numsparse > KGSL_MAX_SPARSE ||
                param->numsyncs > KGSL_MAX_SYNCPOINTS)
@@ -4720,6 +4743,9 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
        /* Initialize logging first, so that failures below actually print. */
        kgsl_device_debugfs_init(device);
 
+       /* Disable the sparse ioctls as they are not used */
+       device->flags &= ~KGSL_FLAG_SPARSE;
+
        status = kgsl_pwrctrl_init(device);
        if (status)
                goto error;
index a486d9a..6b8ef82 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2016,2018-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -189,6 +189,8 @@ struct kgsl_memdesc_ops {
 #define KGSL_MEMDESC_TZ_LOCKED BIT(7)
 /* The memdesc is allocated through contiguous memory */
 #define KGSL_MEMDESC_CONTIG BIT(8)
+/* For global buffers, randomly assign an address from the region */
+#define KGSL_MEMDESC_RANDOM BIT(9)
 
 /**
  * struct kgsl_memdesc - GPU memory object descriptor
index 549e23c..0ab6041 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -61,6 +61,7 @@ enum kgsl_event_results {
 };
 
 #define KGSL_FLAG_WAKE_ON_TOUCH BIT(0)
+#define KGSL_FLAG_SPARSE        BIT(1)
 
 /*
  * "list" of event types for ftrace symbolic magic
index 4e3788b..9ba15b6 100644 (file)
@@ -614,13 +614,29 @@ static void add_profiling_buffer(struct kgsl_device *device,
                return;
        }
 
-       cmdobj->profiling_buf_entry = entry;
 
-       if (id != 0)
+       if (!id) {
+               cmdobj->profiling_buffer_gpuaddr = gpuaddr;
+       } else {
+               u64 off = offset + sizeof(struct kgsl_drawobj_profiling_buffer);
+
+               /*
+                * Make sure there is enough room in the object to store the
+                * entire profiling buffer object
+                */
+               if (off < offset || off >= entry->memdesc.size) {
+                       dev_err(device->dev,
+                               "ignore invalid profile offset ctxt %d id %d offset %lld gpuaddr %llx size %lld\n",
+                       drawobj->context->id, id, offset, gpuaddr, size);
+                       kgsl_mem_entry_put(entry);
+                       return;
+               }
+
                cmdobj->profiling_buffer_gpuaddr =
                        entry->memdesc.gpuaddr + offset;
-       else
-               cmdobj->profiling_buffer_gpuaddr = gpuaddr;
+       }
+
+       cmdobj->profiling_buf_entry = entry;
 }
 
 /**
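
The add_profiling_buffer() hunk above computes off = offset + sizeof(struct kgsl_drawobj_profiling_buffer) and rejects the request both when the addition wraps (off < offset) and when it runs past the end of the backing entry, before any GPU address is derived from it. A standalone sketch of that overflow-plus-bounds check on an untrusted offset; names and sizes are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Return 1 if [offset, offset + need) lies entirely inside an object of
     * 'size' bytes, guarding against the 64-bit addition wrapping around.
     */
    static int range_ok(uint64_t offset, uint64_t need, uint64_t size)
    {
            uint64_t end = offset + need;

            if (end < offset)       /* wraparound */
                    return 0;
            return end <= size;
    }

    int main(void)
    {
            printf("%d\n", range_ok(4096, 64, 8192));            /* 1: fits     */
            printf("%d\n", range_ok(8190, 64, 8192));            /* 0: past end */
            printf("%d\n", range_ok(UINT64_MAX - 8, 64, 8192));  /* 0: wraps    */
            return 0;
    }
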
index 320b815..ffab542 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
 #include <linux/msm_kgsl.h>
 #include <linux/ratelimit.h>
 #include <linux/of_platform.h>
+#include <linux/random.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
 #include <stddef.h>
@@ -84,15 +85,8 @@ static struct kmem_cache *addr_entry_cache;
  *
  * Here we define an array and a simple allocator to keep track of the currently
  * active global entries. Each entry is assigned a unique address inside of a
- * MMU implementation specific "global" region. The addresses are assigned
- * sequentially and never re-used to avoid having to go back and reprogram
- * existing pagetables. The entire list of active entries are mapped and
- * unmapped into every new pagetable as it is created and destroyed.
- *
- * Because there are relatively few entries and they are defined at boot time we
- * don't need to go over the top to define a dynamic allocation scheme. It will
- * be less wasteful to pick a static number with a little bit of growth
- * potential.
+ * MMU implementation specific "global" region. We use a simple bitmap based
+ * allocator for the region to allow for both fixed and dynamic addressing.
  */
 
 #define GLOBAL_PT_ENTRIES 32
@@ -102,10 +96,12 @@ struct global_pt_entry {
        char name[32];
 };
 
+#define GLOBAL_MAP_PAGES (KGSL_IOMMU_GLOBAL_MEM_SIZE >> PAGE_SHIFT)
+
 static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
 static struct kgsl_memdesc *kgsl_global_secure_pt_entry;
+static DECLARE_BITMAP(global_map, GLOBAL_MAP_PAGES);
 static int global_pt_count;
-uint64_t global_pt_alloc;
 static struct kgsl_memdesc gpu_qdss_desc;
 static struct kgsl_memdesc gpu_qtimer_desc;
 
@@ -186,6 +182,12 @@ static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
 
        for (i = 0; i < global_pt_count; i++) {
                if (global_pt_entries[i].memdesc == memdesc) {
+                       u64 offset = memdesc->gpuaddr -
+                               KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
+
+                       bitmap_clear(global_map, offset >> PAGE_SHIFT,
+                               kgsl_memdesc_footprint(memdesc) >> PAGE_SHIFT);
+
                        memdesc->gpuaddr = 0;
                        memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
                        global_pt_entries[i].memdesc = NULL;
@@ -197,15 +199,43 @@ static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
 static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
                struct kgsl_memdesc *memdesc, const char *name)
 {
+       int bit, start = 0;
+       u64 size = kgsl_memdesc_footprint(memdesc);
+
        if (memdesc->gpuaddr != 0)
                return;
 
-       BUG_ON(global_pt_count >= GLOBAL_PT_ENTRIES);
-       BUG_ON((global_pt_alloc + memdesc->size) >= KGSL_IOMMU_GLOBAL_MEM_SIZE);
+       if (WARN_ON(global_pt_count >= GLOBAL_PT_ENTRIES))
+               return;
+
+       if (WARN_ON(size > KGSL_IOMMU_GLOBAL_MEM_SIZE))
+               return;
+
+       if (memdesc->priv & KGSL_MEMDESC_RANDOM) {
+               u32 range = GLOBAL_MAP_PAGES - (size >> PAGE_SHIFT);
+
+               start = get_random_int() % range;
+       }
+
+       while (start >= 0) {
+               bit = bitmap_find_next_zero_area(global_map, GLOBAL_MAP_PAGES,
+                       start, size >> PAGE_SHIFT, 0);
+
+               if (bit < GLOBAL_MAP_PAGES)
+                       break;
+
+               start--;
+       }
+
+       if (WARN_ON(start < 0))
+               return;
+
+       memdesc->gpuaddr =
+               KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + (bit << PAGE_SHIFT);
+
+       bitmap_set(global_map, bit, size >> PAGE_SHIFT);
 
-       memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc;
        memdesc->priv |= KGSL_MEMDESC_GLOBAL;
-       global_pt_alloc += memdesc->size;
 
        global_pt_entries[global_pt_count].memdesc = memdesc;
        strlcpy(global_pt_entries[global_pt_count].name, name,
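
The kgsl_iommu_add_global() rewrite above replaces a never-reused bump pointer with a page bitmap over the global region, searched via bitmap_find_next_zero_area(), and starts the search at a random page for KGSL_MEMDESC_RANDOM buffers, walking the start position down when nothing is free above it. A standalone sketch of that allocation strategy; it falls back to a scan from page 0 instead of walking the start down one page at a time, which still succeeds whenever a free run exists, and all sizes and names are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define REGION_PAGES 8192          /* e.g. a 32 MB region of 4 KB pages */

    static uint8_t bitmap[REGION_PAGES / 8];

    static int test_bit(unsigned int bit)
    {
            return bitmap[bit / 8] & (1u << (bit % 8));
    }

    static void set_bits(unsigned int start, unsigned int count)
    {
            for (unsigned int i = 0; i < count; i++)
                    bitmap[(start + i) / 8] |= 1u << ((start + i) % 8);
    }

    /* Find 'count' consecutive clear bits at or after 'start'; -1 if none. */
    static int find_zero_area(unsigned int start, unsigned int count)
    {
            for (unsigned int base = start; base + count <= REGION_PAGES; base++) {
                    unsigned int i;

                    for (i = 0; i < count && !test_bit(base + i); i++)
                            ;
                    if (i == count)
                            return (int)base;
            }
            return -1;
    }

    /* Allocate 'pages' pages, optionally from a randomized starting page. */
    static int alloc_region_pages(unsigned int pages, int randomize)
    {
            int start = 0, bit;

            if (randomize)
                    start = rand() % (REGION_PAGES - pages);

            bit = find_zero_area((unsigned int)start, pages);
            if (bit < 0 && start > 0)
                    bit = find_zero_area(0, pages);   /* fall back to a full scan */
            if (bit >= 0)
                    set_bits((unsigned int)bit, pages);
            return bit;
    }

    int main(void)
    {
            srand((unsigned int)time(NULL));
            printf("fixed alloc at page %d\n", alloc_region_pages(4, 0));
            printf("random alloc at page %d\n", alloc_region_pages(4, 1));
            return 0;
    }
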
index fd8cd41..7f6c137 100644 (file)
@@ -200,14 +200,13 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
  * Add a usage to the temporary parser table.
  */
 
-static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
+static int hid_add_usage(struct hid_parser *parser, unsigned usage)
 {
        if (parser->local.usage_index >= HID_MAX_USAGES) {
                hid_err(parser->device, "usage index exceeded\n");
                return -1;
        }
        parser->local.usage[parser->local.usage_index] = usage;
-       parser->local.usage_size[parser->local.usage_index] = size;
        parser->local.collection_index[parser->local.usage_index] =
                parser->collection_stack_ptr ?
                parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
@@ -464,7 +463,10 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
                        return 0;
                }
 
-               return hid_add_usage(parser, data, item->size);
+               if (item->size <= 2)
+                       data = (parser->global.usage_page << 16) + data;
+
+               return hid_add_usage(parser, data);
 
        case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
 
@@ -473,6 +475,9 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
                        return 0;
                }
 
+               if (item->size <= 2)
+                       data = (parser->global.usage_page << 16) + data;
+
                parser->local.usage_minimum = data;
                return 0;
 
@@ -483,6 +488,9 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
                        return 0;
                }
 
+               if (item->size <= 2)
+                       data = (parser->global.usage_page << 16) + data;
+
                count = data - parser->local.usage_minimum;
                if (count + parser->local.usage_index >= HID_MAX_USAGES) {
                        /*
@@ -502,7 +510,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
                }
 
                for (n = parser->local.usage_minimum; n <= data; n++)
-                       if (hid_add_usage(parser, n, item->size)) {
+                       if (hid_add_usage(parser, n)) {
                                dbg_hid("hid_add_usage failed\n");
                                return -1;
                        }
@@ -517,22 +525,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 }
 
 /*
- * Concatenate Usage Pages into Usages where relevant:
- * As per specification, 6.2.2.8: "When the parser encounters a main item it
- * concatenates the last declared Usage Page with a Usage to form a complete
- * usage value."
- */
-
-static void hid_concatenate_usage_page(struct hid_parser *parser)
-{
-       int i;
-
-       for (i = 0; i < parser->local.usage_index; i++)
-               if (parser->local.usage_size[i] <= 2)
-                       parser->local.usage[i] += parser->global.usage_page << 16;
-}
-
-/*
  * Process a main item.
  */
 
@@ -541,8 +533,6 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
        __u32 data;
        int ret;
 
-       hid_concatenate_usage_page(parser);
-
        data = item_udata(item);
 
        switch (item->tag) {
@@ -756,8 +746,6 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
        __u32 data;
        int i;
 
-       hid_concatenate_usage_page(parser);
-
        data = item_udata(item);
 
        switch (item->tag) {
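
The hid-core.c change above folds the current Usage Page into each Usage as the local item is parsed (for items no larger than two bytes) rather than concatenating the two in a later pass, so the composite 32-bit usage is simply the page in the upper 16 bits and the usage ID in the lower 16. A trivial standalone illustration of that composition, with example values only:

    #include <stdint.h>
    #include <stdio.h>

    /* Build a 32-bit HID usage from a 16-bit usage page and usage ID. */
    static uint32_t hid_compose_usage(uint16_t usage_page, uint16_t usage_id)
    {
            return ((uint32_t)usage_page << 16) | usage_id;
    }

    int main(void)
    {
            /* e.g. Generic Desktop (0x01) / Mouse (0x02) -> 0x00010002 */
            printf("0x%08x\n", hid_compose_usage(0x0001, 0x0002));
            return 0;
    }
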
index 6e1a4a4..ab9da59 100644 (file)
@@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
 
        /* Locate the boot interface, to receive the LED change events */
        struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+       struct hid_device *boot_hid;
+       struct hid_input *boot_hid_input;
 
-       struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
-       struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+       if (unlikely(boot_interface == NULL))
+               return -ENODEV;
+
+       boot_hid = usb_get_intfdata(boot_interface);
+       boot_hid_input = list_first_entry(&boot_hid->inputs,
                struct hid_input, list);
 
        return boot_hid_input->input->event(boot_hid_input->input, type, code,
index 00d8366..e180729 100644 (file)
 #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A  0x0a4a
 #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A  0x0b4a
 #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE         0x134a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641    0x0641
 
 #define USB_VENDOR_ID_HUION            0x256c
 #define USB_DEVICE_ID_HUION_TABLET     0x006e
index b833760..cfa0cb2 100644 (file)
@@ -34,6 +34,8 @@
 
 #include "hid-ids.h"
 
+#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT       0xb320
+
 static const signed short ff_rumble[] = {
        FF_RUMBLE,
        -1
@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
        struct hid_field *ff_field = tmff->ff_field;
        int x, y;
        int left, right;        /* Rumbling */
+       int motor_swap;
 
        switch (effect->type) {
        case FF_CONSTANT:
@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
                                        ff_field->logical_minimum,
                                        ff_field->logical_maximum);
 
+               /* 2-in-1 strong motor is left */
+               if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
+                       motor_swap = left;
+                       left = right;
+                       right = motor_swap;
+               }
+
                dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
                ff_field->value[0] = left;
                ff_field->value[1] = right;
@@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
                .driver_data = (unsigned long)ff_rumble },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304),   /* FireStorm Dual Power 2 (and 3) */
                .driver_data = (unsigned long)ff_rumble },
+       { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT),   /* Dual Trigger 2-in-1 */
+               .driver_data = (unsigned long)ff_rumble },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323),   /* Dual Trigger 3-in-1 (PC Mode) */
                .driver_data = (unsigned long)ff_rumble },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324),   /* Dual Trigger 3-in-1 (PS3 Mode) */
index c9a1131..5dcdfdc 100644 (file)
@@ -82,6 +82,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
index e7dd152..250ec05 100644 (file)
@@ -308,6 +308,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
        spin_unlock_irq(&list->hiddev->list_lock);
 
        mutex_lock(&hiddev->existancelock);
+       /*
+        * recheck exist with existance lock held to
+        * avoid opening a disconnected device
+        */
+       if (!list->hiddev->exist) {
+               res = -ENODEV;
+               goto bail_unlock;
+       }
        if (!list->hiddev->open++)
                if (list->hiddev->exist) {
                        struct hid_device *hid = hiddev->hid;
@@ -322,6 +330,10 @@ static int hiddev_open(struct inode *inode, struct file *file)
        return 0;
 bail_unlock:
        mutex_unlock(&hiddev->existancelock);
+
+       spin_lock_irq(&list->hiddev->list_lock);
+       list_del(&list->node);
+       spin_unlock_irq(&list->hiddev->list_lock);
 bail:
        file->private_data = NULL;
        vfree(list);
index b184956..72a1fdd 100644 (file)
@@ -674,7 +674,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
        input_report_key(input, BTN_BASE2, (data[11] & 0x02));
 
        if (data[12] & 0x80)
-               input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+               input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
        else
                input_report_abs(input, ABS_WHEEL, 0);
 
index d3c6115..db38dff 100644 (file)
@@ -696,7 +696,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
+static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
 static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
 static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
 
@@ -3478,6 +3478,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
                data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
                data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
+               data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
                data->REG_PWM[0] = NCT6106_REG_PWM;
                data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
                data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
index 12b94b0..7f8738a 100644 (file)
@@ -768,7 +768,7 @@ static struct attribute *nct7802_in_attrs[] = {
        &sensor_dev_attr_in3_alarm.dev_attr.attr,
        &sensor_dev_attr_in3_beep.dev_attr.attr,
 
-       &sensor_dev_attr_in4_input.dev_attr.attr,       /* 17 */
+       &sensor_dev_attr_in4_input.dev_attr.attr,       /* 16 */
        &sensor_dev_attr_in4_min.dev_attr.attr,
        &sensor_dev_attr_in4_max.dev_attr.attr,
        &sensor_dev_attr_in4_alarm.dev_attr.attr,
@@ -794,9 +794,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
 
        if (index >= 6 && index < 11 && (reg & 0x03) != 0x03)   /* VSEN1 */
                return 0;
-       if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c)  /* VSEN2 */
+       if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c)  /* VSEN2 */
                return 0;
-       if (index >= 17 && (reg & 0x30) != 0x30)                /* VSEN3 */
+       if (index >= 16 && (reg & 0x30) != 0x30)                /* VSEN3 */
                return 0;
 
        return attr->mode;
index 7d5c53a..5fcdb2d 100644 (file)
@@ -625,7 +625,7 @@ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
                goto err_out;
 
        ret = -ENOMEM;
-       page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+       page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
        if (!page)
                goto err_free_sgt;
 
index 0f41fef..127ff76 100644 (file)
@@ -1053,7 +1053,6 @@ int stm_source_register_device(struct device *parent,
 
 err:
        put_device(&src->dev);
-       kfree(src);
 
        return err;
 }
index 198e558..870f698 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2155,8 +2155,12 @@ static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl)
 {
        struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
        struct i2c_msg          *cur_msg = ctrl->xfer.msgs + cur_buf->msg_idx;
-       int bc_rem = cur_msg->len - cur_buf->end_idx;
+       int bc_rem = 0;
 
+       if (!cur_msg)
+               return false;
+
+       bc_rem = cur_msg->len - cur_buf->end_idx;
        if (cur_buf->is_init && cur_buf->end_idx && bc_rem) {
                /* not the first buffer in a message */
 
@@ -2330,17 +2334,12 @@ i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        struct i2c_msm_ctrl      *ctrl = i2c_get_adapdata(adap);
        struct i2c_msm_xfer      *xfer = &ctrl->xfer;
 
-       if (num < 1) {
+       if (IS_ERR_OR_NULL(msgs) || num < 1) {
                dev_err(ctrl->dev,
-               "error on number of msgs(%d) received\n", num);
+               "invalid message pointer or message count\n");
                return -EINVAL;
        }
 
-       if (IS_ERR_OR_NULL(msgs)) {
-               dev_err(ctrl->dev, " error on msgs Accessing invalid  pointer location\n");
-               return PTR_ERR(msgs);
-       }
-
        /* if system is suspended just bail out */
        if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) {
                dev_err(ctrl->dev,
index 864a7c8..68835de 100644 (file)
@@ -481,14 +481,13 @@ int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgi
        struct net_device *dev;
 
        union {
-               struct sockaddr     _sockaddr;
                struct sockaddr_in  _sockaddr_in;
                struct sockaddr_in6 _sockaddr_in6;
        } sgid_addr, dgid_addr;
 
 
-       rdma_gid2ip(&sgid_addr._sockaddr, sgid);
-       rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+       rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
+       rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);
 
        memset(&dev_addr, 0, sizeof(dev_addr));
        dev_addr.bound_dev_if = if_index;
@@ -496,8 +495,9 @@ int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgi
 
        ctx.addr = &dev_addr;
        init_completion(&ctx.comp);
-       ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
-                       &dev_addr, 1000, resolve_cb, &ctx);
+       ret = rdma_resolve_ip(&self, (struct sockaddr *)&sgid_addr,
+                             (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
+                             resolve_cb, &ctx);
        if (ret)
                return ret;
 
@@ -519,16 +519,15 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
        int ret = 0;
        struct rdma_dev_addr dev_addr;
        union {
-               struct sockaddr     _sockaddr;
                struct sockaddr_in  _sockaddr_in;
                struct sockaddr_in6 _sockaddr_in6;
        } gid_addr;
 
-       rdma_gid2ip(&gid_addr._sockaddr, sgid);
+       rdma_gid2ip((struct sockaddr *)&gid_addr, sgid);
 
        memset(&dev_addr, 0, sizeof(dev_addr));
        dev_addr.net = &init_net;
-       ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
+       ret = rdma_translate_ip((struct sockaddr *)&gid_addr, &dev_addr, vlan_id);
        if (ret)
                return ret;
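
The addr.c hunks above drop the bare struct sockaddr member from the address unions and cast the union itself wherever a struct sockaddr * is expected, so IPv6 data is never accessed through a member smaller than what was written into the union. The same pattern in ordinary socket code, as a standalone sketch with an arbitrary example address:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
            union {
                    struct sockaddr_in  v4;
                    struct sockaddr_in6 v6;
            } addr;
            char buf[INET6_ADDRSTRLEN];

            memset(&addr, 0, sizeof(addr));
            addr.v6.sin6_family = AF_INET6;
            inet_pton(AF_INET6, "2001:db8::1", &addr.v6.sin6_addr);

            /* Cast the whole union, not an undersized sockaddr member. */
            struct sockaddr *sa = (struct sockaddr *)&addr;

            if (sa->sa_family == AF_INET6) {
                    inet_ntop(AF_INET6,
                              &((struct sockaddr_in6 *)sa)->sin6_addr,
                              buf, sizeof(buf));
                    printf("%s\n", buf);
            }
            return 0;
    }
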
 
index 57f281f..e9e75f4 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 #include <asm/uaccess.h>
 
@@ -842,11 +843,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 
        if (get_user(id, arg))
                return -EFAULT;
+       if (id >= IB_UMAD_MAX_AGENTS)
+               return -EINVAL;
 
        mutex_lock(&file->port->file_mutex);
        mutex_lock(&file->mutex);
 
-       if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+       id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+       if (!__get_agent(file, id)) {
                ret = -EINVAL;
                goto out;
        }
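
The ib_umad hunk above validates the user-supplied agent id before any locks are taken and then clamps it with array_index_nospec(), so a mispredicted bounds check cannot be used to speculatively index past the agent array (Spectre variant 1). A user-space sketch of the branchless mask the generic kernel helper computes; the real array_index_nospec() in <linux/nospec.h> additionally hides the value from the optimizer and may use architecture-specific versions:

    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

    /*
     * ~0UL if index < size, 0 otherwise, computed without a conditional
     * branch the CPU could mispredict. Mirrors the kernel's generic
     * array_index_mask_nospec(); like it, this relies on arithmetic right
     * shift of a negative value.
     */
    static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
    {
            return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
    }

    int main(void)
    {
            const unsigned long max_agents = 32;   /* stand-in for IB_UMAD_MAX_AGENTS */
            unsigned long ids[] = { 5, 31, 32, 1000 };

            for (int i = 0; i < 4; i++) {
                    unsigned long id = ids[i];

                    if (id >= max_agents) {        /* architectural bounds check */
                            printf("id %4lu: rejected\n", id);
                            continue;
                    }
                    id &= index_mask_nospec(id, max_agents);   /* speculation clamp */
                    printf("id %4lu: clamped index %lu\n", ids[i], id);
            }
            return 0;
    }
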
index 199a9cd..531c985 100644 (file)
@@ -1526,8 +1526,6 @@ tx_err:
                                    tx_buf_size, DMA_TO_DEVICE);
                kfree(tun_qp->tx_ring[i].buf.addr);
        }
-       kfree(tun_qp->tx_ring);
-       tun_qp->tx_ring = NULL;
        i = MLX4_NUM_TUNNEL_BUFS;
 err:
        while (i > 0) {
@@ -1536,6 +1534,8 @@ err:
                                    rx_buf_size, DMA_FROM_DEVICE);
                kfree(tun_qp->ring[i].addr);
        }
+       kfree(tun_qp->tx_ring);
+       tun_qp->tx_ring = NULL;
        kfree(tun_qp->ring);
        tun_qp->ring = NULL;
        return -ENOMEM;
index 3399271..170368b 100644 (file)
@@ -792,7 +792,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
        struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
-       struct mlx5_umr_wr umrwr;
+       struct mlx5_umr_wr umrwr = {};
        struct ib_send_wr *bad;
        struct mlx5_ib_mr *mr;
        struct ib_sge sg;
@@ -839,7 +839,6 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                goto free_pas;
        }
 
-       memset(&umrwr, 0, sizeof(umrwr));
        umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
        prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
                         page_shift, virt_addr, len, access_flags);
@@ -1163,11 +1162,10 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
-       struct mlx5_umr_wr umrwr;
+       struct mlx5_umr_wr umrwr = {};
        struct ib_send_wr *bad;
        int err;
 
-       memset(&umrwr.wr, 0, sizeof(umrwr));
        umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
        prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);
 
index db64adf..3e1ea91 100644 (file)
@@ -145,7 +145,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
                return -ENODEV;
 
        epirq = &interface->endpoint[0].desc;
+       if (!usb_endpoint_is_int_in(epirq))
+               return -ENODEV;
+
        epout = &interface->endpoint[1].desc;
+       if (!usb_endpoint_is_int_out(epout))
+               return -ENODEV;
 
        if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
                goto fail;
index 8805575..821b446 100644 (file)
@@ -153,7 +153,8 @@ struct trackpoint_data
 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
 int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
 #else
-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+static inline int trackpoint_detect(struct psmouse *psmouse,
+                                   bool set_properties)
 {
        return -ENOSYS;
 }
index 8b68a21..5a0e4cd 100644 (file)
@@ -78,6 +78,7 @@ Scott Hill shill@gtcocalcomp.com
 
 /* Max size of a single report */
 #define REPORT_MAX_SIZE       10
+#define MAX_COLLECTION_LEVELS  10
 
 
 /* Bitmask whether pen is in range */
@@ -224,8 +225,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
        char  maintype = 'x';
        char  globtype[12];
        int   indent = 0;
-       char  indentstr[10] = "";
-
+       char  indentstr[MAX_COLLECTION_LEVELS + 1] = { 0 };
 
        dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n");
 
@@ -351,6 +351,13 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
                        case TAG_MAIN_COL_START:
                                maintype = 'S';
 
+                               if (indent == MAX_COLLECTION_LEVELS) {
+                                       dev_err(ddev, "Collection level %d would exceed limit of %d\n",
+                                               indent + 1,
+                                               MAX_COLLECTION_LEVELS);
+                                       break;
+                               }
+
                                if (data == 0) {
                                        dev_dbg(ddev, "======>>>>>> Physical\n");
                                        strcpy(globtype, "Physical");
@@ -370,8 +377,15 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
                                break;
 
                        case TAG_MAIN_COL_END:
-                               dev_dbg(ddev, "<<<<<<======\n");
                                maintype = 'E';
+
+                               if (indent == 0) {
+                                       dev_err(ddev, "Collection level already at zero\n");
+                                       break;
+                               }
+
+                               dev_dbg(ddev, "<<<<<<======\n");
+
                                indent--;
                                for (x = 0; x < indent; x++)
                                        indentstr[x] = '-';
index 2812f92..0ccc120 100644 (file)
@@ -125,6 +125,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
        if (intf->cur_altsetting->desc.bNumEndpoints < 1)
                return -ENODEV;
 
+       endpoint = &intf->cur_altsetting->endpoint[0].desc;
+       if (!usb_endpoint_is_int_in(endpoint))
+               return -ENODEV;
+
        kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
        input_dev = input_allocate_device();
        if (!kbtab || !input_dev)
@@ -164,8 +168,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
        input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
        input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
 
-       endpoint = &intf->cur_altsetting->endpoint[0].desc;
-
        usb_fill_int_urb(kbtab->irq, dev,
                         usb_rcvintpipe(dev, endpoint->bEndpointAddress),
                         kbtab->data, 8,
index db85cc5..6a69b5b 100644 (file)
@@ -1223,7 +1223,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
        NULL,
 };
 
-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc, low, high;
index 2d203b4..c56da0b 100644 (file)
@@ -145,6 +145,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
        .irq_unmask             = imx_gpcv2_irq_unmask,
        .irq_set_wake           = imx_gpcv2_irq_set_wake,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_set_type           = irq_chip_set_type_parent,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
 #endif
index 6a2df32..691ad06 100644 (file)
@@ -687,6 +687,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
        if (!cdev->ap.applid)
                return -ENODEV;
 
+       if (count < CAPIMSG_BASELEN)
+               return -EINVAL;
+
        skb = alloc_skb(count, GFP_USER);
        if (!skb)
                return -ENOMEM;
@@ -697,7 +700,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
        }
        mlen = CAPIMSG_LEN(skb->data);
        if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
-               if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+               if (count < CAPI_DATA_B3_REQ_LEN ||
+                   (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
                        kfree_skb(skb);
                        return -EINVAL;
                }
@@ -710,6 +714,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
        CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
 
        if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
+               if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
+                       kfree_skb(skb);
+                       return -EINVAL;
+               }
                mutex_lock(&cdev->lock);
                capincci_free(cdev, CAPIMSG_NCCI(skb->data));
                mutex_unlock(&cdev->lock);
index 114f3bc..726fba4 100644 (file)
@@ -1402,6 +1402,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
                                printk(KERN_DEBUG
                                       "%s: %s: alloc urb for fifo %i failed",
                                       hw->name, __func__, fifo->fifonum);
+                               continue;
                        }
                        fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
                        fifo->iso[i].indx = i;
@@ -1700,13 +1701,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
 static int
 setup_hfcsusb(struct hfcsusb *hw)
 {
+       void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
        u_char b;
+       int ret;
 
        if (debug & DBG_HFC_CALL_TRACE)
                printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
 
+       if (!dmabuf)
+               return -ENOMEM;
+
+       ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
+
+       memcpy(&b, dmabuf, sizeof(u_char));
+       kfree(dmabuf);
+
        /* check the chip id */
-       if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
+       if (ret != 1) {
                printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
                       hw->name, __func__);
                return 1;
@@ -1963,6 +1974,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
                                /* get endpoint base */
                                idx = ((ep_addr & 0x7f) - 1) * 2;
+                               if (idx > 15)
+                                       return -EIO;
+
                                if (ep_addr & 0x80)
                                        idx++;
                                attr = ep->desc.bmAttributes;
index 9cf826d..b4ad852 100644 (file)
@@ -389,11 +389,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
 
        of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
                if (!strncmp(name, mbox_name, strlen(name)))
-                       break;
+                       return mbox_request_channel(cl, index);
                index++;
        }
 
-       return mbox_request_channel(cl, index);
+       dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+               __func__, name);
+       return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
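
The mailbox hunk above makes mbox_request_channel_byname() return an error as soon as the requested name is missing from the mbox-names list, instead of falling out of the loop and requesting a channel at an index one past the last valid entry. A standalone sketch of that lookup discipline, returning an explicit error rather than an out-of-range index; the names are illustrative:

    #include <stdio.h>
    #include <string.h>

    /* Return the index of 'name' in 'names', or -1 if it is not present. */
    static int lookup_name(const char *const names[], int count, const char *name)
    {
            for (int i = 0; i < count; i++)
                    if (!strcmp(names[i], name))
                            return i;
            return -1;      /* never fall through with index == count */
    }

    int main(void)
    {
            const char *names[] = { "rx", "tx" };

            printf("%d\n", lookup_name(names, 2, "tx"));    /* 1  */
            printf("%d\n", lookup_name(names, 2, "boot"));  /* -1 */
            return 0;
    }
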
 
index bfbf5dd..adef364 100644 (file)
@@ -1403,7 +1403,7 @@ static void cache_set_flush(struct closure *cl)
        kobject_put(&c->internal);
        kobject_del(&c->kobj);
 
-       if (c->gc_thread)
+       if (!IS_ERR_OR_NULL(c->gc_thread))
                kthread_stop(c->gc_thread);
 
        if (!IS_ERR_OR_NULL(c->root))
index 52f2613..ee8bc7e 100644 (file)
@@ -1168,7 +1168,7 @@ void dm_table_event(struct dm_table *t)
 }
 EXPORT_SYMBOL(dm_table_event);
 
-sector_t dm_table_get_size(struct dm_table *t)
+inline sector_t dm_table_get_size(struct dm_table *t)
 {
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
 }
@@ -1193,6 +1193,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
        unsigned int l, n = 0, k = 0;
        sector_t *node;
 
+       if (unlikely(sector >= dm_table_get_size(t)))
+               return &t->targets[t->num_targets];
+
        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);
index 880b7de..fa9039a 100644 (file)
@@ -616,39 +616,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
 
        new_parent = shadow_current(s);
 
+       pn = dm_block_data(new_parent);
+       size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+               sizeof(__le64) : s->info->value_type.size;
+
+       /* create & init the left block */
        r = new_block(s->info, &left);
        if (r < 0)
                return r;
 
+       ln = dm_block_data(left);
+       nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+
+       ln->header.flags = pn->header.flags;
+       ln->header.nr_entries = cpu_to_le32(nr_left);
+       ln->header.max_entries = pn->header.max_entries;
+       ln->header.value_size = pn->header.value_size;
+       memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+       memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+
+       /* create & init the right block */
        r = new_block(s->info, &right);
        if (r < 0) {
                unlock_block(s->info, left);
                return r;
        }
 
-       pn = dm_block_data(new_parent);
-       ln = dm_block_data(left);
        rn = dm_block_data(right);
-
-       nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
        nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
 
-       ln->header.flags = pn->header.flags;
-       ln->header.nr_entries = cpu_to_le32(nr_left);
-       ln->header.max_entries = pn->header.max_entries;
-       ln->header.value_size = pn->header.value_size;
-
        rn->header.flags = pn->header.flags;
        rn->header.nr_entries = cpu_to_le32(nr_right);
        rn->header.max_entries = pn->header.max_entries;
        rn->header.value_size = pn->header.value_size;
-
-       memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
        memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
-
-       size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
-               sizeof(__le64) : s->info->value_type.size;
-       memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
        memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
               nr_right * size);
 
index 20557e2..1d29771 100644 (file)
@@ -248,7 +248,7 @@ static int out(struct sm_metadata *smm)
        }
 
        if (smm->recursion_count == 1)
-               apply_bops(smm);
+               r = apply_bops(smm);
 
        smm->recursion_count--;
 
index 029384d..7a3e91c 100644 (file)
@@ -80,8 +80,8 @@ static int tua6100_set_params(struct dvb_frontend *fe)
        struct i2c_msg msg1 = { .addr = priv->i2c_address, .flags = 0, .buf = reg1, .len = 4 };
        struct i2c_msg msg2 = { .addr = priv->i2c_address, .flags = 0, .buf = reg2, .len = 3 };
 
-#define _R 4
-#define _P 32
+#define _R_VAL 4
+#define _P_VAL 32
 #define _ri 4000000
 
        // setup register 0
@@ -96,14 +96,14 @@ static int tua6100_set_params(struct dvb_frontend *fe)
        else
                reg1[1] = 0x0c;
 
-       if (_P == 64)
+       if (_P_VAL == 64)
                reg1[1] |= 0x40;
        if (c->frequency >= 1525000)
                reg1[1] |= 0x80;
 
        // register 2
-       reg2[1] = (_R >> 8) & 0x03;
-       reg2[2] = _R;
+       reg2[1] = (_R_VAL >> 8) & 0x03;
+       reg2[2] = _R_VAL;
        if (c->frequency < 1455000)
                reg2[1] |= 0x1c;
        else if (c->frequency < 1630000)
@@ -115,18 +115,18 @@ static int tua6100_set_params(struct dvb_frontend *fe)
         * The N divisor ratio (note: c->frequency is in kHz, but we
         * need it in Hz)
         */
-       prediv = (c->frequency * _R) / (_ri / 1000);
-       div = prediv / _P;
+       prediv = (c->frequency * _R_VAL) / (_ri / 1000);
+       div = prediv / _P_VAL;
        reg1[1] |= (div >> 9) & 0x03;
        reg1[2] = div >> 1;
        reg1[3] = (div << 7);
-       priv->frequency = ((div * _P) * (_ri / 1000)) / _R;
+       priv->frequency = ((div * _P_VAL) * (_ri / 1000)) / _R_VAL;
 
        // Finally, calculate and store the value for A
-       reg1[3] |= (prediv - (div*_P)) & 0x7f;
+       reg1[3] |= (prediv - (div*_P_VAL)) & 0x7f;
 
-#undef _R
-#undef _P
+#undef _R_VAL
+#undef _P_VAL
 #undef _ri
 
        if (fe->ops.i2c_gate_ctrl)
index eec9e87..7f127e5 100644 (file)
@@ -41,7 +41,7 @@ endif
 obj-$(CONFIG_VIDEO_ADV7481) += adv7481.o
 obj-$(CONFIG_VIDEO_TVTUNER) += tvtuner.o
 obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
-obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
+obj-$(CONFIG_VIDEO_ADV7511) += adv7511-v4l2.o
 obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
 obj-$(CONFIG_VIDEO_VS6624)  += vs6624.o
 obj-$(CONFIG_VIDEO_BT819) += bt819.o
similarity index 99%
rename from drivers/media/i2c/adv7511.c
rename to drivers/media/i2c/adv7511-v4l2.c
index c24839c..b35400e 100644 (file)
  * SOFTWARE.
  */
 
+/*
+ * This file is named adv7511-v4l2.c so it doesn't conflict with the Analog
+ * Devices ADV7511 (config fragment CONFIG_DRM_I2C_ADV7511).
+ */
+
 
 #include <linux/kernel.h>
 #include <linux/module.h>
index a463981..439cb76 100644 (file)
@@ -1581,6 +1581,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
                coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
                return -ETIMEDOUT;
        }
+       ctx->sequence_offset = ~0U;
        ctx->initialized = 1;
 
        /* Update kfifo out pointer from coda bitstream read pointer */
@@ -1966,12 +1967,17 @@ static void coda_finish_decode(struct coda_ctx *ctx)
                else if (ctx->display_idx < 0)
                        ctx->hold = true;
        } else if (decoded_idx == -2) {
+               if (ctx->display_idx >= 0 &&
+                   ctx->display_idx < ctx->num_internal_frames)
+                       ctx->sequence_offset++;
                /* no frame was decoded, we still return remaining buffers */
        } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
                v4l2_err(&dev->v4l2_dev,
                         "decoded frame index out of range: %d\n", decoded_idx);
        } else {
-               val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
+               val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM);
+               if (ctx->sequence_offset == -1)
+                       ctx->sequence_offset = val;
                val -= ctx->sequence_offset;
                spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
                if (!list_empty(&ctx->buffer_meta_list)) {
@@ -2101,7 +2107,6 @@ irqreturn_t coda_irq_handler(int irq, void *data)
        if (ctx == NULL) {
                v4l2_err(&dev->v4l2_dev,
                         "Instance released before the end of transaction\n");
-               mutex_unlock(&dev->coda_mutex);
                return IRQ_HANDLED;
        }
 
index fce86f1..c2c6898 100644 (file)
@@ -523,6 +523,11 @@ static int __init vpss_init(void)
                return -EBUSY;
 
        oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+       if (unlikely(!oper_cfg.vpss_regs_base2)) {
+               release_mem_region(VPSS_CLK_CTRL, 4);
+               return -ENOMEM;
+       }
+
        writel(VPSS_CLK_CTRL_VENCCLKEN |
                     VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
 
index aa2b440..22fe771 100644 (file)
@@ -209,7 +209,6 @@ struct mcam_vb_buffer {
        struct list_head queue;
        struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
        dma_addr_t dma_desc_pa;         /* Descriptor physical address */
-       int dma_desc_nent;              /* Number of mapped descriptors */
 };
 
 static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
@@ -616,9 +615,11 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
 static void mcam_sg_next_buffer(struct mcam_camera *cam)
 {
        struct mcam_vb_buffer *buf;
+       struct sg_table *sg_table;
 
        buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
        list_del_init(&buf->queue);
+       sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0);
        /*
         * Very Bad Not Good Things happen if you don't clear
         * C1_DESC_ENA before making any descriptor changes.
@@ -626,7 +627,7 @@ static void mcam_sg_next_buffer(struct mcam_camera *cam)
        mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
        mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
        mcam_reg_write(cam, REG_DESC_LEN_Y,
-                       buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
+                       sg_table->nents * sizeof(struct mcam_dma_desc));
        mcam_reg_write(cam, REG_DESC_LEN_U, 0);
        mcam_reg_write(cam, REG_DESC_LEN_V, 0);
        mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
index 1445943..b19ec31 100644 (file)
@@ -206,7 +206,7 @@ static int32_t msm_vfe40_init_qos_parms(struct vfe_device *vfe_dev,
        if (rc < 0 || !ds_entries) {
                pr_err("%s: NO D/S entries found\n", __func__);
        } else {
-               ds_settings = kzalloc(sizeof(uint32_t) * ds_entries,
+               ds_settings = kcalloc(ds_entries, sizeof(uint32_t),
                                GFP_KERNEL);
                if (!ds_settings) {
                        pr_err("%s:%d No memory\n", __func__, __LINE__);
index 69155d6..d75af08 100644 (file)
@@ -1498,8 +1498,6 @@ static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 {
        uint32_t i;
        int rc = -1;
-       int counter = 0;
-       u32 result = 0;
        struct cpp_device *cpp_dev = NULL;
        struct msm_device_queue *processing_q = NULL;
        struct msm_device_queue *eventData_q = NULL;
@@ -1581,60 +1579,6 @@ static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
                pr_debug("DEBUG_R1: 0x%x\n",
                        msm_camera_io_r(cpp_dev->cpp_hw_base + 0x8C));
 
-               /* Update bandwidth usage to enable AXI/ABH clock,
-                * which will help to reset CPP AXI.Bandwidth will be
-                * made zero at cpp_release_hardware.
-                */
-               msm_cpp_update_bandwidth(cpp_dev, 0x1000, 0x1000);
-
-               /* mask IRQ status */
-               msm_camera_io_w(0xB, cpp_dev->cpp_hw_base + 0xC);
-
-               /* clear IRQ status */
-               msm_camera_io_w(0xFFFFF, cpp_dev->cpp_hw_base + 0x14);
-
-               /* MMSS_A_CPP_AXI_CMD = 0x16C, reset 0x1*/
-               msm_camera_io_w(0x1, cpp_dev->cpp_hw_base + 0x16C);
-
-               while (counter < MSM_CPP_POLL_RETRIES) {
-                       result = msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10);
-                       if (result & 0x2)
-                               break;
-                       /*
-                        * Below usleep values are chosen based on experiments
-                        * and this was the smallest number which works. This
-                        * sleep is needed to leave enough time for hardware
-                        * to update status register.
-                        */
-                       usleep_range(200, 250);
-                       counter++;
-               }
-
-               pr_debug("CPP AXI done counter %d result 0x%x\n",
-                       counter, result);
-
-               /* clear IRQ status */
-               msm_camera_io_w(0xFFFFF, cpp_dev->cpp_hw_base + 0x14);
-               counter = 0;
-               /* MMSS_A_CPP_RST_CMD_0 = 0x8, firmware reset = 0x3DF77 */
-               msm_camera_io_w(0x3DF77, cpp_dev->cpp_hw_base + 0x8);
-
-               while (counter < MSM_CPP_POLL_RETRIES) {
-                       result = msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10);
-                       if (result & 0x1)
-                               break;
-                       /*
-                        * Below usleep values are chosen based on experiments
-                        * and this was the smallest number which works. This
-                        * sleep is needed to leave enough time for hardware
-                        * to update status register.
-                        */
-                       usleep_range(200, 250);
-                       counter++;
-               }
-               pr_debug("CPP reset done counter %d result 0x%x\n",
-                       counter, result);
-
                msm_camera_io_w(0x0, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
                msm_cpp_clear_timer(cpp_dev);
                cpp_release_hardware(cpp_dev);
index ec55bc7..d3a7e4c 100644 (file)
@@ -1006,6 +1006,9 @@ static enum vidc_status hfi_parse_init_done_properties(
                        }
                        while (prof_count) {
                                prof_level = (struct hfi_profile_level *)ptr;
+                               VALIDATE_PROPERTY_STRUCTURE_SIZE(rem_bytes -
+                                       next_offset,
+                                       sizeof(*prof_level));
                                capability.
                                profile_level.profile_level[count].profile
                                        = prof_level->profile;
index bfb3a6d..10958ba 100644 (file)
@@ -283,6 +283,14 @@ static int vidioc_g_frequency(struct file *file, void *priv,
        return 0;
 }
 
+static void raremono_device_release(struct v4l2_device *v4l2_dev)
+{
+       struct raremono_device *radio = to_raremono_dev(v4l2_dev);
+
+       kfree(radio->buffer);
+       kfree(radio);
+}
+
 /* File system interface */
 static const struct v4l2_file_operations usb_raremono_fops = {
        .owner          = THIS_MODULE,
@@ -307,12 +315,14 @@ static int usb_raremono_probe(struct usb_interface *intf,
        struct raremono_device *radio;
        int retval = 0;
 
-       radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
-       if (radio)
-               radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
-
-       if (!radio || !radio->buffer)
+       radio = kzalloc(sizeof(*radio), GFP_KERNEL);
+       if (!radio)
+               return -ENOMEM;
+       radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+       if (!radio->buffer) {
+               kfree(radio);
                return -ENOMEM;
+       }
 
        radio->usbdev = interface_to_usbdev(intf);
        radio->intf = intf;
@@ -336,7 +346,8 @@ static int usb_raremono_probe(struct usb_interface *intf,
        if (retval != 3 ||
            (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
                dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
-               return -ENODEV;
+               retval = -ENODEV;
+               goto free_mem;
        }
 
        dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
@@ -345,7 +356,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
        retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
        if (retval < 0) {
                dev_err(&intf->dev, "couldn't register v4l2_device\n");
-               return retval;
+               goto free_mem;
        }
 
        mutex_init(&radio->lock);
@@ -357,6 +368,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
        radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
        radio->vdev.lock = &radio->lock;
        radio->vdev.release = video_device_release_empty;
+       radio->v4l2_dev.release = raremono_device_release;
 
        usb_set_intfdata(intf, &radio->v4l2_dev);
 
@@ -372,6 +384,10 @@ static int usb_raremono_probe(struct usb_interface *intf,
        }
        dev_err(&intf->dev, "could not register video device\n");
        v4l2_device_unregister(&radio->v4l2_dev);
+
+free_mem:
+       kfree(radio->buffer);
+       kfree(radio);
        return retval;
 }
 
index fb42f0f..add26ea 100644 (file)
@@ -553,6 +553,7 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
 
        /* Register with V4L2 subsystem as RADIO device */
        if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
+               v4l2_device_unregister(&fmdev->v4l2_dev);
                fmerr("Could not register video device\n");
                return -ENOMEM;
        }
@@ -566,6 +567,8 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
        if (ret < 0) {
                fmerr("(fmdev): Can't init ctrl handler\n");
                v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
+               video_unregister_device(fmdev->radio_dev);
+               v4l2_device_unregister(&fmdev->v4l2_dev);
                return -EBUSY;
        }
 
index 351a78a..41ea00a 100644 (file)
@@ -884,7 +884,6 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
        cpia2_unregister_camera(cam);
        v4l2_device_disconnect(&cam->v4l2_dev);
        mutex_unlock(&cam->v4l2_lock);
-       v4l2_device_put(&cam->v4l2_dev);
 
        if(cam->buffers) {
                DBG("Wakeup waiting processes\n");
@@ -897,6 +896,8 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
        DBG("Releasing interface\n");
        usb_driver_release_interface(&cpia2_driver, intf);
 
+       v4l2_device_put(&cam->v4l2_dev);
+
        LOG("CPiA2 camera disconnected.\n");
 }
 
index 1adf325..97a89ef 100644 (file)
@@ -286,12 +286,15 @@ EXPORT_SYMBOL(dvb_usb_device_init);
 void dvb_usb_device_exit(struct usb_interface *intf)
 {
        struct dvb_usb_device *d = usb_get_intfdata(intf);
-       const char *name = "generic DVB-USB module";
+       const char *default_name = "generic DVB-USB module";
+       char name[40];
 
        usb_set_intfdata(intf, NULL);
        if (d != NULL && d->desc != NULL) {
-               name = d->desc->name;
+               strscpy(name, d->desc->name, sizeof(name));
                dvb_usb_exit(d);
+       } else {
+               strscpy(name, default_name, sizeof(name));
        }
        info("%s successfully deinitialized and disconnected.", name);
 
index 6c3c477..30a8c21 100644 (file)
@@ -594,9 +594,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
 
 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
 {
-       u8 buf[62], *b;
-       int ret;
+       u8 buf[62];
        struct ir_raw_event ev;
+       int i, ret;
 
        buf[0] = GET_IR_DATA_VENDOR_REQUEST;
        buf[1] = 0x08;
@@ -632,26 +632,25 @@ unlock:
                return 0; /* no key pressed */
 
        /* decoding */
-       b = buf+1;
 
 #if 0
        deb_rc("RC: %d ", ret);
-       debug_dump(b, ret, deb_rc);
+       debug_dump(buf + 1, ret, deb_rc);
 #endif
 
        ev.pulse = 0;
-       while (1) {
-               ev.pulse = !ev.pulse;
-               ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
-               ir_raw_event_store(d->rc_dev, &ev);
-
-               b++;
-               if (*b == 0xff) {
+       for (i = 1; i < ARRAY_SIZE(buf); i++) {
+               if (buf[i] == 0xff) {
                        ev.pulse = 0;
                        ev.duration = 888888*2;
                        ir_raw_event_store(d->rc_dev, &ev);
                        break;
                }
+
+               ev.pulse = !ev.pulse;
+               ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
+                              FIRMWARE_CLOCK_TICK) / 1000;
+               ir_raw_event_store(d->rc_dev, &ev);
        }
 
        ir_raw_event_handle(d->rc_dev);
index 4f317e2..87401b1 100644 (file)
@@ -111,6 +111,7 @@ static void tm6000_urb_received(struct urb *urb)
                        printk(KERN_ERR "tm6000:  error %s\n", __func__);
                        kfree(urb->transfer_buffer);
                        usb_free_urb(urb);
+                       dev->dvb->bulk_urb = NULL;
                }
        }
 }
@@ -143,6 +144,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
        dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
        if (dvb->bulk_urb->transfer_buffer == NULL) {
                usb_free_urb(dvb->bulk_urb);
+               dvb->bulk_urb = NULL;
                printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
                return -ENOMEM;
        }
@@ -170,6 +172,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
 
                kfree(dvb->bulk_urb->transfer_buffer);
                usb_free_urb(dvb->bulk_urb);
+               dvb->bulk_urb = NULL;
                return ret;
        }
 
index abf3c62..6bb6d8a 100644 (file)
@@ -2078,16 +2078,15 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
                v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
                                                                &def, &flags);
 
-       is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
-                  cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
+       is_menu = (type == V4L2_CTRL_TYPE_MENU ||
+                  type == V4L2_CTRL_TYPE_INTEGER_MENU);
        if (is_menu)
                WARN_ON(step);
        else
                WARN_ON(cfg->menu_skip_mask);
-       if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
+       if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
                qmenu = v4l2_ctrl_get_menu(cfg->id);
-       else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
-                qmenu_int == NULL) {
+       } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
                handler_set_err(hdl, -EINVAL);
                return NULL;
        }
index 4d673a6..1041eb7 100644 (file)
@@ -629,13 +629,18 @@ static int __init memstick_init(void)
                return -ENOMEM;
 
        rc = bus_register(&memstick_bus_type);
-       if (!rc)
-               rc = class_register(&memstick_host_class);
+       if (rc)
+               goto error_destroy_workqueue;
 
-       if (!rc)
-               return 0;
+       rc = class_register(&memstick_host_class);
+       if (rc)
+               goto error_bus_unregister;
+
+       return 0;
 
+error_bus_unregister:
        bus_unregister(&memstick_bus_type);
+error_destroy_workqueue:
        destroy_workqueue(workqueue);
 
        return rc;
index d474732..fb54de5 100644 (file)
@@ -967,7 +967,7 @@ int arizona_dev_init(struct arizona *arizona)
        unsigned int reg, val, mask;
        int (*apply_patch)(struct arizona *) = NULL;
        const struct mfd_cell *subdevs = NULL;
-       int n_subdevs, ret, i;
+       int n_subdevs = 0, ret, i;
 
        dev_set_drvdata(arizona->dev, arizona);
        mutex_init(&arizona->clk_lock);
index 022c937..215bb5e 100644 (file)
@@ -178,6 +178,7 @@ static int mfd_add_device(struct device *parent, int id,
                for_each_child_of_node(parent->of_node, np) {
                        if (of_device_is_compatible(np, cell->of_compatible)) {
                                pdev->dev.of_node = np;
+                               pdev->dev.fwnode = &np->fwnode;
                                break;
                        }
                }
index e85b2b8..dfa3bb6 100644 (file)
@@ -1,6 +1,6 @@
 /*Qualcomm Secure Execution Environment Communicator (QSEECOM) driver
  *
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -3265,6 +3265,33 @@ int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
        return 0;
 }
 
+static int __boundary_checks_offset_64(struct qseecom_send_modfd_cmd_req *req,
+                       struct qseecom_send_modfd_listener_resp *lstnr_resp,
+                       struct qseecom_dev_handle *data, int i)
+{
+
+       if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+                                               (req->ifd_data[i].fd > 0)) {
+               if ((req->cmd_req_len < sizeof(uint64_t)) ||
+                       (req->ifd_data[i].cmd_buf_offset >
+                       req->cmd_req_len - sizeof(uint64_t))) {
+                       pr_err("Invalid offset (req len) 0x%x\n",
+                               req->ifd_data[i].cmd_buf_offset);
+                       return -EINVAL;
+               }
+       } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+                                       (lstnr_resp->ifd_data[i].fd > 0)) {
+               if ((lstnr_resp->resp_len < sizeof(uint64_t)) ||
+                       (lstnr_resp->ifd_data[i].cmd_buf_offset >
+                       lstnr_resp->resp_len - sizeof(uint64_t))) {
+                       pr_err("Invalid offset (lstnr resp len) 0x%x\n",
+                               lstnr_resp->ifd_data[i].cmd_buf_offset);
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
 static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
                        struct qseecom_dev_handle *data)
 {
@@ -3622,7 +3649,8 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
                sg = sg_ptr->sgl;
                if (sg_ptr->nents == 1) {
                        uint64_t *update_64bit;
-                       if (__boundary_checks_offset(req, lstnr_resp, data, i))
+                       if (__boundary_checks_offset_64(req, lstnr_resp,
+                                               data, i))
                                goto err;
                                /* 64bit app uses 64bit address */
                        update_64bit = (uint64_t *) field;
@@ -6422,9 +6450,11 @@ static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
                                pr_err("Ion client can't retrieve the handle\n");
                                return -ENOMEM;
                        }
-                       if ((req->req_len < sizeof(uint32_t)) ||
+                       if ((req->req_len <
+                               sizeof(struct qseecom_param_memref)) ||
                                (req->ifd_data[i].cmd_buf_offset >
-                               req->req_len - sizeof(uint32_t))) {
+                               req->req_len -
+                               sizeof(struct qseecom_param_memref))) {
                                pr_err("Invalid offset/req len 0x%x/0x%x\n",
                                        req->req_len,
                                        req->ifd_data[i].cmd_buf_offset);
@@ -7281,6 +7311,13 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                break;
        }
        case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+               if ((data->type != QSEECOM_GENERIC) &&
+                       (data->type != QSEECOM_CLIENT_APP)) {
+                       pr_err("app loaded query req: invalid handle (%d)\n",
+                                                               data->type);
+                       ret = -EINVAL;
+                       break;
+               }
                data->type = QSEECOM_CLIENT_APP;
                mutex_lock(&app_access_lock);
                atomic_inc(&data->ioctl_count);
index a8cee33..305a344 100644 (file)
@@ -318,7 +318,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
 
        entry = container_of(resource, struct dbell_entry, resource);
        if (entry->run_delayed) {
-               schedule_work(&entry->work);
+               if (!schedule_work(&entry->work))
+                       vmci_resource_put(resource);
        } else {
                entry->notify_cb(entry->client_data);
                vmci_resource_put(resource);
@@ -366,7 +367,8 @@ static void dbell_fire_entries(u32 notify_idx)
                    atomic_read(&dbell->active) == 1) {
                        if (dbell->run_delayed) {
                                vmci_resource_get(&dbell->resource);
-                               schedule_work(&dbell->work);
+                               if (!schedule_work(&dbell->work))
+                                       vmci_resource_put(&dbell->resource);
                        } else {
                                dbell->notify_cb(dbell->client_data);
                        }
index 3dee072..25150b5 100644 (file)
@@ -486,7 +486,7 @@ int mmc_recovery_fallback_lower_speed(struct mmc_host *host)
                mmc_host_clear_sdr104(host);
                err = mmc_hw_reset(host);
                host->card->sdr104_blocked = true;
-       } else {
+       } else if (mmc_card_sd(host->card)) {
                /* If sdr104_wa is not present, just return status */
                err = host->bus_ops->alive(host);
        }
index f1a4abf..f24c6ad 100644 (file)
@@ -1491,6 +1491,12 @@ int mmc_attach_sd(struct mmc_host *host)
                        goto err;
        }
 
+       /*
+        * Some SD cards claim an out-of-spec VDD voltage range. Let's treat
+        * these bits as invalid, especially bit 7.
+        */
+       ocr &= ~0x7FFF;
+
        rocr = mmc_select_voltage(host, ocr);
 
        /*
index a7bd93f..a7afa46 100644 (file)
@@ -2,7 +2,7 @@
  * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
  * driver source file
  *
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1288,6 +1288,34 @@ retry:
                } else {
                        pr_debug("%s: %s: found ## bad ## phase = %d\n",
                                mmc_hostname(mmc), __func__, phase);
+
+                       if (phase == 15 && tuned_phase_cnt) {
+                               pr_err("%s: %s: Ping with known good phase\n",
+                                       mmc_hostname(mmc), __func__);
+                               /* set the phase in delay line hw block */
+                               rc = msm_config_cm_dll_phase(host,
+                                       tuned_phases[tuned_phase_cnt - 1]);
+                               if (rc)
+                                       goto kfree;
+
+                               cmd.opcode = opcode;
+                               cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+                               data.blksz = size;
+                               data.blocks = 1;
+                               data.flags = MMC_DATA_READ;
+                               data.timeout_ns = 1000 * 1000 * 1000;
+
+                               data.sg = &sg;
+                               data.sg_len = 1;
+                               sg_init_one(&sg, data_buf, size);
+                               memset(data_buf, 0, size);
+                               mmc_wait_for_req(mmc, &mrq);
+
+                               if ((cmd.error || data.error))
+                                       pr_err("%s: %s: Ping with known good phase failed\n",
+                                       mmc_hostname(mmc), __func__);
+                       }
                }
        } while (++phase < 16);
 
index 06d0b50..4e374a0 100644 (file)
@@ -144,6 +144,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
 
        sdhci_get_of_property(pdev);
 
+       /* HS200 is broken at this moment */
+       host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+
        ret = sdhci_add_host(host);
        if (ret)
                goto clocks_disable_unprepare;
index 0397afc..fd6aff9 100644 (file)
@@ -1111,7 +1111,9 @@ static void bond_compute_features(struct bonding *bond)
 
 done:
        bond_dev->vlan_features = vlan_features;
-       bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
+       bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+                                   NETIF_F_HW_VLAN_CTAG_TX |
+                                   NETIF_F_HW_VLAN_STAG_TX;
        bond_dev->gso_max_segs = gso_max_segs;
        netif_set_gso_max_size(bond_dev, gso_max_size);
 
@@ -2079,6 +2081,15 @@ static void bond_miimon_commit(struct bonding *bond)
        bond_for_each_slave(bond, slave, iter) {
                switch (slave->new_link) {
                case BOND_LINK_NOCHANGE:
+                       /* For 802.3ad mode, check current slave speed and
+                        * duplex again in case its port was disabled after
+                        * invalid speed/duplex reporting but recovered before
+                        * link monitoring could make a decision on the actual
+                        * link status
+                        */
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+                           slave->link == BOND_LINK_UP)
+                               bond_3ad_adapter_speed_duplex_changed(slave);
                        continue;
 
                case BOND_LINK_UP:
@@ -3714,8 +3725,8 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
 static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct iphdr *iph = ip_hdr(skb);
        struct slave *slave;
+       int slave_cnt;
        u32 slave_id;
 
        /* Start with the curr_active_slave that joined the bond as the
@@ -3724,23 +3735,32 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
         * send the join/membership reports.  The curr_active_slave found
         * will send all of this type of traffic.
         */
-       if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
-               slave = rcu_dereference(bond->curr_active_slave);
-               if (slave)
-                       bond_dev_queue_xmit(bond, skb, slave->dev);
-               else
-                       bond_xmit_slave_id(bond, skb, 0);
-       } else {
-               int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+       if (skb->protocol == htons(ETH_P_IP)) {
+               int noff = skb_network_offset(skb);
+               struct iphdr *iph;
 
-               if (likely(slave_cnt)) {
-                       slave_id = bond_rr_gen_slave_id(bond);
-                       bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
-               } else {
-                       bond_tx_drop(bond_dev, skb);
+               if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
+                       goto non_igmp;
+
+               iph = ip_hdr(skb);
+               if (iph->protocol == IPPROTO_IGMP) {
+                       slave = rcu_dereference(bond->curr_active_slave);
+                       if (slave)
+                               bond_dev_queue_xmit(bond, skb, slave->dev);
+                       else
+                               bond_xmit_slave_id(bond, skb, 0);
+                       return NETDEV_TX_OK;
                }
        }
 
+non_igmp:
+       slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+       if (likely(slave_cnt)) {
+               slave_id = bond_rr_gen_slave_id(bond);
+               bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+       } else {
+               bond_tx_drop(bond_dev, skb);
+       }
        return NETDEV_TX_OK;
 }
 
index 615c65d..055a401 100644 (file)
@@ -1467,7 +1467,7 @@ static void __exit cfhsi_exit_module(void)
        rtnl_lock();
        list_for_each_safe(list_node, n, &cfhsi_list) {
                cfhsi = list_entry(list_node, struct cfhsi, list);
-               unregister_netdev(cfhsi->ndev);
+               unregister_netdevice(cfhsi->ndev);
        }
        rtnl_unlock();
 }
index 8b7c642..9dd968e 100644 (file)
@@ -1065,6 +1065,8 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
 int register_candev(struct net_device *dev)
 {
        dev->rtnl_link_ops = &can_link_ops;
+       netif_carrier_off(dev);
+
        return register_netdev(dev);
 }
 EXPORT_SYMBOL_GPL(register_candev);
index dd56133..fc9f8b0 100644 (file)
@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
                if (!netdev)
                        continue;
 
-               strncpy(name, netdev->name, IFNAMSIZ);
+               strlcpy(name, netdev->name, IFNAMSIZ);
 
                unregister_sja1000dev(netdev);
 
index 91be457..b1d68f4 100644 (file)
@@ -592,16 +592,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
        dev->state &= ~PCAN_USB_STATE_STARTED;
        netif_stop_queue(netdev);
 
+       close_candev(netdev);
+
+       dev->can.state = CAN_STATE_STOPPED;
+
        /* unlink all pending urbs and free used memory */
        peak_usb_unlink_all_urbs(dev);
 
        if (dev->adapter->dev_stop)
                dev->adapter->dev_stop(dev);
 
-       close_candev(netdev);
-
-       dev->can.state = CAN_STATE_STOPPED;
-
        /* can set bus off now */
        if (dev->adapter->dev_set_bus) {
                int err = dev->adapter->dev_set_bus(dev, 0);
@@ -881,7 +881,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
 
                dev_prev_siblings = dev->prev_siblings;
                dev->state &= ~PCAN_USB_STATE_CONNECTED;
-               strncpy(name, netdev->name, IFNAMSIZ);
+               strlcpy(name, netdev->name, IFNAMSIZ);
 
                unregister_netdev(netdev);
 
index 64cc86a..1b75d53 100644 (file)
@@ -851,7 +851,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
                        goto err_out;
 
                /* allocate command buffer once for all for the interface */
-               pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+               pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
                                                GFP_KERNEL);
                if (!pdev->cmd_buffer_addr)
                        goto err_out_1;
index bbdd605..d85fdc6 100644 (file)
@@ -500,7 +500,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
        u8 *buffer;
        int err;
 
-       buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+       buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
 
index 9cc5dae..b0285ac 100644 (file)
@@ -163,7 +163,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
                struct sk_buff *skb = tx_buff->skb;
                unsigned int info = le32_to_cpu(txbd->info);
 
-               if ((info & FOR_EMAC) || !txbd->data)
+               if ((info & FOR_EMAC) || !txbd->data || !skb)
                        break;
 
                if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
@@ -191,6 +191,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 
                txbd->data = 0;
                txbd->info = 0;
+               tx_buff->skb = NULL;
 
                *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
        }
@@ -619,7 +620,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
        dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
        dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
 
-       priv->tx_buff[*txbd_curr].skb = skb;
        priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
 
        /* Make sure pointer to data buffer is set */
@@ -629,6 +629,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
        *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
 
+       /* Make sure info word is set */
+       wmb();
+
+       priv->tx_buff[*txbd_curr].skb = skb;
+
        /* Increment index to point to the next BD */
        *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
index ebc4518..13de5ce 100644 (file)
@@ -288,6 +288,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        sw_cons = txdata->tx_pkt_cons;
 
+       /* Ensure subsequent loads occur after hw_cons */
+       smp_rmb();
+
        while (sw_cons != hw_cons) {
                u16 pkt_cons;
 
@@ -1954,7 +1957,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
        }
 
        /* select a non-FCoE queue */
-       return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+       return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp));
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
index 74dd48f..04fe570 100644 (file)
@@ -3090,39 +3090,42 @@ static void bcmgenet_timeout(struct net_device *dev)
        netif_tx_wake_all_queues(dev);
 }
 
-#define MAX_MC_COUNT   16
+#define MAX_MDF_FILTER 17
 
 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
                                         unsigned char *addr,
-                                        int *i,
-                                        int *mc)
+                                        int *i)
 {
-       u32 reg;
-
        bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
                             UMAC_MDF_ADDR + (*i * 4));
        bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
                             addr[4] << 8 | addr[5],
                             UMAC_MDF_ADDR + ((*i + 1) * 4));
-       reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
-       reg |= (1 << (MAX_MC_COUNT - *mc));
-       bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
        *i += 2;
-       (*mc)++;
 }
 
 static void bcmgenet_set_rx_mode(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct netdev_hw_addr *ha;
-       int i, mc;
+       int i, nfilter;
        u32 reg;
 
        netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
 
-       /* Promiscuous mode */
+       /* Number of filters needed */
+       nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
+
+       /*
+        * Turn on promiscuous mode for three scenarios:
+        * 1. IFF_PROMISC flag is set
+        * 2. IFF_ALLMULTI flag is set
+        * 3. The number of filters needed exceeds the number of filters
+        *    supported by the hardware.
+        */
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       if (dev->flags & IFF_PROMISC) {
+       if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
+           (nfilter > MAX_MDF_FILTER)) {
                reg |= CMD_PROMISC;
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
                bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
@@ -3132,32 +3135,24 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
        }
 
-       /* UniMac doesn't support ALLMULTI */
-       if (dev->flags & IFF_ALLMULTI) {
-               netdev_warn(dev, "ALLMULTI is not supported\n");
-               return;
-       }
-
        /* update MDF filter */
        i = 0;
-       mc = 0;
        /* Broadcast */
-       bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+       bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
        /* my own address.*/
-       bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
-       /* Unicast list*/
-       if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
-               return;
+       bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
 
-       if (!netdev_uc_empty(dev))
-               netdev_for_each_uc_addr(ha, dev)
-                       bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
-       /* Multicast */
-       if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
-               return;
+       /* Unicast */
+       netdev_for_each_uc_addr(ha, dev)
+               bcmgenet_set_mdf_addr(priv, ha->addr, &i);
 
+       /* Multicast */
        netdev_for_each_mc_addr(ha, dev)
-               bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+               bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+
+       /* Enable filters */
+       reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
+       bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
 }
 
 /* Set the hardware MAC address. */
index 3dd4c39..bee615c 100644 (file)
@@ -3260,7 +3260,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
-               goto out_free_adapter;
+               goto out_free_adapter_nofail;
        }
 
        adapter->pdev = pdev;
@@ -3378,6 +3378,9 @@ out_free_dev:
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);
 
+out_free_adapter_nofail:
+       kfree_skb(adapter->nofail_skb);
+
 out_free_adapter:
        kfree(adapter);
 
index 4269944..129d609 100644 (file)
@@ -2673,8 +2673,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
                return -ENOMEM;
 
        err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
-       if (err)
+       if (err) {
+               kvfree(t);
                return err;
+       }
 
        bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
        t4_free_mem(t);
index 2625586..7524a33 100644 (file)
@@ -4307,8 +4307,12 @@ int be_update_queues(struct be_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        int status;
 
-       if (netif_running(netdev))
+       if (netif_running(netdev)) {
+               /* device cannot transmit now, avoid dev_watchdog timeouts */
+               netif_carrier_off(netdev);
+
                be_close(netdev);
+       }
 
        be_cancel_worker(adapter);
 
index ae8e4fc..0ee164d 100644 (file)
@@ -1699,10 +1699,10 @@ static void fec_get_mac(struct net_device *ndev)
         */
        if (!is_valid_ether_addr(iap)) {
                /* Report it and use a random ethernet address instead */
-               netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+               dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
                eth_hw_addr_random(ndev);
-               netdev_info(ndev, "Using random MAC address: %pM\n",
-                           ndev->dev_addr);
+               dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
+                        ndev->dev_addr);
                return;
        }
 
index 60c727b..def831c 100644 (file)
@@ -157,6 +157,7 @@ struct hip04_priv {
        unsigned int reg_inten;
 
        struct napi_struct napi;
+       struct device *dev;
        struct net_device *ndev;
 
        struct tx_desc *tx_desc;
@@ -185,7 +186,7 @@ struct hip04_priv {
 
 static inline unsigned int tx_count(unsigned int head, unsigned int tail)
 {
-       return (head - tail) % (TX_DESC_NUM - 1);
+       return (head - tail) % TX_DESC_NUM;
 }
 
 static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
                }
 
                if (priv->tx_phys[tx_tail]) {
-                       dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+                       dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
                                         priv->tx_skb[tx_tail]->len,
                                         DMA_TO_DEVICE);
                        priv->tx_phys[tx_tail] = 0;
@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                return NETDEV_TX_BUSY;
        }
 
-       phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
-       if (dma_mapping_error(&ndev->dev, phys)) {
+       phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+       if (dma_mapping_error(priv->dev, phys)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
@@ -497,13 +498,16 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
        u16 len;
        u32 err;
 
+       /* clean up tx descriptors */
+       tx_remaining = hip04_tx_reclaim(ndev, false);
+
        while (cnt && !last) {
                buf = priv->rx_buf[priv->rx_head];
                skb = build_skb(buf, priv->rx_buf_size);
                if (unlikely(!skb))
                        net_dbg_ratelimited("build_skb failed\n");
 
-               dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+               dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
                                 RX_BUF_SIZE, DMA_FROM_DEVICE);
                priv->rx_phys[priv->rx_head] = 0;
 
@@ -531,9 +535,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
                buf = netdev_alloc_frag(priv->rx_buf_size);
                if (!buf)
                        goto done;
-               phys = dma_map_single(&ndev->dev, buf,
+               phys = dma_map_single(priv->dev, buf,
                                      RX_BUF_SIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, phys))
+               if (dma_mapping_error(priv->dev, phys))
                        goto done;
                priv->rx_buf[priv->rx_head] = buf;
                priv->rx_phys[priv->rx_head] = phys;
@@ -554,8 +558,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
        }
        napi_complete(napi);
 done:
-       /* clean up tx descriptors and start a new timer if necessary */
-       tx_remaining = hip04_tx_reclaim(ndev, false);
+       /* start a new timer if necessary */
        if (rx < budget && tx_remaining)
                hip04_start_tx_timer(priv);
 
@@ -637,9 +640,9 @@ static int hip04_mac_open(struct net_device *ndev)
        for (i = 0; i < RX_DESC_NUM; i++) {
                dma_addr_t phys;
 
-               phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+               phys = dma_map_single(priv->dev, priv->rx_buf[i],
                                      RX_BUF_SIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, phys))
+               if (dma_mapping_error(priv->dev, phys))
                        return -EIO;
 
                priv->rx_phys[i] = phys;
@@ -673,7 +676,7 @@ static int hip04_mac_stop(struct net_device *ndev)
 
        for (i = 0; i < RX_DESC_NUM; i++) {
                if (priv->rx_phys[i]) {
-                       dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+                       dma_unmap_single(priv->dev, priv->rx_phys[i],
                                         RX_BUF_SIZE, DMA_FROM_DEVICE);
                        priv->rx_phys[i] = 0;
                }
@@ -824,6 +827,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        priv = netdev_priv(ndev);
+       priv->dev = d;
        priv->ndev = ndev;
        platform_set_drvdata(pdev, ndev);
 
index 70b3253..b46fc37 100644 (file)
@@ -1555,7 +1555,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        struct net_device *netdev;
        struct ibmveth_adapter *adapter;
        unsigned char *mac_addr_p;
-       unsigned int *mcastFilterSize_p;
+       __be32 *mcastFilterSize_p;
        long ret;
        unsigned long ret_attr;
 
@@ -1577,8 +1577,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
                return -EINVAL;
        }
 
-       mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
-                                               VETH_MCAST_FILTER_SIZE, NULL);
+       mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
+                                                       VETH_MCAST_FILTER_SIZE,
+                                                       NULL);
        if (!mcastFilterSize_p) {
                dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
                        "attribute\n");
@@ -1595,7 +1596,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
        adapter->vdev = dev;
        adapter->netdev = netdev;
-       adapter->mcastFilterSize = *mcastFilterSize_p;
+       adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
        adapter->pool_config = 0;
 
        netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
index d681273..9d38634 100644 (file)
@@ -3133,7 +3133,8 @@ static int ixgbe_get_module_info(struct net_device *dev,
                page_swap = true;
        }
 
-       if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+       if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
+           !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
                /* We have a SFP, but it does not support SFF-8472 */
                modinfo->type = ETH_MODULE_SFF_8079;
                modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
index 5abd66c..7b7dc6d 100644 (file)
@@ -70,6 +70,7 @@
 #define IXGBE_SFF_SOFT_RS_SELECT_10G           0x8
 #define IXGBE_SFF_SOFT_RS_SELECT_1G            0x0
 #define IXGBE_SFF_ADDRESSING_MODE              0x4
+#define IXGBE_SFF_DDM_IMPLEMENTED              0x40
 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE         0x1
 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE                0x8
 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
index 5cc05df..8ba9ead 100644 (file)
@@ -4939,6 +4939,20 @@ static const struct dmi_system_id msi_blacklist[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
                },
        },
+       {
+               .ident = "ASUS P6T",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "P6T"),
+               },
+       },
+       {
+               .ident = "ASUS P6X",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+               },
+       },
        {}
 };
 
index 7ee3013..c1dd75f 100644 (file)
@@ -855,6 +855,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;
 
+       if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+               return -EOPNOTSUPP;
+
        if (pauseparam->autoneg)
                return -EINVAL;
 
index 7c42be5..35bcc6d 100644 (file)
@@ -778,7 +778,7 @@ static void mlx5_unregister_device(struct mlx5_core_dev *dev)
        struct mlx5_interface *intf;
 
        mutex_lock(&intf_mutex);
-       list_for_each_entry(intf, &intf_list, list)
+       list_for_each_entry_reverse(intf, &intf_list, list)
                mlx5_remove_device(intf, priv);
        list_del(&priv->dev_list);
        mutex_unlock(&intf_mutex);
index 83651ac..8ebf361 100644 (file)
@@ -4114,7 +4114,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * setup (if available). */
        status = myri10ge_request_irq(mgp);
        if (status != 0)
-               goto abort_with_firmware;
+               goto abort_with_slices;
        myri10ge_free_irq(mgp);
 
        /* Save configuration space to be restored if the
index 29d31eb..fedfd94 100644 (file)
@@ -1,6 +1,6 @@
 /* Renesas Ethernet AVB device driver
  *
- * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2014-2019 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
  * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
@@ -501,7 +501,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
                        kfree(ts_skb);
                        if (tag == tfa_tag) {
                                skb_tstamp_tx(skb, &shhwtstamps);
+                               dev_consume_skb_any(skb);
                                break;
+                       } else {
+                               dev_kfree_skb_any(skb);
                        }
                }
                ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
@@ -1382,7 +1385,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                         DMA_TO_DEVICE);
                        goto unmap;
                }
-               ts_skb->skb = skb;
+               ts_skb->skb = skb_get(skb);
                ts_skb->tag = priv->ts_skb_tag++;
                priv->ts_skb_tag &= 0x3ff;
                list_add_tail(&ts_skb->list, &priv->ts_skb_list);
@@ -1514,6 +1517,7 @@ static int ravb_close(struct net_device *ndev)
        /* Clear the timestamp list */
        list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
                list_del(&ts_skb->list);
+               kfree_skb(ts_skb->skb);
                kfree(ts_skb);
        }
 
index ca73366..2e5f7bb 100644 (file)
@@ -792,15 +792,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
                printk(KERN_ERR "Sgiseeq: Cannot register net device, "
                       "aborting.\n");
                err = -ENODEV;
-               goto err_out_free_page;
+               goto err_out_free_attrs;
        }
 
        printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
 
        return 0;
 
-err_out_free_page:
-       free_page((unsigned long) sp->srings);
+err_out_free_attrs:
+       dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
+                      sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
 err_out_free_dev:
        free_netdev(dev);
 
index 398b08e..68a5833 100644 (file)
@@ -429,10 +429,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
        int ret;
        struct device *dev = &bsp_priv->pdev->dev;
 
-       if (!ldo) {
-               dev_err(dev, "no regulator found\n");
-               return -1;
-       }
+       if (!ldo)
+               return 0;
 
        if (enable) {
                ret = regulator_enable(ldo);
index 371a669..1df84c8 100644 (file)
@@ -187,6 +187,12 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
                                            GMAC_ADDR_LOW(reg));
                        reg++;
                }
+
+               while (reg <= perfect_addr_number) {
+                       writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+                       writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+                       reg++;
+               }
        }
 
 #ifdef FRAME_FILTER_DEBUG
index 45ac38d..868fb63 100644 (file)
@@ -1528,7 +1528,7 @@ tc35815_rx(struct net_device *dev, int limit)
                        pci_unmap_single(lp->pci_dev,
                                         lp->rx_skbs[cur_bd].skb_dma,
                                         RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-                       if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
+                       if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
                                memmove(skb->data, skb->data - NET_IP_ALIGN,
                                        pkt_len);
                        data = skb_put(skb, pkt_len);
index 520cf50..93fe0da 100644 (file)
@@ -379,9 +379,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
 static void tsi108_stat_carry(struct net_device *dev)
 {
        struct tsi108_prv_data *data = netdev_priv(dev);
+       unsigned long flags;
        u32 carry1, carry2;
 
-       spin_lock_irq(&data->misclock);
+       spin_lock_irqsave(&data->misclock, flags);
 
        carry1 = TSI_READ(TSI108_STAT_CARRY1);
        carry2 = TSI_READ(TSI108_STAT_CARRY2);
@@ -449,7 +450,7 @@ static void tsi108_stat_carry(struct net_device *dev)
                              TSI108_STAT_TXPAUSEDROP_CARRY,
                              &data->tx_pause_drop);
 
-       spin_unlock_irq(&data->misclock);
+       spin_unlock_irqrestore(&data->misclock, flags);
 }
 
 /* Read a stat counter atomically with respect to carries.
index 58ba579..f1e9691 100644 (file)
@@ -613,6 +613,10 @@ static void axienet_start_xmit_done(struct net_device *ndev)
 
        ndev->stats.tx_packets += packets;
        ndev->stats.tx_bytes += size;
+
+       /* Matches barrier in axienet_start_xmit */
+       smp_mb();
+
        netif_wake_queue(ndev);
 }
 
@@ -667,9 +671,19 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
        if (axienet_check_tx_bd_space(lp, num_frag)) {
-               if (!netif_queue_stopped(ndev))
-                       netif_stop_queue(ndev);
-               return NETDEV_TX_BUSY;
+               if (netif_queue_stopped(ndev))
+                       return NETDEV_TX_BUSY;
+
+               netif_stop_queue(ndev);
+
+               /* Matches barrier in axienet_start_xmit_done */
+               smp_mb();
+
+               /* Space might have just been freed - check again */
+               if (axienet_check_tx_bd_space(lp, num_frag))
+                       return NETDEV_TX_BUSY;
+
+               netif_wake_queue(ndev);
        }
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
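The paired smp_mb() calls in the hunk above implement the usual lockless stop/wake handshake between the xmit path and the TX completion path: stop first, re-check ring space after the barrier, and only then give up. A minimal sketch of the pattern in isolation (ring_has_space() is a hypothetical stand-in for axienet_check_tx_bd_space(), not driver code):

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
	{
		if (!ring_has_space(ndev)) {
			netif_stop_queue(ndev);
			smp_mb();			/* pairs with barrier in example_tx_done() */
			if (!ring_has_space(ndev))	/* still full: completion will wake us */
				return NETDEV_TX_BUSY;
			netif_wake_queue(ndev);		/* space was freed meanwhile */
		}
		/* ... post descriptors ... */
		return NETDEV_TX_OK;
	}

	static void example_tx_done(struct net_device *ndev)
	{
		/* ... reclaim descriptors, update stats ... */
		smp_mb();				/* pairs with barrier in example_xmit() */
		netif_wake_queue(ndev);
	}

Without the barrier and the re-check, a completion running between the space check and netif_stop_queue() could wake a queue that is about to be stopped, leaving it stopped with no further completions to wake it.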
index 70f26b3..c6a8783 100644 (file)
@@ -472,6 +472,9 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
 {
        int rc;
 
+       if (!dev)
+               return -EINVAL;
+
        rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
        if (rc)
                return rc;
@@ -704,6 +707,9 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
        struct device *d;
        int rc;
 
+       if (!dev)
+               return ERR_PTR(-EINVAL);
+
        /* Search the list of PHY devices on the mdio bus for the
         * PHY with the requested name
         */
index 02327e6..3997689 100644 (file)
@@ -1152,6 +1152,9 @@ static const struct proto_ops pppoe_ops = {
        .recvmsg        = pppoe_recvmsg,
        .mmap           = sock_no_mmap,
        .ioctl          = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = pppox_compat_ioctl,
+#endif
 };
 
 static const struct pppox_proto pppoe_proto = {
index 0e1b306..011fbd1 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/string.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/compat.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/net.h>
@@ -103,6 +104,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
 EXPORT_SYMBOL(pppox_ioctl);
 
+#ifdef CONFIG_COMPAT
+int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+       if (cmd == PPPOEIOCSFWD32)
+               cmd = PPPOEIOCSFWD;
+
+       return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+}
+
+EXPORT_SYMBOL(pppox_compat_ioctl);
+#endif
+
 static int pppox_create(struct net *net, struct socket *sock, int protocol,
                        int kern)
 {
index 53c1f2b..19d0692 100644 (file)
@@ -674,6 +674,9 @@ static const struct proto_ops pptp_ops = {
        .recvmsg    = sock_no_recvmsg,
        .mmap       = sock_no_mmap,
        .ioctl      = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = pppox_compat_ioctl,
+#endif
 };
 
 static const struct pppox_proto pppox_pptp_proto = {
index d235d18..93c7024 100644 (file)
@@ -597,7 +597,8 @@ static void tun_detach_all(struct net_device *dev)
                module_put(THIS_MODULE);
 }
 
-static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
+static int tun_attach(struct tun_struct *tun, struct file *file,
+                     bool skip_filter, bool publish_tun)
 {
        struct tun_file *tfile = file->private_data;
        int err;
@@ -630,7 +631,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
        }
        tfile->queue_index = tun->numqueues;
        tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
-       rcu_assign_pointer(tfile->tun, tun);
+       if (publish_tun)
+               rcu_assign_pointer(tfile->tun, tun);
        rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
        tun->numqueues++;
 
@@ -1642,7 +1644,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                if (err < 0)
                        return err;
 
-               err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
+               err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, true);
                if (err < 0)
                        return err;
 
@@ -1723,13 +1725,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                                       NETIF_F_HW_VLAN_STAG_TX);
 
                INIT_LIST_HEAD(&tun->disabled);
-               err = tun_attach(tun, file, false);
+               err = tun_attach(tun, file, false, false);
                if (err < 0)
                        goto err_free_flow;
 
                err = register_netdevice(tun->dev);
                if (err < 0)
                        goto err_detach;
+               /* free_netdev() won't check refcnt; to avoid a race
+                * with dev_put(), we must publish tun only after registration.
+                */
+               rcu_assign_pointer(tfile->tun, tun);
        }
 
        netif_carrier_on(tun->dev);
@@ -1868,7 +1874,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                ret = security_tun_dev_attach_queue(tun->security);
                if (ret < 0)
                        goto unlock;
-               ret = tun_attach(tun, file, false);
+               ret = tun_attach(tun, file, false, true);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
                if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
index f71abe5..3707aab 100644 (file)
@@ -212,9 +212,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
                goto bad_desc;
        }
 skip:
-       if (    rndis &&
-               header.usb_cdc_acm_descriptor &&
-               header.usb_cdc_acm_descriptor->bmCapabilities) {
+       /* Communication class functions with bmCapabilities are not
+        * RNDIS.  But some Wireless class RNDIS functions use
+        * bmCapabilities for their own purpose. The failsafe is
+        * therefore applied only to Communication class RNDIS
+        * functions.  The rndis test is redundant, but a cheap
+        * optimization.
+        */
+       if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
+           header.usb_cdc_acm_descriptor &&
+           header.usb_cdc_acm_descriptor->bmCapabilities) {
                        dev_dbg(&intf->dev,
                                "ACM capabilities %02x, not really RNDIS?\n",
                                header.usb_cdc_acm_descriptor->bmCapabilities);
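The is_rndis() test added above is defined outside this hunk; a plausible sketch of what such a helper checks (the descriptor fields and values are an assumption, not taken from this diff) would be:

	/* A "real" RNDIS function presents itself as a vendor-specific ACM
	 * function in the Communication class.
	 */
	static bool is_rndis(struct usb_interface_descriptor *desc)
	{
		return desc->bInterfaceClass == USB_CLASS_COMM &&
		       desc->bInterfaceSubClass == USB_CDC_SUBCLASS_ACM &&
		       desc->bInterfaceProtocol == USB_CDC_ACM_PROTO_VENDOR;
	}

With that predicate in place, the bmCapabilities failsafe above only fires for Communication class functions, leaving Wireless class RNDIS functions alone as the comment describes.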
index 947bea8..dfbdea2 100644 (file)
@@ -175,7 +175,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
        }
        if (!timeout) {
                dev_err(&udev->dev, "firmware not ready in time\n");
-               return -ETIMEDOUT;
+               ret = -ETIMEDOUT;
+               goto err;
        }
 
        /* enable ethernet mode (?) */
index 5662bab..d385b67 100644 (file)
@@ -117,16 +117,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
        status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_1)
                / sizeof(init_msg_1[0]), usb_buf, 24);
        if (status != 0)
-               return status;
+               goto out;
 
        memcpy(usb_buf, init_msg_2, 12);
        status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_2)
                / sizeof(init_msg_2[0]), usb_buf, 28);
        if (status != 0)
-               return status;
+               goto out;
 
        memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
-
+out:
        kfree(usb_buf);
        return status;
 }
index 17fac01..4f505eb 100644 (file)
@@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
        int i;
-       __u8 tmp;
+       __u8 tmp = 0;
        __le16 retdatai;
        int ret;
 
index ee6fefe..4391430 100644 (file)
@@ -719,6 +719,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
        {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
        {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
+       {QMI_FIXED_INTF(0x2020, 0x2060, 4)},    /* BroadMobi BM818 */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
index 2d83689..10dd307 100644 (file)
@@ -671,8 +671,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
        ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
                              RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
                              value, index, tmp, size, 500);
+       if (ret < 0)
+               memset(data, 0xff, size);
+       else
+               memcpy(data, tmp, size);
 
-       memcpy(data, tmp, size);
        kfree(tmp);
 
        return ret;
index c9c711d..0e6c665 100644 (file)
@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
                        }
                        result = i2400m_barker_db_add(barker);
                        if (result < 0)
-                               goto error_add;
+                               goto error_parse_add;
                }
                kfree(options_orig);
        }
        return 0;
 
+error_parse_add:
 error_parse:
+       kfree(options_orig);
 error_add:
        kfree(i2400m_barker_db);
        return result;
index 558214c..5b7faab 100644 (file)
@@ -488,7 +488,7 @@ const struct ath10k_hw_values qca6174_values = {
 
 const struct ath10k_hw_values qca99x0_values = {
        .pdev_suspend_option            = WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
-       .rtc_state_val_on               = 5,
+       .rtc_state_val_on               = 7,
        .ce_count                       = 12,
        .msi_assign_ce_max              = 12,
        .num_target_ce_config_wlan      = 10,
index 79c1a09..803ec58 100644 (file)
@@ -1615,6 +1615,10 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
        if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
                return 0;
 
+        /* For mesh, probe response and beacon share the same template */
+       if (ieee80211_vif_is_mesh(vif))
+               return 0;
+
        prb = ieee80211_proberesp_get(hw, vif);
        if (!prb) {
                ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
index a5e1de7..b2ec254 100644 (file)
@@ -1178,6 +1178,10 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
                return -EINVAL;
 
        ev = (struct wmi_pstream_timeout_event *) datap;
+       if (ev->traffic_class >= WMM_NUM_AC) {
+               ath6kl_err("invalid traffic class: %d\n", ev->traffic_class);
+               return -EINVAL;
+       }
 
        /*
         * When the pstream (fat pipe == AC) timesout, it means there were
@@ -1519,6 +1523,10 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
                return -EINVAL;
 
        reply = (struct wmi_cac_event *) datap;
+       if (reply->ac >= WMM_NUM_AC) {
+               ath6kl_err("invalid AC: %d\n", reply->ac);
+               return -EINVAL;
+       }
 
        if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
            (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
@@ -2631,7 +2639,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
        u16 active_tsids = 0;
        int ret;
 
-       if (traffic_class > 3) {
+       if (traffic_class >= WMM_NUM_AC) {
                ath6kl_err("invalid traffic class: %d\n", traffic_class);
                return -EINVAL;
        }
index 4435c7b..d50e2e8 100644 (file)
@@ -250,8 +250,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
 /* Chip Revisions */
 /******************/
 
-static void ath9k_hw_read_revisions(struct ath_hw *ah)
+static bool ath9k_hw_read_revisions(struct ath_hw *ah)
 {
+       u32 srev;
        u32 val;
 
        if (ah->get_mac_revision)
@@ -267,25 +268,33 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
                        val = REG_READ(ah, AR_SREV);
                        ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
                }
-               return;
+               return true;
        case AR9300_DEVID_AR9340:
                ah->hw_version.macVersion = AR_SREV_VERSION_9340;
-               return;
+               return true;
        case AR9300_DEVID_QCA955X:
                ah->hw_version.macVersion = AR_SREV_VERSION_9550;
-               return;
+               return true;
        case AR9300_DEVID_AR953X:
                ah->hw_version.macVersion = AR_SREV_VERSION_9531;
-               return;
+               return true;
        case AR9300_DEVID_QCA956X:
                ah->hw_version.macVersion = AR_SREV_VERSION_9561;
-               return;
+               return true;
        }
 
-       val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
+       srev = REG_READ(ah, AR_SREV);
+
+       if (srev == -EIO) {
+               ath_err(ath9k_hw_common(ah),
+                       "Failed to read SREV register");
+               return false;
+       }
+
+       val = srev & AR_SREV_ID;
 
        if (val == 0xFF) {
-               val = REG_READ(ah, AR_SREV);
+               val = srev;
                ah->hw_version.macVersion =
                        (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
                ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
@@ -304,6 +313,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
                if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
                        ah->is_pciexpress = true;
        }
+
+       return true;
 }
 
 /************************************/
@@ -556,7 +567,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        struct ath_common *common = ath9k_hw_common(ah);
        int r = 0;
 
-       ath9k_hw_read_revisions(ah);
+       if (!ath9k_hw_read_revisions(ah)) {
+               ath_err(common, "Could not read hardware revisions");
+               return -EOPNOTSUPP;
+       }
 
        switch (ah->hw_version.macVersion) {
        case AR_SREV_VERSION_5416_PCI:
index 2303ef9..0835828 100644 (file)
@@ -111,7 +111,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = {
        JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
        JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
        JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
-       JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
+       JP_PATTERN(3, 0, 4, 4000, 4000, 1, 18, 50, false),
        JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
        JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
        JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
index f70dfaa..4507f57 100644 (file)
@@ -61,6 +61,40 @@ module_param(quirks, ulong, 0600);
 MODULE_PARM_DESC(quirks, "Debug quirks for the driver");
 #endif
 
+static unsigned int wow_wake_gpionum;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(wow_wake_gpionum, uint, 0600);
+MODULE_PARM_DESC(wow_wake_gpionum, "configure gpio number for wow wake");
+#endif
+
+static unsigned int wow_wake_enable;
+int cnss_enable_wow_wake(const char *val, const struct kernel_param *kp)
+{
+       int ret;
+       unsigned int prev_val;
+
+       prev_val = *(unsigned int *)kp->arg;
+       ret = param_set_uint(val, kp);
+       if (ret || prev_val == wow_wake_enable) {
+               cnss_pr_err("failed set new wow_enable ret = %d", ret);
+               return ret;
+       }
+       if (wow_wake_enable) {
+               if (!wow_wake_gpionum)
+                       wow_wake_gpionum = HOST_WAKE_GPIO_IN;
+               cnss_set_wlan_chip_to_host_wakeup(wow_wake_gpionum);
+       }
+       return 0;
+}
+
+static const struct kernel_param_ops cnss_param_ops_uint = {
+       .set = &cnss_enable_wow_wake,
+       .get = &param_get_uint
+};
+
+module_param_cb(wow_wake_enable, &cnss_param_ops_uint,
+               &wow_wake_enable, 0600);
+
 static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
        "qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
        "utfbd30.bin", "epping30.bin", "evicted30.bin"
@@ -1227,6 +1261,26 @@ int cnss_force_collect_rddm(struct device *dev)
 }
 EXPORT_SYMBOL(cnss_force_collect_rddm);
 
+int cnss_qmi_send_get(struct device *dev)
+{
+       return 0;
+}
+EXPORT_SYMBOL(cnss_qmi_send_get);
+
+int cnss_qmi_send_put(struct device *dev)
+{
+       return 0;
+}
+EXPORT_SYMBOL(cnss_qmi_send_put);
+
+int cnss_qmi_send(struct device *dev, int type, void *cmd,
+                 int cmd_len, void *cb_ctx,
+                 int (*cb)(void *ctx, void *event, int event_len))
+{
+       return -EINVAL;
+}
+EXPORT_SYMBOL(cnss_qmi_send);
+
 static int cnss_wlfw_server_arrive_hdlr(struct cnss_plat_data *plat_priv)
 {
        int ret;
index aa197f0..969ff00 100644 (file)
@@ -31,6 +31,7 @@
 #define CNSS_EVENT_SYNC_UNINTERRUPTIBLE (CNSS_EVENT_SYNC | \
                                CNSS_EVENT_UNINTERRUPTIBLE)
 #define QCN7605_CALDB_SIZE 614400
+#define HOST_WAKE_GPIO_IN 144
 
 enum cnss_dev_bus_type {
        CNSS_BUS_NONE = -1,
@@ -260,4 +261,6 @@ void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
 u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv);
 bool *cnss_get_qmi_bypass(void);
 bool is_qcn7605_device(u16 device_id);
+void cnss_set_wlan_chip_to_host_wakeup(unsigned int wakeup_gpio_num);
+int cnss_enable_wow_wake(const char *val, const struct kernel_param *kp);
 #endif /* _CNSS_MAIN_H */
index 8a58a53..ed145d7 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
 #include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
 
 #include "main.h"
 #include "debug.h"
@@ -388,3 +389,52 @@ void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv)
 
        plat_priv->pin_result.host_pin_result = pin_status;
 }
+
+static irqreturn_t wlan_wakeup_interrupt(int irq, void *dev_id)
+{
+       return IRQ_HANDLED;
+}
+
+void cnss_set_wlan_chip_to_host_wakeup(unsigned int wakeup_gpio_num)
+{
+       int ret = 0;
+       int wakeup_irq_num;
+
+       ret = gpio_request(wakeup_gpio_num, "qcom_wlan_wakeup");
+       if (ret) {
+               cnss_pr_err("wakeup gpio request failed\n");
+               return;
+       }
+
+       ret = gpio_direction_input(wakeup_gpio_num);
+       if (ret) {
+               cnss_pr_err("wake gpio set dir output failed\n");
+               goto free_gpio;
+       }
+
+       wakeup_irq_num = gpio_to_irq(wakeup_gpio_num);
+       if (wakeup_irq_num < 0) {
+               cnss_pr_err("wake gpio_to_irq err %d\n", wakeup_irq_num);
+               goto free_gpio;
+       }
+       ret = request_irq(wakeup_irq_num, wlan_wakeup_interrupt,
+                         IRQF_TRIGGER_FALLING, "qcom_wlan_wakeup_irq", NULL);
+       if (ret) {
+               cnss_pr_err("request_irq err %d\n", ret);
+               goto free_gpio;
+       }
+
+       ret = enable_irq_wake(wakeup_irq_num);
+       if (!ret) {
+               cnss_pr_err("enable irq wake succeeded\n");
+       } else {
+               cnss_pr_err("enable irq wake failed, ret = %d\n", ret);
+               goto irq_free;
+       }
+       ret = gpio_get_value(wakeup_gpio_num);
+       cnss_pr_err("gpio get val ret = %d wakeup_gpio_num %d\n", ret,
+                   wakeup_gpio_num);
+       return;
+irq_free:
+       free_irq(wakeup_irq_num, NULL);
+free_gpio:
+       gpio_free(wakeup_gpio_num);
+}
index 57a80cf..3d0b932 100644 (file)
@@ -193,10 +193,23 @@ static void mt7601u_complete_rx(struct urb *urb)
        struct mt7601u_rx_queue *q = &dev->rx_q;
        unsigned long flags;
 
-       spin_lock_irqsave(&dev->rx_lock, flags);
+       /* do not schedule the rx tasklet if the urb has been unlinked
+        * or the device has been removed
+        */
+       switch (urb->status) {
+       case -ECONNRESET:
+       case -ESHUTDOWN:
+       case -ENOENT:
+               return;
+       default:
+               dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+                                   urb->status);
+               /* fall through */
+       case 0:
+               break;
+       }
 
-       if (mt7601u_urb_has_error(urb))
-               dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
+       spin_lock_irqsave(&dev->rx_lock, flags);
        if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
                goto out;
 
@@ -228,14 +241,25 @@ static void mt7601u_complete_tx(struct urb *urb)
        struct sk_buff *skb;
        unsigned long flags;
 
-       spin_lock_irqsave(&dev->tx_lock, flags);
+       switch (urb->status) {
+       case -ECONNRESET:
+       case -ESHUTDOWN:
+       case -ENOENT:
+               return;
+       default:
+               dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
+                                   urb->status);
+               /* fall through */
+       case 0:
+               break;
+       }
 
-       if (mt7601u_urb_has_error(urb))
-               dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
+       spin_lock_irqsave(&dev->tx_lock, flags);
        if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
                goto out;
 
        skb = q->e[q->start].skb;
+       q->e[q->start].skb = NULL;
        trace_mt_tx_dma_done(dev, skb);
 
        __skb_queue_tail(&dev->tx_skb_done, skb);
@@ -363,19 +387,9 @@ int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
 static void mt7601u_kill_rx(struct mt7601u_dev *dev)
 {
        int i;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->rx_lock, flags);
-
-       for (i = 0; i < dev->rx_q.entries; i++) {
-               int next = dev->rx_q.end;
 
-               spin_unlock_irqrestore(&dev->rx_lock, flags);
-               usb_poison_urb(dev->rx_q.e[next].urb);
-               spin_lock_irqsave(&dev->rx_lock, flags);
-       }
-
-       spin_unlock_irqrestore(&dev->rx_lock, flags);
+       for (i = 0; i < dev->rx_q.entries; i++)
+               usb_poison_urb(dev->rx_q.e[i].urb);
 }
 
 static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
@@ -445,10 +459,10 @@ static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
 {
        int i;
 
-       WARN_ON(q->used);
-
        for (i = 0; i < q->entries; i++)  {
                usb_poison_urb(q->e[i].urb);
+               if (q->e[i].skb)
+                       mt7601u_tx_status(q->dev, q->e[i].skb);
                usb_free_urb(q->e[i].urb);
        }
 }
index a0a33dc..a1b6db2 100644 (file)
@@ -117,9 +117,9 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
        info->status.rates[0].idx = -1;
        info->flags |= IEEE80211_TX_STAT_ACK;
 
-       spin_lock(&dev->mac_lock);
+       spin_lock_bh(&dev->mac_lock);
        ieee80211_tx_status(dev->hw, skb);
-       spin_unlock(&dev->mac_lock);
+       spin_unlock_bh(&dev->mac_lock);
 }
 
 static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
index de84357..4255fb8 100644 (file)
@@ -240,6 +240,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
                }
 
                vs_ie = (struct ieee_types_header *)vendor_ie;
+               if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
+                       IEEE_MAX_IE_SIZE)
+                       return -EINVAL;
                memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
                       vs_ie, vs_ie->len + 2);
                le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
index 3959f1c..466ea4e 100644 (file)
@@ -108,6 +108,7 @@ enum {
 
 #define MWIFIEX_MAX_TOTAL_SCAN_TIME    (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
 
+#define WPA_GTK_OUI_OFFSET                             2
 #define RSN_GTK_OUI_OFFSET                             2
 
 #define MWIFIEX_OUI_NOT_PRESENT                        0
index 673ca81..b3fa3e4 100644 (file)
@@ -151,7 +151,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
        if (((bss_desc->bcn_wpa_ie) &&
             ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id ==
              WLAN_EID_VENDOR_SPECIFIC))) {
-               iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
+               iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
+                                           WPA_GTK_OUI_OFFSET);
                oui = &mwifiex_wpa_oui[cipher][0];
                ret = mwifiex_search_oui_in_ie(iebody, oui);
                if (ret)
index 759a6ad..60bba1c 100644 (file)
@@ -286,6 +286,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
 
        rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
        if (rate_ie) {
+               if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+                       return;
                memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
                rate_len = rate_ie->len;
        }
@@ -293,8 +295,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
        rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
                                           params->beacon.tail,
                                           params->beacon.tail_len);
-       if (rate_ie)
+       if (rate_ie) {
+               if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
+                       return;
                memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
+       }
 
        return;
 }
@@ -412,6 +417,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
                                            params->beacon.tail_len);
        if (vendor_ie) {
                wmm_ie = (struct ieee_types_header *)vendor_ie;
+               if (*(vendor_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
+                       return;
                memcpy(&bss_cfg->wmm_info, wmm_ie + 1,
                       sizeof(bss_cfg->wmm_info));
                priv->wmm_enabled = 1;
index d324ac3..65d3725 100644 (file)
@@ -1421,6 +1421,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
                        nskb = xenvif_alloc_skb(0);
                        if (unlikely(nskb == NULL)) {
+                               skb_shinfo(skb)->nr_frags = 0;
                                kfree_skb(skb);
                                xenvif_tx_err(queue, &txreq, idx);
                                if (net_ratelimit())
@@ -1436,6 +1437,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                        if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                                /* Failure in xenvif_set_skb_gso is fatal. */
+                               skb_shinfo(skb)->nr_frags = 0;
                                kfree_skb(skb);
                                kfree_skb(nskb);
                                break;
index 6f55ab4..574c93a 100644 (file)
@@ -893,7 +893,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-                       queue->rx.rsp_cons = ++cons;
+                       queue->rx.rsp_cons = ++cons + skb_queue_len(list);
                        kfree_skb(nskb);
                        return ~0U;
                }
index dbab722..6f9d9b9 100644 (file)
@@ -346,6 +346,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
 
                transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
                                            skb->len - 2, GFP_KERNEL);
+               if (!transaction)
+                       return -ENOMEM;
 
                transaction->aid_len = skb->data[1];
                memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
index 2d4b6e9..452c350 100644 (file)
@@ -334,6 +334,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
 
                transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
                                                   skb->len - 2, GFP_KERNEL);
+               if (!transaction)
+                       return -ENOMEM;
 
                transaction->aid_len = skb->data[1];
                memcpy(transaction->aid, &skb->data[2],
index b180e67..98aac17 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2447,6 +2447,11 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
 
                break;
        case 13: /* dump all registers of base_sel */
+               if (!base_sel) {
+                       PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
+                       break;
+               }
+
                if (((base_sel - 1) >= MSM_PCIE_MAX_RES) ||
                                        (!dev->res[base_sel - 1].resource)) {
                        PCIE_DBG_FS(dev, "PCIe: RC%d Resource does not exist\n",
@@ -2454,10 +2459,7 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
                        break;
                }
 
-               if (!base_sel) {
-                       PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
-                       break;
-               } else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
+               if (base_sel - 1 == MSM_PCIE_RES_PARF) {
                        pcie_parf_dump(dev);
                        break;
                } else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
index 5fb4ed6..6ac6618 100644 (file)
@@ -371,7 +371,7 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
                pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
        return count;
 }
-static struct device_attribute dev_remove_attr = __ATTR(remove,
+static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove,
                                                        (S_IWUSR|S_IWGRP),
                                                        NULL, remove_store);
 
index 5073ab0..82b0c2c 100644 (file)
@@ -1736,6 +1736,13 @@ static void pci_pme_list_scan(struct work_struct *work)
                         */
                        if (bridge && bridge->current_state != PCI_D0)
                                continue;
+                       /*
+                        * If the device is in D3cold it should not be
+                        * polled either.
+                        */
+                       if (pme_dev->dev->current_state == PCI_D3cold)
+                               continue;
+
                        pci_pme_wakeup(pme_dev->dev, NULL);
                } else {
                        list_del(&pme_dev->list);
index c7a0599..99d2b73 100644 (file)
@@ -287,6 +287,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
                error = of_property_read_u32(np, "reg", &channel_num);
                if (error || channel_num > 2) {
                        dev_err(dev, "Invalid \"reg\" property\n");
+                       of_node_put(np);
                        return error;
                }
                channel->select_mask = select_mask[channel_num];
@@ -302,6 +303,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
                                                   &rcar_gen2_phy_ops);
                        if (IS_ERR(phy->phy)) {
                                dev_err(dev, "Failed to create PHY\n");
+                               of_node_put(np);
                                return PTR_ERR(phy->phy);
                        }
                        phy_set_drvdata(phy->phy, phy);
index a065112..616055b 100644 (file)
@@ -1837,6 +1837,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
                                                    base,
                                                    &rockchip_regmap_config);
                }
+               of_node_put(node);
        }
 
        bank->irq = irq_of_parse_and_map(bank->of_node, 0);
index 92773ff..cb7105e 100644 (file)
@@ -214,4 +214,7 @@ config MSM_EXT_DISPLAY
          Enabling this option adds MSM External Display Driver.
          External Display driver was added to support the communication
           between the external display driver and its counterparts.
+
+source "drivers/platform/msm/qcn/Kconfig"
+
 endmenu
index f2a0d7e..75145aa 100644 (file)
@@ -16,3 +16,4 @@ obj-$(CONFIG_SEEMP_CORE) += seemp_core/
 obj-$(CONFIG_USB_BAM) += usb_bam.o
 obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/
 obj-$(CONFIG_MSM_EXT_DISPLAY) += msm_ext_display.o
+obj-$(CONFIG_SDIO_QCN) += qcn/
index 9afdfdb..060b40a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -51,7 +51,7 @@ static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
        memset(&gen_params, 0, sizeof(gen_params));
 
        gen_params.ipt = ip;
-       if (entry->rt_tbl)
+       if (entry->rt_tbl && (!ipa3_check_idr_if_freed(entry->rt_tbl)))
                gen_params.rt_tbl_idx = entry->rt_tbl->idx;
        else
                gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;
@@ -1402,7 +1402,9 @@ int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only)
                                        entry->ipacm_installed) {
                                list_del(&entry->link);
                                entry->tbl->rule_cnt--;
-                               if (entry->rt_tbl)
+                               if (entry->rt_tbl &&
+                                       (!ipa3_check_idr_if_freed(
+                                               entry->rt_tbl)))
                                        entry->rt_tbl->ref_cnt--;
                                /* if rule id was allocated from idr, remove */
                                rule_id = entry->rule_id;
index 4d6bc6f..7691aa9 100644 (file)
@@ -2125,5 +2125,6 @@ struct dentry *ipa_debugfs_get_root(void);
 bool ipa3_is_msm_device(void);
 struct device *ipa3_get_pdev(void);
 int ipa3_allocate_dma_task_for_gsi(void);
+bool ipa3_check_idr_if_freed(void *ptr);
 void ipa3_free_dma_task_for_gsi(void);
 #endif /* _IPA3_I_H_ */
index 659d38e..5413e62 100644 (file)
@@ -94,6 +94,7 @@ static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
                struct ipa3_hdr_proc_ctx_entry *proc_ctx;
                proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
                if ((proc_ctx == NULL) ||
+                       ipa3_check_idr_if_freed(proc_ctx) ||
                        (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
                        gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE;
                        gen_params.hdr_ofst = 0;
@@ -731,7 +732,8 @@ struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name)
 
        set = &ipa3_ctx->rt_tbl_set[ip];
        list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
-               if (!strcmp(name, entry->name))
+               if (!ipa3_check_idr_if_freed(entry) &&
+                       !strcmp(name, entry->name))
                        return entry;
        }
 
@@ -1366,7 +1368,8 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
 
        if (entry->hdr)
                __ipa3_release_hdr(entry->hdr->id);
-       else if (entry->proc_ctx)
+       else if (entry->proc_ctx &&
+               (!ipa3_check_idr_if_freed(entry->proc_ctx)))
                __ipa3_release_hdr_proc_ctx(entry->proc_ctx->id);
        list_del(&entry->link);
        entry->tbl->rule_cnt--;
@@ -1567,7 +1570,9 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only)
                                tbl->rule_cnt--;
                                if (rule->hdr)
                                        __ipa3_release_hdr(rule->hdr->id);
-                               else if (rule->proc_ctx)
+                               else if (rule->proc_ctx &&
+                                       (!ipa3_check_idr_if_freed(
+                                               rule->proc_ctx)))
                                        __ipa3_release_hdr_proc_ctx(
                                                rule->proc_ctx->id);
                                rule->cookie = 0;
index f4bce29..a00fcae 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -3865,3 +3865,19 @@ struct device *ipa3_get_pdev(void)
 
        return ipa3_ctx->pdev;
 }
+
+bool ipa3_check_idr_if_freed(void *ptr)
+{
+       int id;
+       void *iter_ptr;
+
+       spin_lock(&ipa3_ctx->idr_lock);
+       idr_for_each_entry(&ipa3_ctx->ipa_idr, iter_ptr, id) {
+               if ((uintptr_t)ptr == (uintptr_t)iter_ptr) {
+                       spin_unlock(&ipa3_ctx->idr_lock);
+                       return false;
+               }
+       }
+       spin_unlock(&ipa3_ctx->idr_lock);
+       return true;
+}
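Design note on the helper above: it is a linear walk of the driver-wide IDR under idr_lock, returning false while the pointer is still registered and true once it has been removed. The filter and routing hunks earlier in this series re-validate rt_tbl and proc_ctx pointers through it before dereferencing them, trading an O(n) scan per lookup for protection against use-after-free.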
index c5be534..e9ad7bc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -3164,6 +3164,7 @@ bdy_alloc_fail:
 int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)
 {
        struct ipahal_fltrt_obj *obj;
+       gfp_t flag = GFP_KERNEL;
 
        IPAHAL_DBG_LOW("Entry\n");
 
@@ -3181,10 +3182,14 @@ int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)
 
        /* add word for rule-set terminator */
        tbl_mem->size += obj->tbl_width;
-
+alloc:
        tbl_mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, tbl_mem->size,
-               &tbl_mem->phys_base, GFP_KERNEL);
+               &tbl_mem->phys_base, flag);
        if (!tbl_mem->base) {
+               if (flag == GFP_KERNEL) {
+                       flag = GFP_ATOMIC;
+                       goto alloc;
+               }
                IPAHAL_ERR("fail to alloc DMA buf of size %d\n",
                        tbl_mem->size);
                return -ENOMEM;
diff --git a/drivers/platform/msm/qcn/Kconfig b/drivers/platform/msm/qcn/Kconfig
new file mode 100644 (file)
index 0000000..50f798f
--- /dev/null
@@ -0,0 +1,8 @@
+config SDIO_QCN
+       tristate "Qualcomm Technologies, Inc SDIO Function 1 Driver"
+       depends on MMC
+       default n
+       help
+         This module adds support for SDIO-based Wi-Fi devices built on the
+         QCN7605 chipset. It interfaces the diag, IPC router, Qsahara and
+         WLAN drivers with the external SDIO-based SoC.
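As with any tristate option, the driver would typically be enabled by adding CONFIG_SDIO_QCN=y (or =m) to the target defconfig; together with the source/obj-$(CONFIG_SDIO_QCN) hooks added earlier and the Makefile that follows, that pulls qcn_sdio.o into the build once CONFIG_MMC is satisfied.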
diff --git a/drivers/platform/msm/qcn/Makefile b/drivers/platform/msm/qcn/Makefile
new file mode 100644 (file)
index 0000000..0e57bd9
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_SDIO_QCN) += qcn_sdio.o
diff --git a/drivers/platform/msm/qcn/qcn_sdio.c b/drivers/platform/msm/qcn/qcn_sdio.c
new file mode 100644 (file)
index 0000000..4aa5810
--- /dev/null
@@ -0,0 +1,1265 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/kthread.h>
+#include "qcn_sdio.h"
+
+static bool tx_dump;
+module_param(tx_dump, bool, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static bool rx_dump;
+module_param(rx_dump, bool, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static int dump_len = 32;
+module_param(dump_len, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static bool retune;
+module_param(retune, bool, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* driver_state :
+ *     QCN_SDIO_SW_RESET = 0,
+ *     QCN_SDIO_SW_PBL,
+ *     QCN_SDIO_SW_SBL,
+ *     QCN_SDIO_SW_RDDM,
+ *     QCN_SDIO_SW_MROM,
+*/
+static int driver_state;
+module_param(driver_state, int, S_IRUGO | S_IRUSR | S_IRGRP);
+
+static struct mmc_host *current_host;
+
+#define HEX_DUMP(mode, buf, len)                               \
+       print_hex_dump(KERN_ERR, mode, DUMP_PREFIX_OFFSET, 32, 4, buf, \
+                      dump_len > len ? len : dump_len, false)
+
+struct qcn_sdio {
+       enum qcn_sdio_sw_mode curr_sw_mode;
+       struct sdio_func *func;
+       const struct sdio_device_id *id;
+       struct qcn_sdio_ch_info *ch[QCN_SDIO_CH_MAX];
+       atomic_t ch_status[QCN_SDIO_CH_MAX];
+       spinlock_t lock_free_q;
+       spinlock_t lock_wait_q;
+       u32 rx_addr_base;
+       u32 tx_addr_base;
+       u8 rx_cnum_base;
+       u8 tx_cnum_base;
+       struct qcn_sdio_rw_info rw_req_info[QCN_SDIO_RW_REQ_MAX];
+       struct list_head rw_free_q;
+       struct list_head rw_wait_q;
+       atomic_t free_list_count;
+       atomic_t wait_list_count;
+       struct workqueue_struct *qcn_sdio_wq;
+       struct work_struct sdio_rw_w;
+};
+
+static struct qcn_sdio *sdio_ctxt;
+struct completion client_probe_complete;
+static struct mutex lock;
+static struct list_head cinfo_head;
+static atomic_t status;
+static atomic_t xport_status;
+static spinlock_t async_lock;
+static struct task_struct *reset_task;
+
+static int qcn_create_sysfs(struct device *dev);
+
+#if (QCN_SDIO_META_VER_0)
+#define        META_INFO(event, data)                                            \
+       ((u32)((u32)data << QCN_SDIO_HMETA_DATA_SHFT) |                   \
+       (u32)(((u32)event << QCN_SDIO_HMETA_EVENT_SHFT) &                 \
+       QCN_SDIO_HMETA_EVENT_BMSK) | (u32)(((u32)(sdio_ctxt->curr_sw_mode)\
+       << QCN_SDIO_HMETA_SW_SHFT) & QCN_SDIO_HMETA_SW_BMSK) |            \
+       (u32)(QCN_SDIO_HMETA_FMT_VER & QCN_SDIO_HMETA_VER_BMSK))
+#elif (QCN_SDIO_META_VER_1)
+#define	META_INFO(event, data)                                            \
+       ((u32)(((u32)event << QCN_SDIO_HMETA_EVENT_SHFT) &                \
+       QCN_SDIO_HMETA_EVENT_BMSK) | (u32)(((u32)data <<                  \
+       QCN_SDIO_HMETA_DATA_SHFT) & QCN_SDIO_HMETA_DATA_BMSK))
+#endif
+
+#define        SDIO_RW_OFFSET          31
+#define        SDIO_RW_MASK            1
+#define        SDIO_FUNCTION_OFFSET    28
+#define        SDIO_FUNCTION_MASK      7
+#define        SDIO_MODE_OFFSET        27
+#define        SDIO_MODE_MASK          1
+#define        SDIO_OPCODE_OFFSET      26
+#define        SDIO_OPCODE_MASK        1
+#define        SDIO_ADDRESS_OFFSET     9
+#define        SDIO_ADDRESS_MASK       0x1FFFF
+#define        SDIO_RAW_OFFSET         27
+#define        SDIO_RAW_MASK           1
+#define        SDIO_STUFF_OFFSET1      26
+#define        SDIO_STUFF_OFFSET2      8
+#define        SDIO_STUFF_MASK         1
+#define        SDIO_BLOCKSZ_MASK       0x1FF
+#define        SDIO_DATA_MASK          0xFF
+
+static inline
+void qcn_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func, u8 mode, u8 opcode,
+                                                       u32 addr, u16 blksz)
+{
+       *arg = (((rw & SDIO_RW_MASK) << SDIO_RW_OFFSET) |
+               ((func & SDIO_FUNCTION_MASK) << SDIO_FUNCTION_OFFSET) |
+               ((mode & SDIO_MODE_MASK) << SDIO_MODE_OFFSET) |
+               ((opcode & SDIO_OPCODE_MASK) << SDIO_OPCODE_OFFSET) |
+               ((addr & SDIO_ADDRESS_MASK) << SDIO_ADDRESS_OFFSET) |
+               (blksz & SDIO_BLOCKSZ_MASK));
+}
+
+static inline
+void qcn_sdio_set_cmd52_arg(u32 *arg, u8 rw, u8 func, u8 raw, u32 addr, u8 val)
+{
+       *arg = ((rw & SDIO_RW_MASK) << SDIO_RW_OFFSET) |
+               ((func & SDIO_FUNCTION_MASK) << SDIO_FUNCTION_OFFSET) |
+               ((raw & SDIO_RAW_MASK) << SDIO_RAW_OFFSET) |
+               (SDIO_STUFF_MASK << SDIO_STUFF_OFFSET1) |
+               ((addr & SDIO_ADDRESS_MASK) << SDIO_ADDRESS_OFFSET) |
+               (SDIO_STUFF_MASK << SDIO_STUFF_OFFSET2) |
+               (val & SDIO_DATA_MASK);
+}
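A quick illustration of how these argument builders would be used (the register addresses and values below are hypothetical, not taken from the driver):

	u32 arg;

	/* CMD52: write (rw = 1) the value 0xAB to register 0x100 of SDIO
	 * function 1 with read-after-write disabled; the packed word is what
	 * goes into an SD_IO_RW_DIRECT command argument.
	 */
	qcn_sdio_set_cmd52_arg(&arg, 1, 1, 0, 0x100, 0xAB);

	/* CMD53: read (rw = 0) from function 1 in block mode (mode = 1) with
	 * an incrementing address (opcode = 1), 4 blocks starting at 0x2000.
	 */
	qcn_sdio_set_cmd53_arg(&arg, 0, 1, 1, 1, 0x2000, 4);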
+
+static void qcn_sdio_free_rw_req(struct qcn_sdio_rw_info *rw_req)
+{
+       spin_lock(&sdio_ctxt->lock_free_q);
+       list_add_tail(&rw_req->list, &sdio_ctxt->rw_free_q);
+       atomic_inc(&sdio_ctxt->free_list_count);
+       spin_unlock(&sdio_ctxt->lock_free_q);
+}
+
+static void qcn_sdio_purge_rw_buff(void)
+{
+       struct qcn_sdio_rw_info *rw_req = NULL;
+
+       while (!list_empty(&sdio_ctxt->rw_wait_q)) {
+               rw_req = list_first_entry(&sdio_ctxt->rw_wait_q,
+                                               struct qcn_sdio_rw_info, list);
+               list_del(&rw_req->list);
+               qcn_sdio_free_rw_req(rw_req);
+       }
+}
+
+void qcn_sdio_client_probe_complete(int id)
+{
+       complete(&client_probe_complete);
+}
+EXPORT_SYMBOL(qcn_sdio_client_probe_complete);
+
+static struct qcn_sdio_rw_info *qcn_sdio_alloc_rw_req(void)
+{
+       struct qcn_sdio_rw_info *rw_req = NULL;
+
+       spin_lock(&sdio_ctxt->lock_free_q);
+       if (list_empty(&sdio_ctxt->rw_free_q)) {
+               spin_unlock(&sdio_ctxt->lock_free_q);
+               return rw_req;
+       }
+
+       rw_req = list_first_entry(&sdio_ctxt->rw_free_q,
+                                               struct qcn_sdio_rw_info, list);
+       list_del(&rw_req->list);
+       atomic_dec(&sdio_ctxt->free_list_count);
+       spin_unlock(&sdio_ctxt->lock_free_q);
+
+       return rw_req;
+}
+
+static void qcn_sdio_add_rw_req(struct qcn_sdio_rw_info *rw_req)
+{
+       spin_lock_bh(&sdio_ctxt->lock_wait_q);
+       list_add_tail(&rw_req->list, &sdio_ctxt->rw_wait_q);
+       atomic_inc(&sdio_ctxt->wait_list_count);
+       spin_unlock_bh(&sdio_ctxt->lock_wait_q);
+}
+
+static int qcn_enable_async_irq(bool enable)
+{
+       unsigned int num = 0;
+       int ret = 0;
+       u32 data = 0;
+
+       num = sdio_ctxt->func->num;
+       sdio_claim_host(sdio_ctxt->func);
+       sdio_ctxt->func->num = 0;
+       data = sdio_readb(sdio_ctxt->func, SDIO_CCCR_INTERRUPT_EXTENSION, NULL);
+       if (enable)
+               data |= SDIO_ENABLE_ASYNC_INTR;
+       else
+               data &= ~SDIO_ENABLE_ASYNC_INTR;
+       sdio_writeb(sdio_ctxt->func, data, SDIO_CCCR_INTERRUPT_EXTENSION, &ret);
+       sdio_ctxt->func->num = num;
+       sdio_release_host(sdio_ctxt->func);
+
+       return ret;
+}
+
+static int qcn_send_io_abort(void)
+{
+       unsigned int num = 0;
+       int ret = 0;
+
+       num = sdio_ctxt->func->num;
+       sdio_claim_host(sdio_ctxt->func);
+       sdio_ctxt->func->num = 0;
+       sdio_writeb(sdio_ctxt->func, 0x1, SDIO_CCCR_ABORT, &ret);
+       sdio_ctxt->func->num = num;
+       sdio_release_host(sdio_ctxt->func);
+
+       return ret;
+}
+
+static int qcn_send_meta_info(u8 event, u32 data)
+{
+       int ret = 0;
+       u32 i = 0;
+       u32 value = 0;
+       u8 temp = 0;
+
+       value = META_INFO(event, data);
+
+       sdio_claim_host(sdio_ctxt->func);
+       if (sdio_ctxt->curr_sw_mode < QCN_SDIO_SW_SBL) {
+               for (i = 0; i < 4; i++) {
+                       temp = (u8)((value >> (i * 8)) & 0x000000FF);
+                       sdio_writeb(sdio_ctxt->func, temp,
+                                               (SDIO_QCN_HRQ_PUSH + i), &ret);
+               }
+       } else {
+               sdio_writel(sdio_ctxt->func, value, SDIO_QCN_HRQ_PUSH, &ret);
+       }
+
+       sdio_release_host(sdio_ctxt->func);
+
+       return ret;
+}
+
+static int qcn_read_crq_info(void)
+{
+       int ret = 0;
+       u32 i = 0;
+       u32 temp = 0;
+       u32 data = 0;
+       u32 len = 0;
+       u8 cid = 0;
+
+       struct sdio_al_channel_handle *ch_handle = NULL;
+
+       sdio_claim_host(sdio_ctxt->func);
+       if (sdio_ctxt->curr_sw_mode < QCN_SDIO_SW_SBL) {
+               for (i = 0; i < 4; i++) {
+                       temp = sdio_readb(sdio_ctxt->func,
+                                               (SDIO_QCN_CRQ_PULL + i), &ret);
+                       temp = temp << (i * 8);
+                       data |= temp;
+               }
+       } else {
+               data = sdio_readl(sdio_ctxt->func, SDIO_QCN_CRQ_PULL, &ret);
+       }
+
+       sdio_release_host(sdio_ctxt->func);
+       if (ret)
+               return ret;
+
+       if (data & SDIO_QCN_CRQ_PULL_TRANS_MASK) {
+               cid = (u8)(data & SDIO_QCN_CRQ_PULL_CH_NUM_MASK);
+               cid -= sdio_ctxt->rx_cnum_base;
+               len = (data & SDIO_QCN_CRQ_PULL_BLK_CNT_MASK) >>
+                       SDIO_QCN_CRQ_PULL_BLK_CNT_SHIFT;
+
+               if (data & SDIO_QCN_CRQ_PULL_BLK_MASK)
+                       len *= sdio_ctxt->func->cur_blksize;
+               temp = (data & SDIO_QCN_CRQ_PULL_UD_MASK) >>
+                                               SDIO_QCN_CRQ_PULL_UD_SHIFT;
+
+               if (!sdio_ctxt->ch[cid]) {
+                       pr_err("Client Id not initialized\n");
+                       return -EINVAL;
+               }
+               switch (temp) {
+               case QCN_SDIO_CRQ_START:
+                       sdio_ctxt->ch[cid]->crq_len = len;
+                       return ret;
+               case QCN_SDIO_CRQ_END:
+                       sdio_ctxt->ch[cid]->crq_len += len;
+                       break;
+               default:
+                       sdio_ctxt->ch[cid]->crq_len = len;
+               }
+
+               ch_handle = &(sdio_ctxt->ch[cid]->ch_handle);
+               if (sdio_ctxt->ch[cid]->ch_data.dl_data_avail_cb)
+                       sdio_ctxt->ch[cid]->ch_data.dl_data_avail_cb(ch_handle,
+                                       sdio_ctxt->ch[cid]->crq_len);
+       }
+
+       return ret;
+}
+
+static int qcn_sdio_config(struct qcn_sdio_client_info *cinfo)
+{
+       int ret = 0;
+       u32 data = 0;
+
+       sdio_claim_host(sdio_ctxt->func);
+       ret = sdio_set_block_size(sdio_ctxt->func,
+                                 cinfo->cli_handle.block_size);
+
+       if (ret) {
+               sdio_release_host(sdio_ctxt->func);
+               goto err;
+       }
+
+       data = SDIO_QCN_CONFIG_QE_MASK;
+
+       sdio_writeb(sdio_ctxt->func, (u8)data, SDIO_QCN_CONFIG, &ret);
+       if (ret) {
+               sdio_release_host(sdio_ctxt->func);
+               goto err;
+       }
+
+       data = (SDIO_QCN_IRQ_EN_LOCAL_MASK |
+                       SDIO_QCN_IRQ_EN_SYS_ERR_MASK |
+                       SDIO_QCN_IRQ_UNDERFLOW_MASK |
+                       SDIO_QCN_IRQ_OVERFLOW_MASK |
+                       SDIO_QCN_IRQ_CH_MISMATCH_MASK |
+                       SDIO_QCN_IRQ_CRQ_READY_MASK);
+
+       sdio_writeb(sdio_ctxt->func, (u8)data, SDIO_QCN_IRQ_EN, &ret);
+       sdio_release_host(sdio_ctxt->func);
+       if (ret) {
+               pr_err("%s: failed write config\n", __func__);
+               goto err;
+       }
+
+       sdio_ctxt->rx_addr_base = SDIO_QCN_MC_DMA0_RX_CH0;
+       sdio_ctxt->rx_cnum_base = QCN_SDIO_DMA0_RX_CNUM;
+       sdio_ctxt->tx_addr_base = SDIO_QCN_MC_DMA1_TX_CH0;
+       sdio_ctxt->tx_cnum_base = QCN_SDIO_DMA1_TX_CNUM;
+
+#if (QCN_SDIO_META_VER_0)
+       data = ((cinfo->cli_handle.block_size / 8) - 1);
+#elif (QCN_SDIO_META_VER_1)
+       data = cinfo->cli_handle.block_size;
+#endif
+       ret = qcn_send_meta_info(QCN_SDIO_BLK_SZ_HEVENT, data);
+err:
+       return ret;
+}
+
+
+int qcn_sw_mode_change(enum qcn_sdio_sw_mode mode)
+{
+       struct qcn_sdio_client_info *cinfo = NULL;
+       struct qcn_sdio_ch_info *chinfo = NULL;
+       int ret = 0;
+
+       if (!mode || mode >= QCN_SDIO_SW_MAX)
+               return -EINVAL;
+
+       if (sdio_ctxt->curr_sw_mode == mode)
+               return 0;
+
+       if ((sdio_ctxt->curr_sw_mode == QCN_SDIO_SW_PBL) &&
+                                               (mode == QCN_SDIO_SW_SBL)) {
+               sdio_ctxt->curr_sw_mode = QCN_SDIO_SW_SBL;
+               qcn_send_meta_info(QCN_SDIO_BLK_SZ_HEVENT,
+                                               sdio_ctxt->func->cur_blksize);
+               qcn_send_meta_info(QCN_SDIO_DOORBELL_HEVENT, (u32)0);
+               return 0;
+       }
+
+       switch (sdio_ctxt->curr_sw_mode) {
+       case QCN_SDIO_SW_PBL:
+       case QCN_SDIO_SW_SBL:
+       case QCN_SDIO_SW_RDDM:
+               mutex_lock(&lock);
+               list_for_each_entry(cinfo, &cinfo_head, cli_list) {
+                       while (!list_empty(&cinfo->ch_head)) {
+                               chinfo = list_first_entry(&cinfo->ch_head,
+                                             struct qcn_sdio_ch_info, ch_list);
+                               sdio_al_deregister_channel(&chinfo->ch_handle);
+                       }
+                       cinfo->cli_handle.func = NULL;
+
+                       if (cinfo->is_probed) {
+                               cinfo->cli_data.remove(&cinfo->cli_handle);
+                               cinfo->is_probed = 0;
+                       }
+                       if (((cinfo->cli_handle.id == QCN_SDIO_CLI_ID_WLAN) ||
+                            (cinfo->cli_handle.id == QCN_SDIO_CLI_ID_QMI) ||
+                            (cinfo->cli_handle.id == QCN_SDIO_CLI_ID_DIAG)) &&
+                            (mode == QCN_SDIO_SW_MROM)) {
+                               qcn_send_meta_info((u8)QCN_SDIO_SW_MODE_HEVENT,
+                                               (u32)(mode | QCN_SDIO_MAJOR_VER
+                                               | QCN_SDIO_MINOR_VER));
+                               cinfo->cli_handle.block_size =
+                                                       QCN_SDIO_MROM_BLK_SZ;
+                               cinfo->cli_handle.func = sdio_ctxt->func;
+                               qcn_sdio_config(cinfo);
+                               cinfo->is_probed = !cinfo->cli_data.probe(
+                                                       &cinfo->cli_handle);
+                               qcn_send_meta_info(QCN_SDIO_DOORBELL_HEVENT,
+                                                                       (u32)0);
+                       }
+               }
+               mutex_unlock(&lock);
+               break;
+       case QCN_SDIO_SW_RESET:
+       case QCN_SDIO_SW_MROM:
+               ret = wait_for_completion_timeout(&client_probe_complete,
+                                                       msecs_to_jiffies(3000));
+               if (!ret)
+                       pr_err("Timeout waiting for clients\n");
+
+               mutex_lock(&lock);
+               list_for_each_entry(cinfo, &cinfo_head, cli_list) {
+                       while (!list_empty(&cinfo->ch_head)) {
+                               chinfo = list_first_entry(&cinfo->ch_head,
+                                             struct qcn_sdio_ch_info, ch_list);
+                               sdio_al_deregister_channel(&chinfo->ch_handle);
+                       }
+                       cinfo->cli_handle.func = NULL;
+
+
+                       if (cinfo->is_probed) {
+                               cinfo->cli_data.remove(&cinfo->cli_handle);
+                               cinfo->is_probed = 0;
+                       }
+
+                       if ((cinfo->cli_handle.id == QCN_SDIO_CLI_ID_TTY) &&
+                                                  (mode <= QCN_SDIO_SW_MROM)) {
+                               qcn_send_meta_info((u8)QCN_SDIO_SW_MODE_HEVENT,
+                                               (u32)(mode | QCN_SDIO_MAJOR_VER
+                                               | QCN_SDIO_MINOR_VER));
+                               cinfo->cli_handle.block_size =
+                                                       QCN_SDIO_TTY_BLK_SZ;
+                               cinfo->cli_handle.func = sdio_ctxt->func;
+                               qcn_sdio_config(cinfo);
+                               cinfo->is_probed = !cinfo->cli_data.probe(
+                                                       &cinfo->cli_handle);
+                               qcn_send_meta_info(QCN_SDIO_DOORBELL_HEVENT,
+                                                                       (u32)0);
+                       }
+               }
+               mutex_unlock(&lock);
+               break;
+       default:
+               pr_err("Invalid mode\n");
+       }
+
+       driver_state = mode;
+       sdio_ctxt->curr_sw_mode = mode;
+       return 0;
+}
+
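+/*
+ * Read the device-to-host meta event from SDIO_QCN_LOCAL_INFO, clear the
+ * local interrupt and dispatch it: SW mode change events go to
+ * qcn_sw_mode_change(), everything else is routed to the owning channel's
+ * dl_meta_data_cb based on the event number range. Before the card reaches
+ * SBL the register is read one byte at a time; later modes use a single
+ * 32-bit read.
+ */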
+static int qcn_read_meta_info(void)
+{
+       int ret = 0;
+       u32 i = 0;
+       u32 data = 0;
+       u32 temp = 0;
+
+       sdio_claim_host(sdio_ctxt->func);
+
+       if (sdio_ctxt->curr_sw_mode < QCN_SDIO_SW_SBL) {
+               for (i = 0; i < 4; i++) {
+                       temp = sdio_readb(sdio_ctxt->func,
+                                       (SDIO_QCN_LOCAL_INFO + i), &ret);
+                       temp = temp << (i * 8);
+                       data |= temp;
+               }
+       } else {
+               data = sdio_readl(sdio_ctxt->func, SDIO_QCN_LOCAL_INFO, &ret);
+       }
+
+       sdio_writeb(sdio_ctxt->func, (u8)SDIO_QCN_IRQ_CLR_LOCAL_MASK,
+                   SDIO_QCN_IRQ_CLR, NULL);
+
+       sdio_release_host(sdio_ctxt->func);
+
+       if (ret)
+               return ret;
+
+       temp = (data & QCN_SDIO_LMETA_EVENT_BMSK) >> QCN_SDIO_LMETA_EVENT_SHFT;
+       switch (temp) {
+       case QCN_SDIO_SW_MODE_LEVENT:
+               temp = (data & QCN_SDIO_LMETA_SW_BMSK) >>
+                                               QCN_SDIO_LMETA_SW_SHFT;
+               qcn_sw_mode_change((enum qcn_sdio_sw_mode)temp);
+               break;
+       default:
+               if ((temp >= QCN_SDIO_META_START_CH0) &&
+                               (temp < QCN_SDIO_META_START_CH1)) {
+                       if (sdio_ctxt->ch[0] &&
+                               sdio_ctxt->ch[0]->ch_data.dl_meta_data_cb)
+                               sdio_ctxt->ch[0]->ch_data.dl_meta_data_cb(
+                                       &(sdio_ctxt->ch[0]->ch_handle), data);
+               } else if ((temp >= QCN_SDIO_META_START_CH1) &&
+                       (temp < QCN_SDIO_META_START_CH2)) {
+                       if (sdio_ctxt->ch[1] &&
+                               sdio_ctxt->ch[1]->ch_data.dl_meta_data_cb)
+                               sdio_ctxt->ch[1]->ch_data.dl_meta_data_cb(
+                                       &(sdio_ctxt->ch[1]->ch_handle), data);
+               } else if ((temp >= QCN_SDIO_META_START_CH2) &&
+                               (temp < QCN_SDIO_META_START_CH3)) {
+                       if (sdio_ctxt->ch[2] &&
+                               sdio_ctxt->ch[2]->ch_data.dl_meta_data_cb)
+                               sdio_ctxt->ch[2]->ch_data.dl_meta_data_cb(
+                                       &(sdio_ctxt->ch[2]->ch_handle), data);
+               } else if ((temp >= QCN_SDIO_META_START_CH3) &&
+                                       (temp < QCN_SDIO_META_END)) {
+                       if (sdio_ctxt->ch[3] &&
+                               sdio_ctxt->ch[3]->ch_data.dl_meta_data_cb)
+                               sdio_ctxt->ch[3]->ch_data.dl_meta_data_cb(
+                                       &(sdio_ctxt->ch[3]->ch_handle), data);
+               } else {
+                       ret = -EINVAL;
+               }
+       }
+
+       return ret;
+}
+
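+/*
+ * Bus recovery helper: drop every queued transfer and power-cycle the card
+ * by removing and re-adding the MMC host so it re-enumerates.
+ */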
+static int reset_thread(void *data)
+{
+       qcn_sdio_purge_rw_buff();
+       qcn_sdio_card_state(false);
+       qcn_sdio_card_state(true);
+       kthread_stop(reset_task);
+       reset_task = NULL;
+
+       return 0;
+}
+
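+/*
+ * SDIO function interrupt handler. Reads SDIO_QCN_IRQ_STATUS and services
+ * the pending source: CRQ-ready and local meta events are processed, error
+ * conditions (sys_err, underflow, overflow, channel mismatch) are cleared
+ * and logged. A timeout on the status read kicks off the reset thread to
+ * recover the bus.
+ */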
+static void qcn_sdio_irq_handler(struct sdio_func *func)
+{
+       u8 data = 0;
+       int ret = 0;
+
+       sdio_claim_host(sdio_ctxt->func);
+       data = sdio_readb(sdio_ctxt->func, SDIO_QCN_IRQ_STATUS, &ret);
+       if (ret == -ETIMEDOUT) {
+               sdio_release_host(sdio_ctxt->func);
+
+               pr_err("%s: IRQ status read error ret = %d\n", __func__, ret);
+
+               reset_task = kthread_run(reset_thread, NULL, "qcn_reset");
+               if (IS_ERR(reset_task))
+                       pr_err("Failed to run qcn_reset thread\n");
+
+               return;
+       }
+       sdio_release_host(sdio_ctxt->func);
+
+       if (data & SDIO_QCN_IRQ_CRQ_READY_MASK) {
+               qcn_read_crq_info();
+       } else if (data & SDIO_QCN_IRQ_LOCAL_MASK) {
+               qcn_read_meta_info();
+       } else if (data & SDIO_QCN_IRQ_EN_SYS_ERR_MASK) {
+               sdio_claim_host(sdio_ctxt->func);
+               sdio_writeb(sdio_ctxt->func, (u8)SDIO_QCN_IRQ_CLR_SYS_ERR_MASK,
+                               SDIO_QCN_IRQ_CLR, NULL);
+               sdio_release_host(sdio_ctxt->func);
+               pr_err("%s: sys_err interrupt triggered\n", __func__);
+       } else if (data & SDIO_QCN_IRQ_EN_UNDERFLOW_MASK) {
+               sdio_claim_host(sdio_ctxt->func);
+               sdio_writeb(sdio_ctxt->func,
+                                       (u8)SDIO_QCN_IRQ_CLR_UNDERFLOW_MASK,
+                                       SDIO_QCN_IRQ_CLR, NULL);
+               sdio_release_host(sdio_ctxt->func);
+               pr_err("%s: underflow interrupt triggered\n", __func__);
+       } else if (data & SDIO_QCN_IRQ_EN_OVERFLOW_MASK) {
+               sdio_claim_host(sdio_ctxt->func);
+               sdio_writeb(sdio_ctxt->func, (u8)SDIO_QCN_IRQ_CLR_OVERFLOW_MASK,
+                               SDIO_QCN_IRQ_CLR, NULL);
+               sdio_release_host(sdio_ctxt->func);
+               pr_err("%s: overflow interrupt triggered\n", __func__);
+       } else if (data & SDIO_QCN_IRQ_EN_CH_MISMATCH_MASK) {
+               sdio_claim_host(sdio_ctxt->func);
+               sdio_writeb(sdio_ctxt->func,
+                                       (u8)SDIO_QCN_IRQ_CLR_CH_MISMATCH_MASK,
+                                       SDIO_QCN_IRQ_CLR, NULL);
+               sdio_release_host(sdio_ctxt->func);
+               pr_err("%s: channel mismatch interrupt triggered\n", __func__);
+       } else {
+               sdio_claim_host(sdio_ctxt->func);
+               sdio_writeb(sdio_ctxt->func, (u8)data, SDIO_QCN_IRQ_CLR, NULL);
+               sdio_release_host(sdio_ctxt->func);
+       }
+}
+
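+/*
+ * Blocking byte-stream transfer helpers. Each channel has a dedicated
+ * 4-byte-spaced window at tx_addr_base/rx_addr_base; a failed transfer
+ * triggers an IO abort so the card can resynchronize.
+ */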
+static int qcn_sdio_send_buff(u32 cid, void *buff, size_t len)
+{
+       int ret = 0;
+
+       sdio_claim_host(sdio_ctxt->func);
+       ret = sdio_writesb(sdio_ctxt->func,
+                       (sdio_ctxt->tx_addr_base + (cid * (u32)4)), buff, len);
+
+       if (ret)
+               qcn_send_io_abort();
+
+       sdio_release_host(sdio_ctxt->func);
+
+       return ret;
+}
+
+static int qcn_sdio_recv_buff(u32 cid, void *buff, size_t len)
+{
+       int ret = 0;
+
+       sdio_claim_host(sdio_ctxt->func);
+       ret = sdio_readsb(sdio_ctxt->func, buff,
+                       (sdio_ctxt->rx_addr_base + (cid * (u32)4)), len);
+
+       if (ret)
+               qcn_send_io_abort();
+
+       sdio_release_host(sdio_ctxt->func);
+
+       return ret;
+}
+
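+/*
+ * Workqueue handler for asynchronous transfers: drain rw_wait_q, perform
+ * each request synchronously on the bus and complete it through the
+ * client's ul/dl transfer callback.
+ */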
+static void qcn_sdio_rw_work(struct work_struct *work)
+{
+       int ret = 0;
+       struct qcn_sdio_rw_info *rw_req = NULL;
+       struct sdio_al_xfer_result *result = NULL;
+       struct sdio_al_channel_handle *ch_handle = NULL;
+
+       while (1) {
+               spin_lock_bh(&sdio_ctxt->lock_wait_q);
+               if (list_empty(&sdio_ctxt->rw_wait_q)) {
+                       spin_unlock_bh(&sdio_ctxt->lock_wait_q);
+                       break;
+               }
+               rw_req = list_first_entry(&sdio_ctxt->rw_wait_q,
+                                               struct qcn_sdio_rw_info, list);
+               list_del(&rw_req->list);
+               spin_unlock_bh(&sdio_ctxt->lock_wait_q);
+
+               if (rw_req->dir) {
+                       ret = qcn_sdio_recv_buff(rw_req->cid, rw_req->buf,
+                                                               rw_req->len);
+                       if (rx_dump)
+                               HEX_DUMP("ASYNC_RECV: ", rw_req->buf,
+                                                               rw_req->len);
+               } else {
+                       ret = qcn_sdio_send_buff(rw_req->cid, rw_req->buf,
+                                                               rw_req->len);
+                       if (tx_dump)
+                               HEX_DUMP("ASYNC_SEND: ", rw_req->buf,
+                                                               rw_req->len);
+               }
+
+               ch_handle = &sdio_ctxt->ch[rw_req->cid]->ch_handle;
+               result = &sdio_ctxt->ch[rw_req->cid]->result;
+               result->xfer_status = ret;
+               result->buf_addr = rw_req->buf;
+               result->xfer_len = rw_req->len;
+               if (rw_req->dir)
+                       sdio_ctxt->ch[rw_req->cid]->ch_data.dl_xfer_cb(
+                                       ch_handle, result, rw_req->ctxt);
+               else
+                       sdio_ctxt->ch[rw_req->cid]->ch_data.ul_xfer_cb(
+                                       ch_handle, result, rw_req->ctxt);
+               atomic_set(&sdio_ctxt->ch_status[rw_req->cid], 0);
+               qcn_sdio_free_rw_req(rw_req);
+               atomic_dec(&sdio_ctxt->wait_list_count);
+       }
+}
+
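+/*
+ * SDIO probe: allocate the driver context, pre-populate the request pool,
+ * enable the function, hook the card interrupt and read the initial meta
+ * info so the current SW mode is known before clients attach.
+ */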
+static
+int qcn_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
+{
+       int ret = 0;
+
+       sdio_ctxt = kzalloc(sizeof(struct qcn_sdio), GFP_KERNEL);
+       if (!sdio_ctxt)
+               return -ENOMEM;
+
+       sdio_ctxt->func = func;
+       sdio_ctxt->id = id;
+       sdio_set_drvdata(func, sdio_ctxt);
+       sdio_ctxt->qcn_sdio_wq = create_singlethread_workqueue("qcn_sdio");
+       if (!sdio_ctxt->qcn_sdio_wq) {
+               pr_err("%s: Error: SDIO create wq\n", __func__);
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       for (ret = 0; ret < QCN_SDIO_CH_MAX; ret++) {
+               sdio_ctxt->ch[ret] = NULL;
+               atomic_set(&sdio_ctxt->ch_status[ret], -1);
+       }
+
+       spin_lock_init(&sdio_ctxt->lock_free_q);
+       spin_lock_init(&sdio_ctxt->lock_wait_q);
+       spin_lock_init(&async_lock);
+       INIT_WORK(&sdio_ctxt->sdio_rw_w, qcn_sdio_rw_work);
+       INIT_LIST_HEAD(&sdio_ctxt->rw_free_q);
+       INIT_LIST_HEAD(&sdio_ctxt->rw_wait_q);
+
+       for (ret = 0; ret < QCN_SDIO_RW_REQ_MAX; ret++)
+               qcn_sdio_free_rw_req(&sdio_ctxt->rw_req_info[ret]);
+
+       sdio_claim_host(sdio_ctxt->func);
+       ret = sdio_enable_func(sdio_ctxt->func);
+       if (ret) {
+               pr_err("%s: Error:%d SDIO enable func\n", __func__, ret);
+               sdio_release_host(sdio_ctxt->func);
+               goto err;
+       }
+       ret = sdio_claim_irq(sdio_ctxt->func, qcn_sdio_irq_handler);
+       if (ret) {
+               pr_err("%s: Error:%d SDIO claim irq\n", __func__, ret);
+               sdio_release_host(sdio_ctxt->func);
+               goto err;
+       }
+
+       qcn_enable_async_irq(true);
+       sdio_release_host(sdio_ctxt->func);
+
+       if (qcn_read_meta_info()) {
+               pr_err("%s: Error: SDIO Config\n", __func__);
+               qcn_send_meta_info((u8)QCN_SDIO_SW_MODE_HEVENT, (u32)0);
+       }
+
+       current_host = func->card->host;
+
+       if (!retune) {
+               pr_debug("%s Probing driver with retune disabled\n", __func__);
+               mmc_retune_disable(current_host);
+       }
+
+       atomic_set(&xport_status, 1);
+
+       return 0;
+err:
+       kfree(sdio_ctxt);
+       sdio_ctxt = NULL;
+       return ret;
+}
+
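+/*
+ * SDIO remove: mask interrupts, flush pending transfers, run every
+ * registered client's remove callback and free the driver context.
+ */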
+static void qcn_sdio_remove(struct sdio_func *func)
+{
+       struct qcn_sdio_client_info *cinfo = NULL;
+       struct qcn_sdio_ch_info *ch_info = NULL;
+
+       atomic_set(&xport_status, 0);
+       sdio_claim_host(sdio_ctxt->func);
+       qcn_enable_async_irq(false);
+       sdio_release_irq(sdio_ctxt->func);
+       sdio_release_host(sdio_ctxt->func);
+
+       qcn_sdio_purge_rw_buff();
+
+       destroy_workqueue(sdio_ctxt->qcn_sdio_wq);
+       mutex_lock(&lock);
+       list_for_each_entry(cinfo, &cinfo_head, cli_list) {
+               while (!list_empty(&cinfo->ch_head)) {
+                       ch_info = list_first_entry(&cinfo->ch_head,
+                                       struct qcn_sdio_ch_info, ch_list);
+                       sdio_al_deregister_channel(&ch_info->ch_handle);
+               }
+               mutex_unlock(&lock);
+               if (cinfo->is_probed) {
+                       cinfo->cli_data.remove(&cinfo->cli_handle);
+                       cinfo->is_probed = 0;
+               }
+               mutex_lock(&lock);
+       }
+       mutex_unlock(&lock);
+
+       kfree(sdio_ctxt);
+       sdio_ctxt = NULL;
+       mmc_retune_enable(current_host);
+}
+
+static const struct sdio_device_id qcn_sdio_devices[] = {
+       {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCN_BASE | 0x0))},
+       {},
+};
+
+MODULE_DEVICE_TABLE(sdio, qcn_sdio_devices);
+
+static struct sdio_driver qcn_sdio_driver = {
+       .name = "qcn_sdio",
+       .id_table = qcn_sdio_devices,
+       .probe = qcn_sdio_probe,
+       .remove = qcn_sdio_remove,
+};
+
+static int qcn_sdio_plat_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+
+       mutex_init(&lock);
+       INIT_LIST_HEAD(&cinfo_head);
+       atomic_set(&status, 1);
+
+       ret = sdio_register_driver(&qcn_sdio_driver);
+       if (ret) {
+               pr_err("%s: SDIO driver registration failed: %d\n", __func__,
+                                                                       ret);
+               mutex_destroy(&lock);
+               atomic_set(&status, 0);
+               return ret;
+       }
+
+       init_completion(&client_probe_complete);
+
+       qcn_create_sysfs(&pdev->dev);
+
+       return ret;
+}
+
+static int qcn_sdio_plat_remove(struct platform_device *pdev)
+{
+       struct qcn_sdio_client_info *cinfo = NULL;
+
+       mutex_lock(&lock);
+       while (!list_empty(&cinfo_head)) {
+               cinfo = list_first_entry(&cinfo_head, struct
+                                               qcn_sdio_client_info, cli_list);
+               mutex_unlock(&lock);
+               sdio_al_deregister_client(&cinfo->cli_handle);
+               mutex_lock(&lock);
+       }
+       mutex_unlock(&lock);
+       mutex_destroy(&lock);
+       if (sdio_ctxt) {
+               destroy_workqueue(sdio_ctxt->qcn_sdio_wq);
+               sdio_release_irq(sdio_ctxt->func);
+               kfree(sdio_ctxt);
+               sdio_ctxt = NULL;
+       }
+       sdio_unregister_driver(&qcn_sdio_driver);
+       atomic_set(&status, 0);
+
+       return 0;
+}
+
+static const struct of_device_id qcn_sdio_dt_match[] = {
+       {.compatible = "qcom,qcn-sdio"},
+       {}
+};
+MODULE_DEVICE_TABLE(of, qcn_sdio_dt_match);
+
+static struct platform_driver qcn_sdio_plat_driver = {
+       .probe  = qcn_sdio_plat_probe,
+       .remove = qcn_sdio_plat_remove,
+       .driver = {
+               .name = "qcn-sdio",
+               .owner = THIS_MODULE,
+               .of_match_table = qcn_sdio_dt_match,
+       },
+};
+
+static int __init qcn_sdio_init(void)
+{
+       return platform_driver_register(&qcn_sdio_plat_driver);
+}
+
+static void __exit qcn_sdio_exit(void)
+{
+       platform_driver_unregister(&qcn_sdio_plat_driver);
+}
+
+module_init(qcn_sdio_init);
+module_exit(qcn_sdio_exit);
+
+int sdio_al_is_ready(void)
+{
+       if (atomic_read(&status))
+               return 0;
+       else
+               return -EBUSY;
+}
+EXPORT_SYMBOL(sdio_al_is_ready);
+
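+/*
+ * Register an SDIO AL client. The client name selects its ID and block
+ * size; if the card is already in MROM mode, any client other than TTY is
+ * configured and probed immediately and the doorbell is rung.
+ */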
+struct sdio_al_client_handle *sdio_al_register_client(
+                                       struct sdio_al_client_data *client_data)
+{
+       struct qcn_sdio_client_info *client_info = NULL;
+
+       if (!((client_data) && (client_data->name) &&
+                       (client_data->probe) && (client_data->remove))) {
+               pr_err("%s: SDIO: Invalid param\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       client_info = kzalloc(sizeof(struct qcn_sdio_client_info), GFP_KERNEL);
+       if (!client_info)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(&client_info->cli_data, client_data,
+                                       sizeof(struct sdio_al_client_data));
+
+       if (!strcmp(client_data->name, "SDIO_AL_CLIENT_TTY")) {
+               client_info->cli_handle.id = QCN_SDIO_CLI_ID_TTY;
+               client_info->cli_handle.block_size = QCN_SDIO_TTY_BLK_SZ;
+       } else if (!strcmp(client_data->name, "SDIO_AL_CLIENT_WLAN")) {
+               client_info->cli_handle.id = QCN_SDIO_CLI_ID_WLAN;
+               client_info->cli_handle.block_size = QCN_SDIO_MROM_BLK_SZ;
+       } else if (!strcmp(client_data->name, "SDIO_AL_CLIENT_QMI")) {
+               client_info->cli_handle.id = QCN_SDIO_CLI_ID_QMI;
+               client_info->cli_handle.block_size = QCN_SDIO_MROM_BLK_SZ;
+       } else if (!strcmp(client_data->name, "SDIO_AL_CLIENT_DIAG")) {
+               client_info->cli_handle.id = QCN_SDIO_CLI_ID_DIAG;
+               client_info->cli_handle.block_size = QCN_SDIO_MROM_BLK_SZ;
+       } else {
+               pr_err("%s: SDIO: Invalid name\n", __func__);
+               kfree(client_info);
+               return ERR_PTR(-EINVAL);
+       }
+       client_info->cli_handle.client_data = &client_info->cli_data;
+
+       INIT_LIST_HEAD(&client_info->ch_head);
+       mutex_lock(&lock);
+       list_add_tail(&client_info->cli_list, &cinfo_head);
+       mutex_unlock(&lock);
+
+       client_info->is_probed = 0;
+       if ((sdio_ctxt) && (sdio_ctxt->curr_sw_mode)) {
+               if ((sdio_ctxt->curr_sw_mode == QCN_SDIO_SW_MROM) &&
+                       (client_info->cli_handle.id > QCN_SDIO_CLI_ID_TTY)) {
+                       qcn_sdio_config(client_info);
+                       client_info->is_probed = !client_data->probe(
+                                               &client_info->cli_handle);
+                       qcn_send_meta_info(QCN_SDIO_DOORBELL_HEVENT, (u32)0);
+               }
+       }
+
+       return &client_info->cli_handle;
+}
+EXPORT_SYMBOL(sdio_al_register_client);
+
+void sdio_al_deregister_client(struct sdio_al_client_handle *handle)
+{
+       struct qcn_sdio_ch_info *ch_info = NULL;
+       struct qcn_sdio_client_info *client_info = NULL;
+
+       if (!handle) {
+               pr_err("%s: SDIO: Invalid param\n", __func__);
+               return;
+       }
+
+       client_info = container_of(handle, struct qcn_sdio_client_info,
+                                                               cli_handle);
+
+       while (!list_empty(&client_info->ch_head)) {
+               ch_info = list_first_entry(&client_info->ch_head,
+                                       struct qcn_sdio_ch_info, ch_list);
+               sdio_al_deregister_channel(&ch_info->ch_handle);
+       }
+       mutex_lock(&lock);
+       list_del(&client_info->cli_list);
+       kfree(client_info);
+       mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(sdio_al_deregister_client);
+
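+/*
+ * Bind a named logical channel (WLAN/TTY/QMI/DIAG) to one of the four
+ * fixed DMA channels, link it to its owning client and mark it active.
+ */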
+struct sdio_al_channel_handle *sdio_al_register_channel(
+               struct sdio_al_client_handle *client_handle,
+               struct sdio_al_channel_data *channel_data)
+{
+       struct qcn_sdio_ch_info *ch_info = NULL;
+       struct qcn_sdio_client_info *client_info = NULL;
+
+       if (!((channel_data) && (channel_data->name) && (client_handle) &&
+                               (channel_data->client_data))) {
+               pr_err("%s: SDIO: Invalid param\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       ch_info = kzalloc(sizeof(struct qcn_sdio_ch_info), GFP_KERNEL);
+       if (!ch_info)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(&ch_info->ch_data, channel_data,
+                                       sizeof(struct sdio_al_channel_data));
+
+       if ((!strcmp(channel_data->name, "SDIO_AL_WLAN_CH0")) ||
+                       (!strcmp(channel_data->name, "SDIO_AL_TTY_CH0"))) {
+               if (atomic_read(&sdio_ctxt->ch_status[QCN_SDIO_CH_0]) < 0)
+                       ch_info->ch_handle.channel_id = QCN_SDIO_CH_0;
+       } else if (!strcmp(channel_data->name, "SDIO_AL_WLAN_CH1")) {
+               if (atomic_read(&sdio_ctxt->ch_status[QCN_SDIO_CH_1]) < 0)
+                       ch_info->ch_handle.channel_id = QCN_SDIO_CH_1;
+       } else if (!strcmp(channel_data->name, "SDIO_AL_QMI_CH0")) {
+               if (atomic_read(&sdio_ctxt->ch_status[QCN_SDIO_CH_2]) < 0)
+                       ch_info->ch_handle.channel_id = QCN_SDIO_CH_2;
+       } else if (!strcmp(channel_data->name, "SDIO_AL_DIAG_CH0")) {
+               if (atomic_read(&sdio_ctxt->ch_status[QCN_SDIO_CH_3]) < 0)
+                       ch_info->ch_handle.channel_id = QCN_SDIO_CH_3;
+       } else {
+               pr_err("%s: SDIO: Invalid CH name: %s\n", __func__,
+                                                       channel_data->name);
+               kfree(ch_info);
+               return ERR_PTR(-EINVAL);
+       }
+
+       client_info = container_of(client_handle, struct qcn_sdio_client_info,
+                                                               cli_handle);
+       ch_info->ch_handle.channel_data = &ch_info->ch_data;
+       ch_info->chandle = &client_info->cli_handle;
+       list_add_tail(&ch_info->ch_list, &client_info->ch_head);
+       sdio_ctxt->ch[ch_info->ch_handle.channel_id] = ch_info;
+       atomic_set(&sdio_ctxt->ch_status[ch_info->ch_handle.channel_id], 0);
+
+       return &ch_info->ch_handle;
+}
+EXPORT_SYMBOL(sdio_al_register_channel);
+
+void sdio_al_deregister_channel(struct sdio_al_channel_handle *ch_handle)
+{
+       int ret = 0;
+       struct qcn_sdio_ch_info *ch_info = NULL;
+
+       if (!ch_handle) {
+               pr_err("%s: Error: Invalid Param\n", __func__);
+               return;
+       }
+
+       do {
+               ret = atomic_cmpxchg(
+                       &sdio_ctxt->ch_status[ch_handle->channel_id], 0, 1);
+               if (ret) {
+                       if (ret == -1)
+                               return;
+
+                       usleep_range(1000, 1500);
+               }
+       } while (ret);
+
+       ch_info = sdio_ctxt->ch[ch_handle->channel_id];
+       if (ch_info) {
+               list_del(&ch_info->ch_list);
+               sdio_ctxt->ch[ch_handle->channel_id] = NULL;
+               atomic_set(&sdio_ctxt->ch_status[ch_handle->channel_id], -1);
+               kfree(ch_info);
+       }
+}
+EXPORT_SYMBOL(sdio_al_deregister_channel);
+
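+/*
+ * Queue an asynchronous transfer. The request is completed from the
+ * qcn_sdio workqueue via the channel's transfer callback; TX requests are
+ * rejected when the free request pool runs low, and RX queuing is
+ * serialized with async_lock to preserve ordering.
+ */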
+int sdio_al_queue_transfer_async(struct sdio_al_channel_handle *handle,
+               enum sdio_al_dma_direction dir,
+               void *buf, size_t len, int priority, void *ctxt)
+{
+       struct qcn_sdio_rw_info *rw_req = NULL;
+       u32 cid = QCN_SDIO_CH_MAX;
+
+       if (!atomic_read(&xport_status))
+               return -ENODEV;
+
+       if (!handle) {
+               pr_err("%s: Error: Invalid Param\n", __func__);
+               return -EINVAL;
+       }
+
+       cid = handle->channel_id;
+
+       if (!(cid < QCN_SDIO_CH_MAX) ||
+                               (atomic_read(&sdio_ctxt->ch_status[cid]) < 0))
+               return -EINVAL;
+
+       if (dir == SDIO_AL_TX && atomic_read(&sdio_ctxt->free_list_count) <= 8)
+               return -ENOMEM;
+
+       rw_req = qcn_sdio_alloc_rw_req();
+       if (!rw_req)
+               return -ENOMEM;
+
+       rw_req->cid = cid;
+       rw_req->dir = dir;
+       rw_req->buf = buf;
+       rw_req->len = len;
+       rw_req->ctxt = ctxt;
+
+       if (dir == SDIO_AL_RX)
+               spin_lock(&async_lock);
+
+       qcn_sdio_add_rw_req(rw_req);
+       queue_work(sdio_ctxt->qcn_sdio_wq, &sdio_ctxt->sdio_rw_w);
+
+       if (dir == SDIO_AL_RX)
+               spin_unlock(&async_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(sdio_al_queue_transfer_async);
+
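+/*
+ * Synchronous transfer. RX falls back to the asynchronous path (returning
+ * 1) whenever async requests are already pending so that reads stay in
+ * order; otherwise the transfer is performed inline in the caller's
+ * context.
+ */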
+int sdio_al_queue_transfer(struct sdio_al_channel_handle *ch_handle,
+               enum sdio_al_dma_direction dir,
+               void *buf, size_t len, int priority)
+{
+       int ret = 0;
+       u32 cid = QCN_SDIO_CH_MAX;
+
+       if (!atomic_read(&xport_status))
+               return -ENODEV;
+
+       if (!ch_handle) {
+               pr_err("%s: SDIO: Invalid Param\n", __func__);
+               return -EINVAL;
+       }
+
+       if (dir == SDIO_AL_RX && !list_empty(&sdio_ctxt->rw_wait_q) &&
+                               !atomic_read(&sdio_ctxt->wait_list_count)) {
+               sdio_al_queue_transfer_async(ch_handle, dir, buf, len, true,
+                                                       (void *)(uintptr_t)len);
+               pr_info("%s: switching to async\n", __func__);
+               ret = 1;
+       } else {
+               cid = ch_handle->channel_id;
+
+               if (!(cid < QCN_SDIO_CH_MAX))
+                       return -EINVAL;
+
+               if (dir == SDIO_AL_RX) {
+                       if (!atomic_read(&sdio_ctxt->wait_list_count))
+                               ret = qcn_sdio_recv_buff(cid, buf, len);
+                       else {
+                               sdio_al_queue_transfer_async(ch_handle, dir,
+                                       buf, len, true, (void *)(uintptr_t)len);
+                               pr_info("%s switching to async\n", __func__);
+                               ret = 1;
+                       }
+
+                       if (rx_dump)
+                               HEX_DUMP("SYNC_RECV: ", buf, len);
+               } else if (dir == SDIO_AL_TX) {
+                       ret = qcn_sdio_send_buff(cid, buf, len);
+                       if (tx_dump)
+                               HEX_DUMP("SYNC_SEND: ", buf, len);
+               } else
+                       ret = -EINVAL;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(sdio_al_queue_transfer);
+
+int sdio_al_meta_transfer(struct sdio_al_channel_handle *handle,
+                                       unsigned int data, unsigned int trans)
+{
+       u32 cid = QCN_SDIO_CH_MAX;
+       u8 event = 0;
+
+       if (!atomic_read(&xport_status))
+               return -ENODEV;
+
+       if (!handle)
+               return -EINVAL;
+
+       cid = handle->channel_id;
+
+       if (!(cid < QCN_SDIO_CH_MAX))
+               return -EINVAL;
+
+       event = (u8)((data & QCN_SDIO_HMETA_EVENT_BMSK) >>
+                                               QCN_SDIO_HMETA_EVENT_SHFT);
+
+       if (cid == QCN_SDIO_CH_0) {
+               if ((event < QCN_SDIO_META_START_CH0) ||
+                                       (event >= QCN_SDIO_META_START_CH1)) {
+                       return -EINVAL;
+               }
+       } else if (cid == QCN_SDIO_CH_1) {
+               if ((event < QCN_SDIO_META_START_CH1) ||
+                                       (event >= QCN_SDIO_META_START_CH2)) {
+                       return -EINVAL;
+               }
+       } else if (cid == QCN_SDIO_CH_2) {
+               if ((event < QCN_SDIO_META_START_CH2) ||
+                                       (event >= QCN_SDIO_META_START_CH3)) {
+                       return -EINVAL;
+               }
+       } else if (cid == QCN_SDIO_CH_3) {
+               if ((event < QCN_SDIO_META_START_CH3) ||
+                                       (event >= QCN_SDIO_META_END)) {
+                       return -EINVAL;
+               }
+       }
+
+       return qcn_send_meta_info(event, data);
+}
+EXPORT_SYMBOL(sdio_al_meta_transfer);
+
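+/*
+ * Simulate card insertion or removal by adding/removing the MMC host that
+ * currently owns the QCN function.
+ */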
+int qcn_sdio_card_state(bool enable)
+{
+       int ret = 0;
+
+       if (!current_host)
+               return -ENODEV;
+
+       mmc_try_claim_host(current_host, 2000);
+       if (enable) {
+               if (!atomic_read(&xport_status)) {
+                       ret = mmc_add_host(current_host);
+                       if (ret)
+                               pr_err("%s ret = %d\n", __func__, ret);
+               }
+       } else {
+               if (atomic_read(&xport_status))
+                       mmc_remove_host(current_host);
+       }
+       mmc_release_host(current_host);
+
+       return ret;
+}
+EXPORT_SYMBOL(qcn_sdio_card_state);
+
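+/* sysfs write hook: writing 0/1 to card_state forces a removal/insertion. */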
+static ssize_t qcn_card_state(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf,
+                             size_t count)
+{
+       int state = 0;
+
+       if (sscanf(buf, "%d", &state) != 1)
+               return -EINVAL;
+
+       qcn_sdio_card_state(state);
+
+       return count;
+}
+static DEVICE_ATTR(card_state, 0220, NULL, qcn_card_state);
+
+static int qcn_create_sysfs(struct device *dev)
+{
+       int ret = 0;
+
+       ret = device_create_file(dev, &dev_attr_card_state);
+       if (ret) {
+               pr_err("Failed to create device file, err = %d\n", ret);
+               goto out;
+       }
+
+       return 0;
+out:
+       return ret;
+}
diff --git a/drivers/platform/msm/qcn/qcn_sdio.h b/drivers/platform/msm/qcn/qcn_sdio.h
new file mode 100644 (file)
index 0000000..1a63e8e
--- /dev/null
@@ -0,0 +1,175 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCN_SDIO_H_
+#define _QCN_SDIO_H_
+
+#include <linux/qcn_sdio_al.h>
+#include "qcn_sdio_hwio.h"
+
+
+#define MANUFACTURER_CODE              (0x70)
+#define MANUFACTURER_ID_QCN_BASE       (0x400B)
+#define QCN_SDIO_TTY_BLK_SZ            (512)
+#define QCN_SDIO_MROM_BLK_SZ           (512)
+#define QCN_SDIO_RW_REQ_MAX            (128)
+
+#define QCN_SDIO_DMA0_RX_CNUM          (0x4)
+#define QCN_SDIO_DMA0_TX_CNUM          (0xC)
+#define QCN_SDIO_DMA1_RX_CNUM          (0x14)
+#define QCN_SDIO_DMA1_TX_CNUM          (0x1C)
+
+#define QCN_SDIO_CRQ_START             (0x1)
+#define QCN_SDIO_CRQ_END               (0x2)
+
+#define QCN_SDIO_META_VER_0            (0)
+#define QCN_SDIO_META_VER_1            (1)
+
+#if (QCN_SDIO_META_VER_0)
+#define QCN_SDIO_LMETA_FMT_VER         (0)
+#define QCN_SDIO_LMETA_VER_BMSK                (0x0000000F)
+#define QCN_SDIO_LMETA_VER_SHFT                (0)
+#define QCN_SDIO_LMETA_SW_BMSK         (0x000000F0)
+#define QCN_SDIO_LMETA_SW_SHFT         (4)
+#define QCN_SDIO_LMETA_EVENT_BMSK      (0x0000FF00)
+#define QCN_SDIO_LMETA_EVENT_SHFT      (8)
+#define QCN_SDIO_LMETA_DATA_BMSK       (0xFFFF0000)
+#define QCN_SDIO_LMETA_DATA_SHFT       (16)
+#define QCN_SDIO_LMETA_TREG_BMSK       (0x00F00000)
+#define QCN_SDIO_LMETA_TREG_SHFT       (20)
+#define QCN_SDIO_LMETA_TLEN_BMSK       (0x000F0000)
+#define QCN_SDIO_LMETA_TLEN_SHFT       (16)
+
+#define QCN_SDIO_HMETA_FMT_VER         (0)
+#define QCN_SDIO_HMETA_VER_BMSK                (0x0000000F)
+#define QCN_SDIO_HMETA_VER_SHFT                (0)
+#define QCN_SDIO_HMETA_SW_BMSK         (0x000000F0)
+#define QCN_SDIO_HMETA_SW_SHFT         (4)
+#define QCN_SDIO_HMETA_EVENT_BMSK      (0x00003F00)
+#define QCN_SDIO_HMETA_EVENT_SHFT      (8)
+#define QCN_SDIO_HMETA_TRANS_BMSK      (0x00400000)
+#define QCN_SDIO_HMETA_TRANS_SHFT      (22)
+#define QCN_SDIO_HMETA_DATA_BMSK       (0xFF000000)
+#define QCN_SDIO_HMETA_DATA_SHFT       (24)
+#define QCN_SDIO_HMETA_TREG_BMSK       (0xF0000000)
+#define QCN_SDIO_HMETA_TREG_SHFT       (28)
+#define QCN_SDIO_HMETA_TLEN_BMSK       (0x0F000000)
+#define QCN_SDIO_HMETA_TLEN_SHFT       (24)
+
+#elif (QCN_SDIO_META_VER_1)
+
+#define QCN_SDIO_MAJOR_VER             (0x00000000)
+#define QCN_SDIO_MINOR_VER             (0x00000010)
+
+#define QCN_SDIO_LMETA_FMT_VER         (1)
+#define QCN_SDIO_LMETA_EVENT_BMSK      (0xFF000000)
+#define QCN_SDIO_LMETA_EVENT_SHFT      (24)
+#define QCN_SDIO_LMETA_DATA_BMSK       (0x00003FFF)
+#define QCN_SDIO_LMETA_DATA_SHFT       (0)
+#define QCN_SDIO_LMETA_SW_BMSK         (0x0000000F)
+#define QCN_SDIO_LMETA_SW_SHFT         (0)
+#define QCN_SDIO_LMETA_VER_MAJ_BMSK    (0x000000F0)
+#define QCN_SDIO_LMETA_VER_MAJ_SHFT    (4)
+#define QCN_SDIO_LMETA_VER_MIN_BMSK    (0x00000F00)
+#define QCN_SDIO_LMETA_VER_MIN_SHFT    (8)
+
+#define QCN_SDIO_HMETA_FMT_VER         (1)
+#define QCN_SDIO_HMETA_EVENT_BMSK      (0xFF000000)
+#define QCN_SDIO_HMETA_EVENT_SHFT      (24)
+#define QCN_SDIO_HMETA_DATA_BMSK       (0x00003FFF)
+#define QCN_SDIO_HMETA_DATA_SHFT       (0)
+#define QCN_SDIO_HMETA_TRANS_BMSK      (0x00400000)
+#define QCN_SDIO_HMETA_TRANS_SHFT      (22)
+#define QCN_SDIO_HMETA_SW_BMSK         (0x0000000F)
+#define QCN_SDIO_HMETA_SW_SHFT         (0)
+#define QCN_SDIO_HMETA_VER_MAJ_BMSK    (0x000000F0)
+#define QCN_SDIO_HMETA_VER_MAJ_SHFT    (4)
+#define QCN_SDIO_HMETA_VER_MIN_BMSK    (0x00000F00)
+#define QCN_SDIO_HMETA_VER_MIN_SHFT    (8)
+
+#define QCN_SDIO_META_START_CH0                (0x20)
+#define QCN_SDIO_META_START_CH1                (0x40)
+#define QCN_SDIO_META_START_CH2                (0x60)
+#define QCN_SDIO_META_START_CH3                (0x80)
+#define QCN_SDIO_META_END              (0xA0)
+
+#endif /* QCN_SDIO_META_VER */
+
+enum qcn_sdio_cli_id {
+       QCN_SDIO_CLI_ID_INVALID = 0,
+       QCN_SDIO_CLI_ID_TTY,
+       QCN_SDIO_CLI_ID_WLAN,
+       QCN_SDIO_CLI_ID_QMI,
+       QCN_SDIO_CLI_ID_DIAG,
+       QCN_SDIO_CLI_ID_MAX
+};
+
+enum qcn_sdio_ch_id {
+       QCN_SDIO_CH_0 = 0,
+       QCN_SDIO_CH_1,
+       QCN_SDIO_CH_2,
+       QCN_SDIO_CH_3,
+       QCN_SDIO_CH_MAX,
+};
+
+enum qcn_sdio_sw_mode {
+       QCN_SDIO_SW_RESET = 0,
+       QCN_SDIO_SW_PBL,
+       QCN_SDIO_SW_SBL,
+       QCN_SDIO_SW_RDDM,
+       QCN_SDIO_SW_MROM,
+       QCN_SDIO_SW_MAX,
+};
+
+enum qcn_sdio_host_event {
+       QCN_SDIO_INVALID_HEVENT = 0,
+       QCN_SDIO_SW_MODE_HEVENT,
+       QCN_SDIO_BLK_SZ_HEVENT,
+       QCN_SDIO_DOORBELL_HEVENT,
+       QCN_SDIO_MAX_HEVENT = 63,
+};
+
+enum qcn_sdio_local_event {
+       QCN_SDIO_INVALID_LEVENT = 0,
+       QCN_SDIO_SW_MODE_LEVENT,
+       QCN_SDIO_MAX_LEVENT = 255,
+};
+
+
+struct qcn_sdio_client_info {
+       int is_probed;
+       struct sdio_al_client_data cli_data;
+       struct sdio_al_client_handle cli_handle;
+       struct list_head cli_list;
+       struct list_head ch_head;
+};
+
+struct qcn_sdio_ch_info {
+       struct sdio_al_xfer_result result;
+       struct sdio_al_client_handle *chandle;
+       struct sdio_al_channel_data ch_data;
+       struct sdio_al_channel_handle ch_handle;
+       struct list_head ch_list;
+       u32 crq_len;
+};
+
+struct qcn_sdio_rw_info {
+       struct list_head list;
+       u32 cid;
+       enum sdio_al_dma_direction dir;
+       void *buf;
+       size_t len;
+       void *ctxt;
+};
+
+#endif /* _QCN_SDIO_H_ */
+
diff --git a/drivers/platform/msm/qcn/qcn_sdio_hwio.h b/drivers/platform/msm/qcn/qcn_sdio_hwio.h
new file mode 100644 (file)
index 0000000..9ea9b6a
--- /dev/null
@@ -0,0 +1,296 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCN_SDIO_HWIO_
+#define _QCN_SDIO_HWIO_
+
+#define SDIO_QCN_DMA0_RX                       (0x0)
+#define SDIO_QCN_DMA0_RX_MASK                  (0xffffffff)
+#define SDIO_QCN_DMA0_RX_SHIFT                 (0)
+
+#define SDIO_QCN_DMA1_RX                       (0x4)
+#define SDIO_QCN_DMA1_RX_MASK                  (0xffffffff)
+#define SDIO_QCN_DMA1_RX_SHIFT                 (0)
+
+#define SDIO_QCN_DMA0_TX                       (0x8)
+#define SDIO_QCN_DMA0_TX_MASK                  (0xffffffff)
+#define SDIO_QCN_DMA0_TX_SHIFT                 (0)
+
+#define SDIO_QCN_DMA1_TX                       (0xC)
+#define SDIO_QCN_DMA1_TX_MASK                  (0xffffffff)
+#define SDIO_QCN_DMA1_TX_SHIFT                 (0)
+
+
+#define SDIO_QCN_MC_DMA0_RX_CH0                        (0x10)
+#define SDIO_QCN_MC_DMA0_RX_CH0_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA0_RX_CH0_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA0_RX_CH1                        (0x14)
+#define SDIO_QCN_MC_DMA0_RX_CH1_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA0_RX_CH1_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA0_RX_CH2                        (0x18)
+#define SDIO_QCN_MC_DMA0_RX_CH2_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA0_RX_CH2_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA0_RX_CH3                        (0x1C)
+#define SDIO_QCN_MC_DMA0_RX_CH3_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA0_RX_CH3_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA0_TX_CH0                        (0x30)
+#define SDIO_QCN_MC_DMA0_TX_CH0_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA0_TX_CH0_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA0_TX_CH1                        (0x34)
+#define SDIO_QCN_MC_DMA0_TX_CH1_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA0_TX_CH1_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA0_TX_CH2                        (0x38)
+#define SDIO_QCN_MC_DMA0_TX_CH2_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA0_TX_CH2_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA0_TX_CH3                        (0x3C)
+#define SDIO_QCN_MC_DMA0_TX_CH3_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA0_TX_CH3_SHIFT          (0)
+
+
+#define SDIO_QCN_MC_DMA1_RX_CH0                        (0x50)
+#define SDIO_QCN_MC_DMA1_RX_CH0_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA1_RX_CH0_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA1_RX_CH1                        (0x54)
+#define SDIO_QCN_MC_DMA1_RX_CH1_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA1_RX_CH1_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA1_RX_CH2                        (0x58)
+#define SDIO_QCN_MC_DMA1_RX_CH2_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA1_RX_CH2_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA1_RX_CH3                        (0x5C)
+#define SDIO_QCN_MC_DMA1_RX_CH3_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA1_RX_CH3_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA1_TX_CH0                        (0x70)
+#define SDIO_QCN_MC_DMA1_TX_CH0_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA1_TX_CH0_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA1_TX_CH1                        (0x74)
+#define SDIO_QCN_MC_DMA1_TX_CH1_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA1_TX_CH1_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA1_TX_CH2                        (0x78)
+#define SDIO_QCN_MC_DMA1_TX_CH2_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA1_TX_CH2_SHIFT          (0)
+
+#define SDIO_QCN_MC_DMA1_TX_CH3                        (0x7C)
+#define SDIO_QCN_MC_DMA1_TX_CH3_MASK           (0xffffffff)
+#define SDIO_QCN_MC_DMA1_TX_CH3_SHIFT          (0)
+
+
+#define SDIO_QCN_CONFIG                                (0x100)
+#define SDIO_QCN_CONFIG_OOB_MASK               (0x00000002)
+#define SDIO_QCN_CONFIG_OOB_SHIFT              (1)
+#define SDIO_QCN_CONFIG_QE_MASK                        (0x00000001)
+#define SDIO_QCN_CONFIG_QE_SHIFT               (0)
+
+
+#define SDIO_QCN_HRQ_PUSH                      (0x104)
+#define SDIO_QCN_HRQ_PUSH_UD_MASK              (0xff000000)
+#define SDIO_QCN_HRQ_PUSH_UD_SHIFT             (24)
+#define SDIO_QCN_HRQ_PUSH_BLK_MASK             (0x00800000)
+#define SDIO_QCN_HRQ_PUSH_BLK_SHIFT            (23)
+#define SDIO_QCN_HRQ_PUSH_TRANS_MASK           (0x00400000)
+#define SDIO_QCN_HRQ_PUSH_TRANS_SHIFT          (22)
+#define SDIO_QCN_HRQ_PUSH_BLK_CNT_MASK         (0x00003ffe)
+#define SDIO_QCN_HRQ_PUSH_BLK_CNT_SHIFT                (1)
+#define SDIO_QCN_HRQ_PUSH_DIR_MASK             (0x00000001)
+#define SDIO_QCN_HRQ_PUSH_DIR_SHIFT            (0)
+#define SDIO_QCN_HRQ_PUSH_UD1_MASK             (0xffc00000)
+#define SDIO_QCN_HRQ_PUSH_UD1_SHIFT            (22)
+#define SDIO_QCN_HRQ_PUSH_UD0_MASK             (0x00003fff)
+#define SDIO_QCN_HRQ_PUSH_UD0_SHIFT            (0)
+
+
+#define SDIO_QCN_CRQ_TX_PULL                   (0x108)
+#define SDIO_QCN_CRQ_TX_PULL_REQ_ID_MASK       (0x3c)
+#define SDIO_QCN_CRQ_TX_PULL_REQ_ID_SHIFT      (2)
+#define SDIO_QCN_CRQ_TX_PULL_CH_NUM_MASK       (0x3)
+#define SDIO_QCN_CRQ_TX_PULL_CH_NUM_SHIFT      (0)
+
+#define SDIO_QCN_CRQ_RX_PULL                   (0x108)
+#define SDIO_QCN_CRQ_RX_PULL_REQ_ID_MASK       (0x7ffc)
+#define SDIO_QCN_CRQ_RX_PULL_REQ_ID_SHIFT      (2)
+#define SDIO_QCN_CRQ_RX_PULL_CH_NUM_MASK       (0x3)
+#define SDIO_QCN_CRQ_RX_PULL_CH_NUM_SHIFT      (0)
+
+
+#define SDIO_QCN_IRQ_STATUS                    (0x10C)
+#define SDIO_QCN_IRQ_LOCAL_MASK                        (0x80)
+#define SDIO_QCN_IRQ_LOCAL_SHIFT               (7)
+#define SDIO_QCN_IRQ_DAM_WACCESS_MASK          (0x40)
+#define SDIO_QCN_IRQ_DAM_WACCESS_SHIFT         (6)
+#define SDIO_QCN_IRQ_DAM_MISMATCH_MASK         (0x20)
+#define SDIO_QCN_IRQ_DAM_MISMATCH_SHIFT                (5)
+#define SDIO_QCN_IRQ_SYS_ERR_MASK              (0x10)
+#define SDIO_QCN_IRQ_SYS_ERR_SHIFT             (4)
+#define SDIO_QCN_IRQ_UNDERFLOW_MASK            (0x8)
+#define SDIO_QCN_IRQ_UNDERFLOW_SHIFT           (3)
+#define SDIO_QCN_IRQ_OVERFLOW_MASK             (0x4)
+#define SDIO_QCN_IRQ_OVERFLOW_SHIFT            (2)
+#define SDIO_QCN_IRQ_CH_MISMATCH_MASK          (0x2)
+#define SDIO_QCN_IRQ_CH_MISMATCH_SHIFT         (1)
+#define SDIO_QCN_IRQ_CRQ_READY_MASK            (0x1)
+#define SDIO_QCN_IRQ_CRQ_READY_SHIFT           (0)
+
+
+#define SDIO_QCN_IRQ_EN                                (0x110)
+#define SDIO_QCN_IRQ_EN_LOCAL_MASK             (0x80)
+#define SDIO_QCN_IRQ_EN_LOCAL_SHIFT            (7)
+#define SDIO_QCN_IRQ_EN_DAM_WACCESS_MASK       (0x40)
+#define SDIO_QCN_IRQ_EN_DAM_WACCESS_SHIFT      (6)
+#define SDIO_QCN_IRQ_EN_DAM_MISMATCH_MASK      (0x20)
+#define SDIO_QCN_IRQ_EN_DAM_MISMATCH_SHIFT     (5)
+#define SDIO_QCN_IRQ_EN_SYS_ERR_MASK           (0x10)
+#define SDIO_QCN_IRQ_EN_SYS_ERR_SHIFT          (4)
+#define SDIO_QCN_IRQ_EN_UNDERFLOW_MASK         (0x8)
+#define SDIO_QCN_IRQ_EN_UNDERFLOW_SHIFT                (3)
+#define SDIO_QCN_IRQ_EN_OVERFLOW_MASK          (0x4)
+#define SDIO_QCN_IRQ_EN_OVERFLOW_SHIFT         (2)
+#define SDIO_QCN_IRQ_EN_CH_MISMATCH_MASK       (0x2)
+#define SDIO_QCN_IRQ_EN_CH_MISMATCH_SHIFT      (1)
+#define SDIO_QCN_IRQ_EN_CRQ_READY_MASK         (0x1)
+#define SDIO_QCN_IRQ_EN_CRQ_READY_SHIFT                (0)
+
+
+#define SDIO_QCN_IRQ_CLR                       (0x114)
+#define SDIO_QCN_IRQ_CLR_LOCAL_MASK            (0x80)
+#define SDIO_QCN_IRQ_CLR_LOCAL_SHIFT           (7)
+#define SDIO_QCN_IRQ_CLR_DAM_WACCESS_MASK      (0x40)
+#define SDIO_QCN_IRQ_CLR_DAM_WACCESS_SHIFT     (6)
+#define SDIO_QCN_IRQ_CLR_DAM_MISMATCH_MASK     (0x20)
+#define SDIO_QCN_IRQ_CLR_DAM_MISMATCH_SHIFT    (5)
+#define SDIO_QCN_IRQ_CLR_SYS_ERR_MASK          (0x10)
+#define SDIO_QCN_IRQ_CLR_SYS_ERR_SHIFT         (4)
+#define SDIO_QCN_IRQ_CLR_UNDERFLOW_MASK                (0x8)
+#define SDIO_QCN_IRQ_CLR_UNDERFLOW_SHIFT       (3)
+#define SDIO_QCN_IRQ_CLR_OVERFLOW_MASK         (0x4)
+#define SDIO_QCN_IRQ_CLR_OVERFLOW_SHIFT                (2)
+#define SDIO_QCN_IRQ_CLR_CH_MISMATCH_MASK      (0x2)
+#define SDIO_QCN_IRQ_CLR_CH_MISMATCH_SHIFT     (1)
+#define SDIO_QCN_IRQ_CLR_CRQ_READY_MASK                (0x1)
+#define SDIO_QCN_IRQ_CLR_CRQ_READY_SHIFT       (0)
+
+
+#define SDIO_QCN_IRQ_FRC                       (0x11c)
+#define SDIO_QCN_IRQ_FRC_LOCAL_MASK            (0x80)
+#define SDIO_QCN_IRQ_FRC_LOCAL_SHIFT           (7)
+#define SDIO_QCN_IRQ_FRC_DAM_WACCESS_MASK      (0x40)
+#define SDIO_QCN_IRQ_FRC_DAM_WACCESS_SHIFT     (6)
+#define SDIO_QCN_IRQ_FRC_DAM_MISMATCH_MASK     (0x20)
+#define SDIO_QCN_IRQ_FRC_DAM_MISMATCH_SHIFT    (5)
+#define SDIO_QCN_IRQ_FRC_SYS_ERR_MASK          (0x10)
+#define SDIO_QCN_IRQ_FRC_SYS_ERR_SHIFT         (4)
+#define SDIO_QCN_IRQ_FRC_UNDERFLOW_MASK                (0x8)
+#define SDIO_QCN_IRQ_FRC_UNDERFLOW_SHIFT       (3)
+#define SDIO_QCN_IRQ_FRC_OVERFLOW_MASK         (0x4)
+#define SDIO_QCN_IRQ_FRC_OVERFLOW_SHIFT                (2)
+#define SDIO_QCN_IRQ_FRC_CH_MISMATCH_MASK      (0x2)
+#define SDIO_QCN_IRQ_FRC_CH_MISMATCH_SHIFT     (1)
+#define SDIO_QCN_IRQ_FRC_CRQ_READY_MASK                (0x1)
+#define SDIO_QCN_IRQ_FRC_CRQ_READY_SHIFT       (0)
+
+
+#define SDIO_QCN_HRQ_ACK_PULL                  (0x118)
+#define SDIO_QCN_HRQ_ACK_PULL_REQ_ID_MASK      (0x3c0)
+#define SDIO_QCN_HRQ_ACK_PULL_REQ_ID_SHIFT     (6)
+#define SDIO_QCN_HRQ_ACK_PULL_CH_NUM_MASK      (0x3f)
+#define SDIO_QCN_HRQ_ACK_PULL_CH_NUM_SHIFT     (0)
+
+
+#define SDIO_QCN_CRQ_PULL                      (0x118)
+#define SDIO_QCN_CRQ_PULL_UD_MASK              (0xff000000)
+#define SDIO_QCN_CRQ_PULL_UD_SHIFT             (24)
+#define SDIO_QCN_CRQ_PULL_BLK_MASK             (0x00800000)
+#define SDIO_QCN_CRQ_PULL_BLK_SHIFT            (23)
+#define SDIO_QCN_CRQ_PULL_TRANS_MASK           (0x00400000)
+#define SDIO_QCN_CRQ_PULL_TRANS_SHIFT          (22)
+#define SDIO_QCN_CRQ_PULL_BLK_CNT_MASK         (0x0007ffc0)
+#define SDIO_QCN_CRQ_PULL_BLK_CNT_SHIFT                (6)
+#define SDIO_QCN_CRQ_PULL_CH_NUM_MASK          (0x3f)
+#define SDIO_QCN_CRQ_PULL_CH_NUM_SHIFT         (0)
+
+
+#define SDIO_QCN_LOW_PWR                       (0x120)
+#define SDIO_QCN_LOW_PWR_GO_MASK               (0x1)
+#define SDIO_QCN_LOW_PWR_GO_SHIFT              (0)
+
+
+#define SDIO_QCN_HOST_TRANS_REG0               (0x124)
+#define SDIO_QCN_HOST_TRANS_REG0_MASK          (0xffffffff)
+#define SDIO_QCN_HOST_TRANS_REG0_SHIFT         (0)
+#define SDIO_QCN_HOST_TRANS_REG1               (0x128)
+#define SDIO_QCN_HOST_TRANS_REG1_MASK          (0xffffffff)
+#define SDIO_QCN_HOST_TRANS_REG1_SHIFT         (0)
+#define SDIO_QCN_HOST_TRANS_REG2               (0x12C)
+#define SDIO_QCN_HOST_TRANS_REG2_MASK          (0xffffffff)
+#define SDIO_QCN_HOST_TRANS_REG2_SHIFT         (0)
+#define SDIO_QCN_HOST_TRANS_REG3               (0x130)
+#define SDIO_QCN_HOST_TRANS_REG3_MASK          (0xffffffff)
+#define SDIO_QCN_HOST_TRANS_REG3_SHIFT         (0)
+
+
+#define SDIO_QCN_CLIENT_TRANS_REG0             (0x144)
+#define SDIO_QCN_CLIENT_TRANS_REG0_MASK                (0xffffffff)
+#define SDIO_QCN_CLIENT_TRANS_REG0_SHIFT       (0)
+#define SDIO_QCN_CLIENT_TRANS_REG1             (0x148)
+#define SDIO_QCN_CLIENT_TRANS_REG1_MASK                (0xffffffff)
+#define SDIO_QCN_CLIENT_TRANS_REG1_SHIFT       (0)
+#define SDIO_QCN_CLIENT_TRANS_REG2             (0x14C)
+#define SDIO_QCN_CLIENT_TRANS_REG2_MASK                (0xffffffff)
+#define SDIO_QCN_CLIENT_TRANS_REG2_SHIFT       (0)
+#define SDIO_QCN_CLIENT_TRANS_REG3             (0x150)
+#define SDIO_QCN_CLIENT_TRANS_REG3_MASK                (0xffffffff)
+#define SDIO_QCN_CLIENT_TRANS_REG3_SHIFT       (0)
+
+
+#define SDIO_QCN_LOCAL_INFO                    (0x00000164)
+#define SDIO_QCN_LOCAL_INFO_MASK               (0xffffffff)
+#define SDIO_QCN_LOCAL_INFO_SHIFT              (0)
+
+#define SDIO_QCN_SDIOC_CONFIG                  (0x200)
+#define SDIO_QCN_SDIOC_CONFIG_MC_DMA1_DIR_BMSK  (0x40000)
+#define SDIO_QCN_SDIOC_CONFIG_MC_DMA1_DIR_SHFT  (0x12)
+#define SDIO_QCN_SDIOC_CONFIG_MC_DMA0_DIR_BMSK  (0x20000)
+#define SDIO_QCN_SDIOC_CONFIG_MC_DMA0_DIR_SHFT  (0x11)
+#define SDIO_QCN_SDIOC_CONFIG_MC_DMA_EN_BMSK    (0x10000)
+#define SDIO_QCN_SDIOC_CONFIG_MC_DMA_EN_SHFT    (0x10)
+#define SDIO_QCN_SDIOC_CONFIG_SW_CLK_EN_BMSK    (0x80)
+#define SDIO_QCN_SDIOC_CONFIG_SW_CLK_EN_SHFT    (0x7)
+#define SDIO_QCN_SDIOC_CONFIG_WR_HRQ_MEM_BMSK  (0x40)
+#define SDIO_QCN_SDIOC_CONFIG_WR_HRQ_MEM_SHFT   (0x6)
+#define SDIO_QCN_SDIOC_CONFIG_CLR_CRQ_PUSH_BMSK (0x20)
+#define SDIO_QCN_SDIOC_CONFIG_CLR_CRQ_PUSH_SHFT (0x5)
+#define SDIO_QCN_SDIOC_CONFIG_DIS_LEN_CHK_BMSK  (0x10)
+#define SDIO_QCN_SDIOC_CONFIG_DIS_LEN_CHK_SHFT  (0x4)
+#define SDIO_QCN_SDIOC_CONFIG_WRAP_ERROR_BMSK   (0x8)
+#define SDIO_QCN_SDIOC_CONFIG_WRAP_ERROR_SHFT   (0x3)
+#define SDIO_QCN_SDIOC_CONFIG_ADMA_64BIT_BMSK   (0x4)
+#define SDIO_QCN_SDIOC_CONFIG_ADMA_64BIT_SHFT   (0x2)
+#define SDIO_QCN_SDIOC_CONFIG_ADMA_INT_BMSK     (0x2)
+#define SDIO_QCN_SDIOC_CONFIG_ADMA_INT_SHFT     (0x1)
+#define SDIO_QCN_SDIOC_CONFIG_DMA_ENABLE_BMSK   (0x1)
+#define SDIO_QCN_SDIOC_CONFIG_DMA_ENABLE_SHFT   (0x0)
+
+
+
+#endif /* _QCN_SDIO_HWIO_ */
+
index 2f07cd6..76ae384 100644 (file)
@@ -129,6 +129,14 @@ static long pps_cdev_ioctl(struct file *file,
                        pps->params.mode |= PPS_CANWAIT;
                pps->params.api_version = PPS_API_VERS;
 
+               /*
+                * Clear unused fields of pps_kparams to avoid leaking
+                * uninitialized data of the PPS_SETPARAMS caller via
+                * PPS_GETPARAMS
+                */
+               pps->params.assert_off_tu.flags = 0;
+               pps->params.clear_off_tu.flags = 0;
+
                spin_unlock_irq(&pps->lock);
 
                break;
index 47694dd..fa3a124 100644 (file)
@@ -382,8 +382,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
        regulator_desc_s2mps11_buck1_4(4),
        regulator_desc_s2mps11_buck5,
        regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
-       regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
+       regulator_desc_s2mps11_buck67810(7, MIN_750_MV, STEP_12_5_MV),
+       regulator_desc_s2mps11_buck67810(8, MIN_750_MV, STEP_12_5_MV),
        regulator_desc_s2mps11_buck9,
        regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
 };
index 286782c..6601047 100644 (file)
@@ -396,6 +396,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
        char msg_format;
        char msg_no;
 
+       /*
+        * intrc values ENODEV, ENOLINK and EPERM
+        * will be obtained from sleep_on to indicate that no
+        * IO operation can be started
+        */
+       if (cqr->intrc == -ENODEV)
+               return 1;
+
+       if (cqr->intrc == -ENOLINK)
+               return 1;
+
+       if (cqr->intrc == -EPERM)
+               return 1;
+
        sense = dasd_get_sense(&cqr->irb);
        if (!sense)
                return 0;
@@ -460,12 +474,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
        lcu->flags &= ~NEED_UAC_UPDATE;
        spin_unlock_irqrestore(&lcu->lock, flags);
 
-       do {
-               rc = dasd_sleep_on(cqr);
-               if (rc && suborder_not_supported(cqr))
-                       return -EOPNOTSUPP;
-       } while (rc && (cqr->retries > 0));
-       if (rc) {
+       rc = dasd_sleep_on(cqr);
+       if (rc && !suborder_not_supported(cqr)) {
                spin_lock_irqsave(&lcu->lock, flags);
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
index d64b401..adf322a 100644 (file)
@@ -752,6 +752,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 
        switch (state) {
        case SLSB_P_OUTPUT_EMPTY:
+       case SLSB_P_OUTPUT_PENDING:
                /* the adapter got it */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
                        "out empty:%1d %02x", q->nr, count);
@@ -1575,13 +1576,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                rc = qdio_kick_outbound_q(q, phys_aob);
        } else if (need_siga_sync(q)) {
                rc = qdio_siga_sync_q(q);
+       } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+                  get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
+                  state == SLSB_CU_OUTPUT_PRIMED) {
+               /* The previous buffer is not processed yet, tack on. */
+               qperf_inc(q, fast_requeue);
        } else {
-               /* try to fast requeue buffers */
-               get_buf_state(q, prev_buf(bufnr), &state, 0);
-               if (state != SLSB_CU_OUTPUT_PRIMED)
-                       rc = qdio_kick_outbound_q(q, 0);
-               else
-                       qperf_inc(q, fast_requeue);
+               rc = qdio_kick_outbound_q(q, 0);
        }
 
        /* in case of SIGA errors we must process the error immediately */
index abe460e..cc62d8c 100644 (file)
@@ -10,6 +10,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kthread.h>
+#include <linux/bug.h>
 #include "zfcp_ext.h"
 #include "zfcp_reqlist.h"
 
@@ -244,6 +245,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
        struct zfcp_erp_action *erp_action;
        struct zfcp_scsi_dev *zfcp_sdev;
 
+       if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
+                        need != ZFCP_ERP_ACTION_REOPEN_PORT &&
+                        need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
+                        need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
+               return NULL;
+
        switch (need) {
        case ZFCP_ERP_ACTION_REOPEN_LUN:
                zfcp_sdev = sdev_to_zfcp(sdev);
index d5184aa..3bc610d 100644 (file)
@@ -1973,7 +1973,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
  */
 static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
 {
-       return (struct fcoe_rport *)(rdata + 1);
+       return container_of(rdata, struct fcoe_rport, rdata);
 }
 
 /**
@@ -2233,7 +2233,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
  */
 static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
                              struct sk_buff *skb,
-                             struct fc_rport_priv *rdata)
+                             struct fcoe_rport *frport)
 {
        struct fip_header *fiph;
        struct fip_desc *desc = NULL;
@@ -2241,16 +2241,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
        struct fip_wwn_desc *wwn = NULL;
        struct fip_vn_desc *vn = NULL;
        struct fip_size_desc *size = NULL;
-       struct fcoe_rport *frport;
        size_t rlen;
        size_t dlen;
        u32 desc_mask = 0;
        u32 dtype;
        u8 sub;
 
-       memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
-       frport = fcoe_ctlr_rport(rdata);
-
        fiph = (struct fip_header *)skb->data;
        frport->flags = ntohs(fiph->fip_flags);
 
@@ -2313,15 +2309,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
                        if (dlen != sizeof(struct fip_wwn_desc))
                                goto len_err;
                        wwn = (struct fip_wwn_desc *)desc;
-                       rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+                       frport->rdata.ids.node_name =
+                               get_unaligned_be64(&wwn->fd_wwn);
                        break;
                case FIP_DT_VN_ID:
                        if (dlen != sizeof(struct fip_vn_desc))
                                goto len_err;
                        vn = (struct fip_vn_desc *)desc;
                        memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
-                       rdata->ids.port_id = ntoh24(vn->fd_fc_id);
-                       rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+                       frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
+                       frport->rdata.ids.port_name =
+                               get_unaligned_be64(&vn->fd_wwpn);
                        break;
                case FIP_DT_FC4F:
                        if (dlen != sizeof(struct fip_fc4_feat))
@@ -2664,16 +2662,13 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
        struct fip_header *fiph;
        enum fip_vn2vn_subcode sub;
-       struct {
-               struct fc_rport_priv rdata;
-               struct fcoe_rport frport;
-       } buf;
+       struct fcoe_rport frport = { };
        int rc;
 
        fiph = (struct fip_header *)skb->data;
        sub = fiph->fip_subcode;
 
-       rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+       rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
        if (rc) {
                LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
                goto drop;
@@ -2682,19 +2677,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
        mutex_lock(&fip->ctlr_mutex);
        switch (sub) {
        case FIP_SC_VN_PROBE_REQ:
-               fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+               fcoe_ctlr_vn_probe_req(fip, &frport.rdata);
                break;
        case FIP_SC_VN_PROBE_REP:
-               fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+               fcoe_ctlr_vn_probe_reply(fip, &frport.rdata);
                break;
        case FIP_SC_VN_CLAIM_NOTIFY:
-               fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+               fcoe_ctlr_vn_claim_notify(fip, &frport.rdata);
                break;
        case FIP_SC_VN_CLAIM_REP:
-               fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+               fcoe_ctlr_vn_claim_resp(fip, &frport.rdata);
                break;
        case FIP_SC_VN_BEACON:
-               fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+               fcoe_ctlr_vn_beacon(fip, &frport.rdata);
                break;
        default:
                LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
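
The libfcoe changes above stop carving the fcoe_rport out of memory placed after fc_rport_priv ("rdata + 1") and instead embed rdata inside struct fcoe_rport, recovering the container with container_of(); the on-stack buffer in fcoe_ctlr_vn_recv() becomes a plain struct fcoe_rport for the same reason. A self-contained sketch of that embedding pattern (simplified types, only the layout idea matches the patch):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rport_priv {                 /* stand-in for struct fc_rport_priv */
        unsigned int port_id;
    };

    struct fcoe_rport {                 /* container embeds the generic rport data */
        struct rport_priv rdata;
        unsigned char vn_mac[6];
    };

    /* Equivalent of the reworked fcoe_ctlr_rport(): recover the container. */
    static struct fcoe_rport *to_fcoe_rport(struct rport_priv *rdata)
    {
        return container_of(rdata, struct fcoe_rport, rdata);
    }

    int main(void)
    {
        struct fcoe_rport frport = { .rdata = { .port_id = 0x010203 } };
        struct rport_priv *rdata = &frport.rdata;   /* what generic code sees */

        /* FCoE-specific code gets its private part back without relying on
         * "rdata + 1" pointing at memory allocated right after rdata. */
        printf("port_id 0x%06x, container %p\n",
               to_fcoe_rport(rdata)->rdata.port_id,
               (void *)to_fcoe_rport(rdata));
        return 0;
    }
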
index e095288..fcce3ae 100644 (file)
@@ -2153,6 +2153,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
        case IOACCEL2_SERV_RESPONSE_COMPLETE:
                switch (c2->error_data.status) {
                case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+                       if (cmd)
+                               cmd->result = 0;
                        break;
                case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
                        cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2320,8 +2322,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 
        /* check for good status */
        if (likely(c2->error_data.serv_response == 0 &&
-                       c2->error_data.status == 0))
+                       c2->error_data.status == 0)) {
+               cmd->result = 0;
                return hpsa_cmd_free_and_done(h, c, cmd);
+       }
 
        /*
         * Any RAID offload error results in retry which will use
@@ -5237,6 +5241,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
        c = cmd_tagged_alloc(h, cmd);
 
        /*
+        * This is necessary because the SML doesn't zero out this field during
+        * error recovery.
+        */
+       cmd->result = 0;
+
+       /*
         * Call alternate submit routine for I/O accelerated commands.
         * Retries always go down the normal I/O path.
         */
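
Both hpsa hunks above clear cmd->result explicitly, once on the good-completion path and once when a command enters the queue path, because the midlayer can hand back a command whose result field still holds a value from a previous use. A toy illustration of why a reused request needs its status reset (types and names are invented for the example):

    #include <stdio.h>

    struct scsi_cmd { int result; };

    /* Simulated completion: only failures set a non-zero status. */
    static void complete_cmd(struct scsi_cmd *cmd, int fail)
    {
        if (fail)
            cmd->result |= 0x02;    /* e.g. SAM_STAT_CHECK_CONDITION */
    }

    int main(void)
    {
        struct scsi_cmd cmd = { 0 };

        complete_cmd(&cmd, 1);                    /* first use fails */
        printf("after failure:    0x%x\n", cmd.result);

        complete_cmd(&cmd, 0);                    /* reused without reset */
        printf("reused, no reset: 0x%x\n", cmd.result);

        cmd.result = 0;                           /* the fix: clear before reuse */
        complete_cmd(&cmd, 0);
        printf("reused, reset:    0x%x\n", cmd.result);
        return 0;
    }
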
index 1f9f9e5..0526a47 100644 (file)
@@ -4869,8 +4869,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
 
        spin_lock_irqsave(vhost->host->host_lock, flags);
        ibmvfc_purge_requests(vhost, DID_ERROR);
-       ibmvfc_free_event_pool(vhost);
        spin_unlock_irqrestore(vhost->host->host_lock, flags);
+       ibmvfc_free_event_pool(vhost);
 
        ibmvfc_free_mem(vhost);
        spin_lock(&ibmvfc_driver_lock);
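
The ibmvfc fix moves ibmvfc_free_event_pool() out from under the host lock, since tearing down the pool can sleep and must not run while a spinlock is held with interrupts disabled. A rough userspace sketch of that ordering, with a pthread mutex standing in for the host lock and free() for the pool teardown (purely illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

    struct host { void *event_pool; int purged; };

    static void purge_requests(struct host *h) { h->purged = 1; }

    static void remove_host(struct host *h)
    {
        pthread_mutex_lock(&host_lock);
        purge_requests(h);              /* must run with the lock held */
        pthread_mutex_unlock(&host_lock);

        free(h->event_pool);            /* potentially sleeping teardown happens
                                         * only after the lock is dropped */
        h->event_pool = NULL;
    }

    int main(void)
    {
        struct host h = { .event_pool = malloc(64), .purged = 0 };

        remove_host(&h);
        printf("purged=%d pool=%p\n", h.purged, h.event_pool);
        return 0;
    }
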
index 589ff9a..e69940e 100644 (file)
@@ -121,12 +121,15 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
                                             u32 port_id)
 {
        struct fc_rport_priv *rdata;
+       size_t rport_priv_size = sizeof(*rdata);
 
        rdata = lport->tt.rport_lookup(lport, port_id);
        if (rdata)
                return rdata;
 
-       rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+       if (lport->rport_priv_size > 0)
+               rport_priv_size = lport->rport_priv_size;
+       rdata = kzalloc(rport_priv_size, GFP_KERNEL);
        if (!rdata)
                return NULL;
 
index 2422094..5e0bac8 100644 (file)
@@ -2752,6 +2752,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
        u32 size;
        unsigned long buff_addr;
        unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+       unsigned long chunk_left_bytes;
        unsigned long src_addr;
        unsigned long flags;
        u32 buff_offset;
@@ -2777,6 +2778,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
        }
 
        size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+       chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+       size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
        size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
 
        src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
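
The megaraid_sas hunk caps each sysfs read at the bytes remaining in the current crash-dump DMA chunk, so a read that starts partway into a chunk can no longer run past its end. The same arithmetic as a standalone demonstration (buffer sizes are arbitrary):

    #include <stdio.h>

    #define CRASH_DMA_BUF_SIZE (1024UL * 1024UL)    /* one crash-dump chunk */
    #define PAGE_SIZE 4096UL

    /* Clamp a read at 'buff_offset' the way the patched sysfs handler does. */
    static unsigned long copy_len(unsigned long total, unsigned long buff_offset)
    {
        unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
        unsigned long size = total - buff_offset;
        unsigned long chunk_left_bytes = dmachunk - (buff_offset % dmachunk);

        size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
        size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
        return size;
    }

    int main(void)
    {
        unsigned long total = 3 * CRASH_DMA_BUF_SIZE;   /* three chunks of data */

        /* Reader positioned 100 bytes before a chunk boundary: only those
         * 100 bytes may be copied in this call. */
        printf("%lu\n", copy_len(total, CRASH_DMA_BUF_SIZE - 100));
        printf("%lu\n", copy_len(total, 0));    /* full page minus one, as before */
        return 0;
    }
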
index 9b53672..7af7a08 100644 (file)
@@ -1686,9 +1686,11 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
        struct sysinfo s;
        u64 consistent_dma_mask;
+       /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+       int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
 
        if (ioc->dma_mask)
-               consistent_dma_mask = DMA_BIT_MASK(64);
+               consistent_dma_mask = DMA_BIT_MASK(dma_mask);
        else
                consistent_dma_mask = DMA_BIT_MASK(32);
 
@@ -1696,11 +1698,11 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
                const uint64_t required_mask =
                    dma_get_required_mask(&pdev->dev);
                if ((required_mask > DMA_BIT_MASK(32)) &&
-                   !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+                   !pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask)) &&
                    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
                        ioc->base_add_sg_single = &_base_add_sg_single_64;
                        ioc->sge_size = sizeof(Mpi2SGESimple64_t);
-                       ioc->dma_mask = 64;
+                       ioc->dma_mask = dma_mask;
                        goto out;
                }
        }
@@ -1726,7 +1728,7 @@ static int
 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
                                      struct pci_dev *pdev)
 {
-       if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
                        return -ENODEV;
        }
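
The mpt3sas hunks in this file select a 63-bit DMA mask for the newer controller generations and then use ioc->dma_mask wherever 64 was previously hard-coded, including the '> 32' test in the hunk below before re-applying the coherent mask. A tiny program showing what DMA_BIT_MASK(n) evaluates to and how the width could be chosen (the MPI version constants are illustrative approximations, not taken from this tree):

    #include <stdio.h>

    /* Same definition as the kernel macro. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    #define MPI2_VERSION  0x0200    /* illustrative values for the MPI revisions */
    #define MPI25_VERSION 0x0205

    /* Newer-generation controllers get a 63-bit mask, older ones keep 64. */
    static int pick_dma_width(int hba_mpi_version_belonged)
    {
        return (hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
    }

    int main(void)
    {
        int width = pick_dma_width(MPI25_VERSION);

        printf("width %d -> mask %#llx\n", width,
               (unsigned long long)DMA_BIT_MASK(width));
        printf("width %d -> mask %#llx\n", 64,
               (unsigned long long)DMA_BIT_MASK(64));
        return 0;
    }
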
@@ -3325,7 +3327,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc,  int sleep_flag)
                total_sz += sz;
        } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
 
-       if (ioc->dma_mask == 64) {
+       if (ioc->dma_mask > 32) {
                if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
                        pr_warn(MPT3SAS_FMT
                            "no suitable consistent DMA mask for %s\n",
index 94edba9..02f8977 100644 (file)
@@ -6289,8 +6289,8 @@ static void ufshcd_rls_handler(struct work_struct *work)
        u32 mode;
 
        hba = container_of(work, struct ufs_hba, rls_work);
-       ufshcd_scsi_block_requests(hba);
        pm_runtime_get_sync(hba->dev);
+       ufshcd_scsi_block_requests(hba);
        ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
        if (ret) {
                dev_err(hba->dev,
index 12c5004..70fd403 100644 (file)
@@ -326,6 +326,16 @@ config MSM_IPC_ROUTER_GLINK_XPRT
          this layer registers a transport with IPC Router and enable
          message exchange.
 
+config MSM_IPC_ROUTER_SDIO_XPRT
+       depends on QCOM_SDIO_CLIENT
+       depends on IPC_ROUTER
+       bool "MSM SDIO XPRT Layer"
+       help
+         SDIO Transport Layer that enables off-chip communication of
+         IPC Router. When the SDIO endpoint becomes available, this layer
+         registers the transport with IPC Router and enables message
+         exchange.
+
 config MSM_SYSTEM_HEALTH_MONITOR
        bool "System Health Monitor"
        depends on MSM_QMI_INTERFACE && MSM_SUBSYSTEM_RESTART
index 9b80e95..12482ee 100644 (file)
@@ -24,6 +24,7 @@ obj-$(CONFIG_MSM_RPM_SMD)     +=      rpm-smd-debug.o
 endif
 obj-$(CONFIG_MSM_IPC_ROUTER_SMD_XPRT)  +=      ipc_router_smd_xprt.o
 obj-$(CONFIG_MSM_IPC_ROUTER_HSIC_XPRT) +=      ipc_router_hsic_xprt.o
+obj-$(CONFIG_MSM_IPC_ROUTER_SDIO_XPRT) +=      ipc_router_sdio_xprt.o
 obj-$(CONFIG_MSM_IPC_ROUTER_MHI_XPRT)  +=      ipc_router_mhi_xprt.o
 obj-$(CONFIG_MSM_IPC_ROUTER_GLINK_XPRT)        +=      ipc_router_glink_xprt.o
 obj-$(CONFIG_MSM_SPCOM) += spcom.o
index 4331af8..42d8fd5 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, 2019, The Linux Foundation. All rights reserved.
 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -91,6 +91,13 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
                }
 
                payload1 = data->payload;
+
+               if (data->payload_size < 2 * sizeof(uint32_t)) {
+                       pr_err("%s: payload has invalid size %d\n",
+                               __func__, data->payload_size);
+                       return -EINVAL;
+               }
+
                switch (payload1[0]) {
                case AVCS_CMD_REMOTE_AVTIMER_RELEASE_REQUEST:
                        pr_debug("%s: Cmd = TIMER RELEASE status[0x%x]\n",
@@ -116,6 +123,11 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
        }
 
        case AVCS_CMD_RSP_REMOTE_AVTIMER_VOTE_REQUEST:
+               if (data->payload_size < sizeof(uint32_t)) {
+                       pr_err("%s: payload has invalid size %d\n",
+                               __func__, data->payload_size);
+                       return -EINVAL;
+               }
                payload1 = data->payload;
                pr_debug("%s: RSP_REMOTE_AVTIMER_VOTE_REQUEST handle %x\n",
                        __func__, payload1[0]);
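
Both apr hunks above check data->payload_size before dereferencing the payload, so a short message can no longer cause an out-of-bounds read of payload1[0] or payload1[1]. The guard pattern in isolation, as a userspace mock-up rather than the driver code:

    #include <stdio.h>
    #include <stdint.h>

    struct client_data {
        uint32_t payload_size;      /* bytes actually received */
        void    *payload;
    };

    /* Handle a message that is expected to carry two 32-bit words. */
    static int handle_msg(struct client_data *data)
    {
        uint32_t *payload1;

        if (data->payload_size < 2 * sizeof(uint32_t)) {
            fprintf(stderr, "payload has invalid size %u\n", data->payload_size);
            return -1;              /* reject instead of reading past the end */
        }

        payload1 = data->payload;
        printf("opcode 0x%x status 0x%x\n", payload1[0], payload1[1]);
        return 0;
    }

    int main(void)
    {
        uint32_t words[2] = { 0x1234, 0 };
        struct client_data ok   = { sizeof(words), words };
        struct client_data runt = { 3, words };    /* truncated message */

        handle_msg(&ok);
        handle_msg(&runt);
        return 0;
    }
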
index fbd5797..0234172 100644 (file)
@@ -53,6 +53,12 @@ int physical_channel_send(struct physical_channel *pchan,
        GIPC_Result result = GIPC_Success;
        uint8_t *msg = NULL;
 
+       if (!dev) {
+               pr_err("no send pchan %s has been de-alloced msg for %zd bytes\n",
+                       pchan->name, sizebytes);
+               return -ENODEV;
+       }
+
        spin_lock_bh(&dev->io_lock);
 
        result = GIPC_PrepareMessage(dev->endpoint, sizebytes+sizeof(*header),
@@ -104,10 +110,15 @@ void physical_channel_rx_dispatch(unsigned long physical_channel)
                (struct physical_channel *)physical_channel;
        struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
        GIPC_Result result = GIPC_Success;
-
        uint32_t events;
        unsigned long flags;
 
+       if (!dev) {
+               pr_err("no recv pchan %s has been de-alloced\n",
+                       pchan->name);
+               return;
+       }
+
        spin_lock_irqsave(&pchan->rxbuf_lock, flags);
        events = kgipc_dequeue_events(dev->endpoint);
        spin_unlock_irqrestore(&pchan->rxbuf_lock, flags);
index 2c8fb14..f479d69 100644 (file)
@@ -263,7 +263,8 @@ int habhyp_commdev_dealloc(void *commdev)
        kgipc_endpoint_free(dev->endpoint);
        kfree(dev->read_data);
        kfree(dev);
-
+       pchan->closed = 1;
+       pchan->hyp_data = NULL;
        if (get_refcnt(pchan->refcount) > 1) {
                pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
                        pchan->name, pchan->vcnt, get_refcnt(pchan->refcount));
index 714a4f1..45da4ad 100644 (file)
@@ -1311,7 +1311,7 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
        for (i = 0; i < resp.mem_region_info_len; i++) {
 
                if (resp.mem_region_info[i].size > penv->msa_mem_size ||
-                   resp.mem_region_info[i].region_addr > max_mapped_addr ||
+                   resp.mem_region_info[i].region_addr >= max_mapped_addr ||
                    resp.mem_region_info[i].region_addr < penv->msa_pa ||
                    resp.mem_region_info[i].size +
                    resp.mem_region_info[i].region_addr > max_mapped_addr) {
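
The hunk above tightens the MSA region check from '>' to '>=', so a firmware-reported region whose start address equals max_mapped_addr is rejected as well; even a zero-length region at that address would begin outside the mapped window. A standalone version of the check with simplified field names:

    #include <stdio.h>
    #include <stdint.h>

    struct region { uint64_t addr; uint64_t size; };

    /* Accept a firmware-reported region only if it lies inside [msa_pa, max). */
    static int region_ok(struct region r, uint64_t msa_pa, uint64_t msa_size)
    {
        uint64_t max_mapped_addr = msa_pa + msa_size;

        if (r.size > msa_size ||
            r.addr >= max_mapped_addr ||          /* was '>' before the fix */
            r.addr < msa_pa ||
            r.size + r.addr > max_mapped_addr)
            return 0;
        return 1;
    }

    int main(void)
    {
        uint64_t msa_pa = 0x90000000, msa_size = 0x100000;

        printf("%d\n", region_ok((struct region){ msa_pa, 0x1000 },
                                 msa_pa, msa_size));          /* 1: valid  */
        printf("%d\n", region_ok((struct region){ msa_pa + msa_size, 0 },
                                 msa_pa, msa_size));          /* 0: at end */
        return 0;
    }
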
diff --git a/drivers/soc/qcom/ipc_router_sdio_xprt.c b/drivers/soc/qcom/ipc_router_sdio_xprt.c
new file mode 100644 (file)
index 0000000..c30e375
--- /dev/null
@@ -0,0 +1,790 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * IPC ROUTER SDIO XPRT module.
+ */
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <soc/qcom/subsystem_restart.h>
+
+static int msm_ipc_router_sdio_xprt_debug_mask = 1;
+module_param_named(debug_mask, msm_ipc_router_sdio_xprt_debug_mask,
+                  int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#if defined(DEBUG)
+#define D(x...) do { \
+if (msm_ipc_router_sdio_xprt_debug_mask) \
+       pr_err(x); \
+} while (0)
+#else
+#define D(x...) do { } while (0)
+#endif
+
+#define NUM_SDIO_XPRTS 1
+#define XPRT_NAME_LEN 32
+
+/**
+ * msm_ipc_router_sdio_xprt - IPC Router's SDIO XPRT structure
+ * @list: IPC router's SDIO XPRTs list.
+ * @ch_name: Name of the SDIO endpoint exported by ipc_bridge driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @driver: Platform driver registered by this XPRT.
+ * @xprt: IPC Router XPRT structure to contain SDIO XPRT specific info.
+ * @pdev: Platform device registered by IPC Bridge function driver.
+ * @sdio_xprt_wq: Workqueue to queue read and other XPRT-related work items.
+ * @read_work: Work item that performs read operations from SDIO's ipc_bridge.
+ * @in_pkt: Pointer to any partially read packet.
+ * @ss_reset_lock: Lock to protect access to the ss_reset flag.
+ * @ss_reset: flag used to check SSR state.
+ * @sft_close_complete: Variable to indicate completion of SSR handling
+ *                      by IPC Router.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ */
+struct msm_ipc_router_sdio_xprt {
+       struct list_head list;
+       char ch_name[XPRT_NAME_LEN];
+       char xprt_name[XPRT_NAME_LEN];
+       struct platform_driver driver;
+       struct msm_ipc_router_xprt xprt;
+       struct platform_device *pdev;
+       struct workqueue_struct *sdio_xprt_wq;
+       struct delayed_work read_work;
+       struct rr_packet *in_pkt;
+       struct mutex ss_reset_lock;
+       int ss_reset;
+       struct completion sft_close_complete;
+       unsigned xprt_version;
+       unsigned xprt_option;
+};
+
+struct ipc_bridge_platform_data {
+       unsigned int max_read_size;
+       unsigned int max_write_size;
+       int (*open)(int id, void *ops);
+       int (*read)(int id, char *buf, size_t count);
+       int (*write)(int id, char *buf, size_t count);
+       int (*close)(int id);
+};
+
+struct msm_ipc_router_sdio_xprt_work {
+       struct msm_ipc_router_xprt *xprt;
+       struct work_struct work;
+};
+
+static void sdio_xprt_read_data(struct work_struct *work);
+
+/**
+ * msm_ipc_router_sdio_xprt_config - Config. Info. of each SDIO XPRT
+ * @ch_name: Name of the SDIO endpoint exported by ipc_bridge driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @sdio_pdev_id: ID to differentiate among multiple ipc_bridge endpoints.
+ * @link_id: Network Cluster ID to which this XPRT belongs.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ */
+struct msm_ipc_router_sdio_xprt_config {
+       char ch_name[XPRT_NAME_LEN];
+       char xprt_name[XPRT_NAME_LEN];
+       int sdio_pdev_id;
+       uint32_t link_id;
+       unsigned xprt_version;
+};
+
+struct msm_ipc_router_sdio_xprt_config sdio_xprt_cfg[] = {
+       {"ipc_bridge_sdio", "ipc_rtr_ipc_bridge_sdio", 1, 1, 3},
+};
+
+#define MODULE_NAME "ipc_router_sdio_xprt"
+#define IPC_ROUTER_SDIO_XPRT_WAIT_TIMEOUT 3000
+static int ipc_router_sdio_xprt_probe_done;
+static struct delayed_work ipc_router_sdio_xprt_probe_work;
+static DEFINE_MUTEX(sdio_remote_xprt_list_lock_lha1);
+static LIST_HEAD(sdio_remote_xprt_list);
+
+/**
+ * find_sdio_xprt_list() - Find xprt item specific to an SDIO endpoint
+ * @name: Name of the platform device to find in list
+ *
+ * @return: pointer to msm_ipc_router_sdio_xprt if matching endpoint is found,
+ *             else NULL.
+ *
+ * This function is used to find a specific xprt item in the global xprt list.
+ */
+static struct msm_ipc_router_sdio_xprt *
+               find_sdio_xprt_list(const char *name)
+{
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp;
+
+       mutex_lock(&sdio_remote_xprt_list_lock_lha1);
+       list_for_each_entry(sdio_xprtp, &sdio_remote_xprt_list, list) {
+               if (!strcmp(name, sdio_xprtp->ch_name)) {
+                       mutex_unlock(&sdio_remote_xprt_list_lock_lha1);
+                       return sdio_xprtp;
+               }
+       }
+       mutex_unlock(&sdio_remote_xprt_list_lock_lha1);
+       return NULL;
+}
+
+/**
+ * ipc_router_sdio_set_xprt_version() - Set IPC Router header version
+ *                                          in the transport
+ * @xprt: Reference to the transport structure.
+ * @version: The version to be set in transport.
+ */
+static void ipc_router_sdio_set_xprt_version(
+       struct msm_ipc_router_xprt *xprt, unsigned version)
+{
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp;
+
+       if (!xprt)
+               return;
+       sdio_xprtp = container_of(xprt, struct msm_ipc_router_sdio_xprt, xprt);
+       sdio_xprtp->xprt_version = version;
+}
+
+/**
+ * msm_ipc_router_sdio_get_xprt_version() - Get IPC Router header version
+ *                                          supported by the XPRT
+ * @xprt: XPRT for which the version information is required.
+ *
+ * @return: IPC Router header version supported by the XPRT.
+ */
+static int msm_ipc_router_sdio_get_xprt_version(
+       struct msm_ipc_router_xprt *xprt)
+{
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp;
+
+       if (!xprt)
+               return -EINVAL;
+       sdio_xprtp = container_of(xprt, struct msm_ipc_router_sdio_xprt, xprt);
+
+       return (int)sdio_xprtp->xprt_version;
+}
+
+/**
+ * msm_ipc_router_sdio_get_xprt_option() - Get XPRT options
+ * @xprt: XPRT for which the option information is required.
+ *
+ * @return: Options supported by the XPRT.
+ */
+static int msm_ipc_router_sdio_get_xprt_option(
+       struct msm_ipc_router_xprt *xprt)
+{
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp;
+
+       if (!xprt)
+               return -EINVAL;
+       sdio_xprtp = container_of(xprt, struct msm_ipc_router_sdio_xprt, xprt);
+
+       return (int)sdio_xprtp->xprt_option;
+}
+
+/**
+ * msm_ipc_router_sdio_remote_write_avail() - Get available write space
+ * @xprt: XPRT for which the available write space info. is required.
+ *
+ * @return: Write space in bytes on success, 0 on SSR.
+ */
+static int msm_ipc_router_sdio_remote_write_avail(
+       struct msm_ipc_router_xprt *xprt)
+{
+       struct ipc_bridge_platform_data *pdata;
+       int write_avail;
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp =
+               container_of(xprt, struct msm_ipc_router_sdio_xprt, xprt);
+
+       mutex_lock(&sdio_xprtp->ss_reset_lock);
+       if (sdio_xprtp->ss_reset || !sdio_xprtp->pdev) {
+               write_avail = 0;
+       } else {
+               pdata = sdio_xprtp->pdev->dev.platform_data;
+               write_avail = pdata->max_write_size;
+       }
+       mutex_unlock(&sdio_xprtp->ss_reset_lock);
+       return write_avail;
+}
+
+/**
+ * msm_ipc_router_sdio_remote_write() - Write to XPRT
+ * @data: Data to be written to the XPRT.
+ * @len: Length of the data to be written.
+ * @xprt: XPRT to which the data has to be written.
+ *
+ * @return: Data Length on success, standard Linux error codes on failure.
+ */
+static int msm_ipc_router_sdio_remote_write(void *data,
+               uint32_t len, struct msm_ipc_router_xprt *xprt)
+{
+       struct rr_packet *pkt = (struct rr_packet *)data;
+       struct sk_buff *skb;
+       struct ipc_bridge_platform_data *pdata;
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp;
+       int ret;
+       uint32_t bytes_written = 0;
+       uint32_t bytes_to_write;
+       unsigned char *tx_data;
+
+       if (!pkt || pkt->length != len || !xprt) {
+               IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
+               return -EINVAL;
+       }
+
+       sdio_xprtp = container_of(xprt, struct msm_ipc_router_sdio_xprt, xprt);
+
+       mutex_lock(&sdio_xprtp->ss_reset_lock);
+       if (sdio_xprtp->ss_reset) {
+               IPC_RTR_ERR("%s: Trying to write on a reset link\n", __func__);
+               mutex_unlock(&sdio_xprtp->ss_reset_lock);
+               return -ENETRESET;
+       }
+
+       if (!sdio_xprtp->pdev) {
+               IPC_RTR_ERR("%s: Trying to write on a closed link\n", __func__);
+               mutex_unlock(&sdio_xprtp->ss_reset_lock);
+               return -ENODEV;
+       }
+
+       pdata = sdio_xprtp->pdev->dev.platform_data;
+       if (!pdata || !pdata->write) {
+               IPC_RTR_ERR("%s on an uninitialized link\n", __func__);
+               mutex_unlock(&sdio_xprtp->ss_reset_lock);
+               return -EFAULT;
+       }
+
+       skb = skb_peek(pkt->pkt_fragment_q);
+       if (!skb) {
+               IPC_RTR_ERR("%s SKB is NULL\n", __func__);
+               mutex_unlock(&sdio_xprtp->ss_reset_lock);
+               return -EINVAL;
+       }
+       D("%s: About to write %d bytes\n", __func__, len);
+
+       while (bytes_written < len) {
+               bytes_to_write = min_t(uint32_t, (skb->len - bytes_written),
+                                      pdata->max_write_size);
+               tx_data = skb->data + bytes_written;
+               ret = pdata->write(sdio_xprtp->pdev->id, tx_data,
+                                                               bytes_to_write);
+               if (ret < 0) {
+                       IPC_RTR_ERR("%s: Error writing data %d\n",
+                                   __func__, ret);
+                       break;
+               }
+               if (ret != bytes_to_write)
+                       IPC_RTR_ERR("%s: Partial write %d < %d, retrying...\n",
+                                   __func__, ret, bytes_to_write);
+               bytes_written += bytes_to_write;
+       }
+       if (bytes_written == len) {
+               ret = bytes_written;
+       } else if (ret > 0 && bytes_written != len) {
+               IPC_RTR_ERR("%s: Fault writing data %d != %d\n",
+                           __func__, bytes_written, len);
+               ret = -EFAULT;
+       }
+       D("%s: Finished writing %d bytes\n", __func__, len);
+       mutex_unlock(&sdio_xprtp->ss_reset_lock);
+       return ret;
+}
+
+/**
+ * msm_ipc_router_sdio_remote_close() - Close the XPRT
+ * @xprt: XPRT which needs to be closed.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int msm_ipc_router_sdio_remote_close(
+       struct msm_ipc_router_xprt *xprt)
+{
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp;
+       struct ipc_bridge_platform_data *pdata;
+
+       if (!xprt)
+               return -EINVAL;
+       sdio_xprtp = container_of(xprt, struct msm_ipc_router_sdio_xprt, xprt);
+
+       mutex_lock(&sdio_xprtp->ss_reset_lock);
+       sdio_xprtp->ss_reset = 1;
+       mutex_unlock(&sdio_xprtp->ss_reset_lock);
+       flush_workqueue(sdio_xprtp->sdio_xprt_wq);
+       destroy_workqueue(sdio_xprtp->sdio_xprt_wq);
+       pdata = sdio_xprtp->pdev->dev.platform_data;
+       if (pdata && pdata->close)
+               pdata->close(sdio_xprtp->pdev->id);
+       sdio_xprtp->pdev = NULL;
+       return 0;
+}
+
+/**
+ * sdio_xprt_read_data() - Read work to read from the XPRT
+ * @work: Read work to be executed.
+ *
+ * This function is a read work item queued on a XPRT specific workqueue.
+ * The work parameter contains information regarding the XPRT on which this
+ * read work has to be performed. The work item keeps reading from the SDIO
+ * endpoint, until the endpoint returns an error.
+ */
+static void sdio_xprt_read_data(struct work_struct *work)
+{
+       int bytes_to_read;
+       int bytes_read;
+       int skb_size;
+       struct sk_buff *skb = NULL;
+       struct ipc_bridge_platform_data *pdata;
+       struct delayed_work *rwork = to_delayed_work(work);
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp =
+               container_of(rwork, struct msm_ipc_router_sdio_xprt, read_work);
+
+       while (1) {
+               mutex_lock(&sdio_xprtp->ss_reset_lock);
+               if (sdio_xprtp->ss_reset) {
+                       mutex_unlock(&sdio_xprtp->ss_reset_lock);
+                       break;
+               }
+               pdata = sdio_xprtp->pdev->dev.platform_data;
+               mutex_unlock(&sdio_xprtp->ss_reset_lock);
+               while (!sdio_xprtp->in_pkt) {
+                       sdio_xprtp->in_pkt = create_pkt(NULL);
+                       if (sdio_xprtp->in_pkt)
+                               break;
+                       IPC_RTR_ERR("%s: packet allocation failure\n",
+                                                               __func__);
+                       msleep(100);
+               }
+               D("%s: Allocated rr_packet\n", __func__);
+
+               bytes_to_read = 0;
+               skb_size = pdata->max_read_size;
+               do {
+                       do {
+                               skb = alloc_skb(skb_size, GFP_KERNEL);
+                               if (skb)
+                                       break;
+                               IPC_RTR_ERR("%s: Couldn't alloc SKB\n",
+                                           __func__);
+                               msleep(100);
+                       } while (!skb);
+                       bytes_read = pdata->read(sdio_xprtp->pdev->id,
+                                       skb->data, pdata->max_read_size);
+                       if (bytes_read < 0) {
+                               IPC_RTR_ERR("%s: Error %d @ read operation\n",
+                                           __func__, bytes_read);
+                               kfree_skb(skb);
+                               goto out_read_data;
+                       }
+                       if (!bytes_to_read) {
+                               bytes_to_read = ipc_router_peek_pkt_size(
+                                               skb->data);
+                               if (bytes_to_read < 0) {
+                                       IPC_RTR_ERR("%s: Invalid size %d\n",
+                                               __func__, bytes_to_read);
+                                       kfree_skb(skb);
+                                       goto out_read_data;
+                               }
+                       }
+                       bytes_to_read -= bytes_read;
+                       skb_put(skb, bytes_read);
+                       skb_queue_tail(sdio_xprtp->in_pkt->pkt_fragment_q, skb);
+                       sdio_xprtp->in_pkt->length += bytes_read;
+                       skb_size = min_t(uint32_t, pdata->max_read_size,
+                                        (uint32_t)bytes_to_read);
+               } while (bytes_to_read > 0);
+
+               D("%s: Packet size read %d\n",
+                 __func__, sdio_xprtp->in_pkt->length);
+               msm_ipc_router_xprt_notify(&sdio_xprtp->xprt,
+                       IPC_ROUTER_XPRT_EVENT_DATA, (void *)sdio_xprtp->in_pkt);
+               release_pkt(sdio_xprtp->in_pkt);
+               sdio_xprtp->in_pkt = NULL;
+       }
+out_read_data:
+       release_pkt(sdio_xprtp->in_pkt);
+       sdio_xprtp->in_pkt = NULL;
+}
+
+/**
+ * sdio_xprt_sft_close_done() - Completion of XPRT reset
+ * @xprt: XPRT on which the reset operation is complete.
+ *
+ * This function is used by IPC Router to signal this SDIO XPRT Abstraction
+ * Layer(XAL) that the reset of XPRT is completely handled by IPC Router.
+ */
+static void sdio_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp =
+               container_of(xprt, struct msm_ipc_router_sdio_xprt, xprt);
+
+       complete_all(&sdio_xprtp->sft_close_complete);
+}
+
+/**
+ * msm_ipc_router_sdio_remote_remove() - Remove an SDIO endpoint
+ * @pdev: Platform device corresponding to SDIO endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying ipc_bridge driver unregisters
+ * a platform device, mapped to an SDIO endpoint, during SSR.
+ */
+static int msm_ipc_router_sdio_remote_remove(struct platform_device *pdev)
+{
+       struct ipc_bridge_platform_data *pdata;
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp;
+
+       sdio_xprtp = find_sdio_xprt_list(pdev->name);
+       if (!sdio_xprtp) {
+               IPC_RTR_ERR("%s No device with name %s\n",
+                                       __func__, pdev->name);
+               return -ENODEV;
+       }
+
+       mutex_lock(&sdio_xprtp->ss_reset_lock);
+       sdio_xprtp->ss_reset = 1;
+       mutex_unlock(&sdio_xprtp->ss_reset_lock);
+       flush_workqueue(sdio_xprtp->sdio_xprt_wq);
+       destroy_workqueue(sdio_xprtp->sdio_xprt_wq);
+       init_completion(&sdio_xprtp->sft_close_complete);
+       msm_ipc_router_xprt_notify(&sdio_xprtp->xprt,
+                                  IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+       D("%s: Notified IPC Router of %s CLOSE\n", __func__,
+                                                       sdio_xprtp->xprt.name);
+       wait_for_completion(&sdio_xprtp->sft_close_complete);
+       sdio_xprtp->pdev = NULL;
+       pdata = pdev->dev.platform_data;
+       if (pdata && pdata->close)
+               pdata->close(pdev->id);
+       return 0;
+}
+
+/**
+ * msm_ipc_router_sdio_remote_probe() - Probe an SDIO endpoint
+ * @pdev: Platform device corresponding to SDIO endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying ipc_bridge driver registers
+ * a platform device, mapped to an SDIO endpoint.
+ */
+static int msm_ipc_router_sdio_remote_probe(struct platform_device *pdev)
+{
+       int rc;
+       struct ipc_bridge_platform_data *pdata;
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp;
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata || !pdata->open || !pdata->read ||
+           !pdata->write || !pdata->close) {
+               IPC_RTR_ERR("%s: pdata or pdata->operations is NULL\n",
+                                                               __func__);
+               return -EINVAL;
+       }
+
+       sdio_xprtp = find_sdio_xprt_list(pdev->name);
+       if (!sdio_xprtp) {
+               IPC_RTR_ERR("%s No device with name %s\n",
+                                               __func__, pdev->name);
+               return -ENODEV;
+       }
+
+       sdio_xprtp->sdio_xprt_wq =
+               create_singlethread_workqueue(pdev->name);
+       if (!sdio_xprtp->sdio_xprt_wq) {
+               IPC_RTR_ERR("%s: WQ creation failed for %s\n",
+                       __func__, pdev->name);
+               return -EFAULT;
+       }
+
+       rc = pdata->open(pdev->id, NULL);
+       if (rc < 0) {
+               IPC_RTR_ERR("%s: Channel open failed for %s.%d\n",
+                       __func__, pdev->name, pdev->id);
+               destroy_workqueue(sdio_xprtp->sdio_xprt_wq);
+               return rc;
+       }
+       sdio_xprtp->pdev = pdev;
+       mutex_lock(&sdio_xprtp->ss_reset_lock);
+       sdio_xprtp->ss_reset = 0;
+       mutex_unlock(&sdio_xprtp->ss_reset_lock);
+       msm_ipc_router_xprt_notify(&sdio_xprtp->xprt,
+                                  IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+       D("%s: Notified IPC Router of %s OPEN\n",
+         __func__, sdio_xprtp->xprt.name);
+       queue_delayed_work(sdio_xprtp->sdio_xprt_wq,
+                          &sdio_xprtp->read_work, 0);
+       return 0;
+}
+
+/**
+ * msm_ipc_router_sdio_driver_register() - register SDIO XPRT drivers
+ *
+ * @sdio_xprtp: pointer to IPC router sdio xprt structure.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when a new XPRT is added, to register a platform
+ * driver for the new XPRT.
+ */
+static int msm_ipc_router_sdio_driver_register(
+                       struct msm_ipc_router_sdio_xprt *sdio_xprtp)
+{
+       int ret;
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp_item;
+
+       sdio_xprtp_item = find_sdio_xprt_list(sdio_xprtp->ch_name);
+
+       mutex_lock(&sdio_remote_xprt_list_lock_lha1);
+       list_add(&sdio_xprtp->list, &sdio_remote_xprt_list);
+       mutex_unlock(&sdio_remote_xprt_list_lock_lha1);
+
+       if (!sdio_xprtp_item) {
+               sdio_xprtp->driver.driver.name = sdio_xprtp->ch_name;
+               sdio_xprtp->driver.driver.owner = THIS_MODULE;
+               sdio_xprtp->driver.probe = msm_ipc_router_sdio_remote_probe;
+               sdio_xprtp->driver.remove = msm_ipc_router_sdio_remote_remove;
+
+               ret = platform_driver_register(&sdio_xprtp->driver);
+               if (ret) {
+                       IPC_RTR_ERR(
+                       "%s: Failed to register platform driver[%s]\n",
+                                       __func__, sdio_xprtp->ch_name);
+                       return ret;
+               }
+       } else {
+               IPC_RTR_ERR("%s Already driver registered %s\n",
+                                       __func__, sdio_xprtp->ch_name);
+       }
+
+       return 0;
+}
+
+/**
+ * msm_ipc_router_sdio_config_init() - init SDIO xprt configs
+ *
+ * @sdio_xprt_config: pointer to SDIO xprt configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the SDIO XPRT pointer with
+ * the SDIO XPRT configurations either from device tree or static arrays.
+ */
+static int msm_ipc_router_sdio_config_init(
+               struct msm_ipc_router_sdio_xprt_config *sdio_xprt_config)
+{
+       struct msm_ipc_router_sdio_xprt *sdio_xprtp;
+
+       sdio_xprtp = kzalloc(sizeof(struct msm_ipc_router_sdio_xprt),
+                                                       GFP_KERNEL);
+       if (IS_ERR_OR_NULL(sdio_xprtp)) {
+               IPC_RTR_ERR("%s: kzalloc() failed for sdio_xprtp id:%s\n",
+                               __func__, sdio_xprt_config->ch_name);
+               return -ENOMEM;
+       }
+
+       sdio_xprtp->xprt.link_id = sdio_xprt_config->link_id;
+       sdio_xprtp->xprt_version = sdio_xprt_config->xprt_version;
+
+       strlcpy(sdio_xprtp->ch_name, sdio_xprt_config->ch_name,
+                                       XPRT_NAME_LEN);
+
+       strlcpy(sdio_xprtp->xprt_name, sdio_xprt_config->xprt_name,
+                                               XPRT_NAME_LEN);
+       sdio_xprtp->xprt.name = sdio_xprtp->xprt_name;
+
+       sdio_xprtp->xprt.set_version =
+               ipc_router_sdio_set_xprt_version;
+       sdio_xprtp->xprt.get_version =
+               msm_ipc_router_sdio_get_xprt_version;
+       sdio_xprtp->xprt.get_option =
+                msm_ipc_router_sdio_get_xprt_option;
+       sdio_xprtp->xprt.read_avail = NULL;
+       sdio_xprtp->xprt.read = NULL;
+       sdio_xprtp->xprt.write_avail =
+               msm_ipc_router_sdio_remote_write_avail;
+       sdio_xprtp->xprt.write = msm_ipc_router_sdio_remote_write;
+       sdio_xprtp->xprt.close = msm_ipc_router_sdio_remote_close;
+       sdio_xprtp->xprt.sft_close_done = sdio_xprt_sft_close_done;
+       sdio_xprtp->xprt.priv = NULL;
+
+       sdio_xprtp->in_pkt = NULL;
+       INIT_DELAYED_WORK(&sdio_xprtp->read_work, sdio_xprt_read_data);
+       mutex_init(&sdio_xprtp->ss_reset_lock);
+       sdio_xprtp->ss_reset = 0;
+       sdio_xprtp->xprt_option = 0;
+
+       msm_ipc_router_sdio_driver_register(sdio_xprtp);
+       return 0;
+
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @sdio_xprt_config: pointer to SDIO XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+               struct msm_ipc_router_sdio_xprt_config *sdio_xprt_config)
+{
+       int ret;
+       int link_id;
+       int version;
+       char *key;
+       const char *ch_name;
+       const char *remote_ss;
+
+       key = "qcom,ch-name";
+       ch_name = of_get_property(node, key, NULL);
+       if (!ch_name)
+               goto error;
+       strlcpy(sdio_xprt_config->ch_name, ch_name, XPRT_NAME_LEN);
+
+       key = "qcom,xprt-remote";
+       remote_ss = of_get_property(node, key, NULL);
+       if (!remote_ss)
+               goto error;
+
+       key = "qcom,xprt-linkid";
+       ret = of_property_read_u32(node, key, &link_id);
+       if (ret)
+               goto error;
+       sdio_xprt_config->link_id = link_id;
+
+       key = "qcom,xprt-version";
+       ret = of_property_read_u32(node, key, &version);
+       if (ret)
+               goto error;
+       sdio_xprt_config->xprt_version = version;
+
+       scnprintf(sdio_xprt_config->xprt_name, XPRT_NAME_LEN, "%s_%s",
+                       remote_ss, sdio_xprt_config->ch_name);
+
+       return 0;
+
+error:
+       IPC_RTR_ERR("%s: missing key: %s\n", __func__, key);
+       return -ENODEV;
+}
+
+/**
+ * msm_ipc_router_sdio_xprt_probe() - Probe an SDIO xprt
+ * @pdev: Platform device corresponding to SDIO xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to an SDIO transport.
+ */
+static int msm_ipc_router_sdio_xprt_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       struct msm_ipc_router_sdio_xprt_config sdio_xprt_config;
+
+       if (pdev && pdev->dev.of_node) {
+               mutex_lock(&sdio_remote_xprt_list_lock_lha1);
+               ipc_router_sdio_xprt_probe_done = 1;
+               mutex_unlock(&sdio_remote_xprt_list_lock_lha1);
+
+               ret = parse_devicetree(pdev->dev.of_node,
+                                               &sdio_xprt_config);
+               if (ret) {
+                       IPC_RTR_ERR("%s: Failed to parse device tree\n",
+                                                               __func__);
+                       return ret;
+               }
+
+               ret = msm_ipc_router_sdio_config_init(
+                                               &sdio_xprt_config);
+               if (ret) {
+                       IPC_RTR_ERR("%s init failed\n", __func__);
+                       return ret;
+               }
+       }
+       return ret;
+}
+
+/**
+ * ipc_router_sdio_xprt_probe_worker() - probe worker for non DT configurations
+ *
+ * @work: work item to process
+ *
+ * This function is called via schedule_delayed_work after 3 seconds and checks
+ * whether the device tree probe has completed. If it has not, the default
+ * configurations are read from the static array.
+ */
+static void ipc_router_sdio_xprt_probe_worker(struct work_struct *work)
+{
+       int i, ret;
+
+       BUG_ON(ARRAY_SIZE(sdio_xprt_cfg) != NUM_SDIO_XPRTS);
+
+       mutex_lock(&sdio_remote_xprt_list_lock_lha1);
+       if (!ipc_router_sdio_xprt_probe_done) {
+               mutex_unlock(&sdio_remote_xprt_list_lock_lha1);
+               for (i = 0; i < ARRAY_SIZE(sdio_xprt_cfg); i++) {
+                       ret = msm_ipc_router_sdio_config_init(
+                                                       &sdio_xprt_cfg[i]);
+                       if (ret)
+                               D("%s init failed config idx %d\n",
+                                                               __func__, i);
+               }
+               mutex_lock(&sdio_remote_xprt_list_lock_lha1);
+       }
+       mutex_unlock(&sdio_remote_xprt_list_lock_lha1);
+}
+
+static const struct of_device_id msm_ipc_router_sdio_xprt_match_table[] = {
+       { .compatible = "qcom,ipc_router_sdio_xprt" },
+       {},
+};
+
+static struct platform_driver msm_ipc_router_sdio_xprt_driver = {
+       .probe = msm_ipc_router_sdio_xprt_probe,
+       .driver = {
+               .name = MODULE_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = msm_ipc_router_sdio_xprt_match_table,
+        },
+};
+
+static int __init msm_ipc_router_sdio_xprt_init(void)
+{
+       int rc;
+
+       rc = platform_driver_register(&msm_ipc_router_sdio_xprt_driver);
+       if (rc) {
+               IPC_RTR_ERR(
+               "%s: msm_ipc_router_sdio_xprt_driver register failed %d\n",
+                                                               __func__, rc);
+               return rc;
+       }
+
+       INIT_DELAYED_WORK(&ipc_router_sdio_xprt_probe_work,
+                                       ipc_router_sdio_xprt_probe_worker);
+       schedule_delayed_work(&ipc_router_sdio_xprt_probe_work,
+                       msecs_to_jiffies(IPC_ROUTER_SDIO_XPRT_WAIT_TIMEOUT));
+       return 0;
+}
+
+module_init(msm_ipc_router_sdio_xprt_init);
+MODULE_LICENSE("GPL v2");
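
msm_ipc_router_sdio_remote_write() in the new file above pushes an skb to the ipc_bridge in pieces no larger than pdata->max_write_size. A minimal userspace sketch of that chunked-write idea, assuming a transport callback that may accept fewer bytes than requested (the names and the advance-by-return-value policy are illustrative, not the driver's exact behaviour):

    #include <stdio.h>
    #include <string.h>

    #define MAX_WRITE_SIZE 16   /* stands in for pdata->max_write_size */

    /* Fake transport write: pretends the link accepts at most MAX_WRITE_SIZE. */
    static int bridge_write(const char *buf, size_t count)
    {
        size_t n = count > MAX_WRITE_SIZE ? MAX_WRITE_SIZE : count;

        printf("wrote %zu bytes: %.*s\n", n, (int)n, buf);
        return (int)n;
    }

    /* Send 'len' bytes in max_write_size chunks, as the XPRT write path does. */
    static int chunked_write(const char *data, size_t len)
    {
        size_t written = 0;

        while (written < len) {
            size_t chunk = len - written;

            if (chunk > MAX_WRITE_SIZE)
                chunk = MAX_WRITE_SIZE;
            int ret = bridge_write(data + written, chunk);
            if (ret < 0)
                return ret;          /* propagate transport errors */
            written += (size_t)ret;  /* advance by what was actually accepted */
        }
        return (int)written;
    }

    int main(void)
    {
        const char msg[] = "ipc router packet payload split into sdio chunks";

        return chunked_write(msg, strlen(msg)) < 0;
    }
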
index 250cc88..7d19932 100644 (file)
@@ -1,7 +1,7 @@
-obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o voice_svc.o
-obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o voice_svc.o
-obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o voice_svc.o
-obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o voice_svc.o
+obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o voice_svc.o apr_dummy.o
+obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o voice_svc.o apr_dummy.o
+obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o voice_svc.o apr_dummy.o
+obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o voice_svc.o apr_dummy.o
 obj-$(CONFIG_MSM_QDSP6_APRV2_VM) += apr_vm.o apr_v2.o voice_svc.o
 obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += msm_audio_ion.o
 obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += msm_audio_ion_vm.o
@@ -11,4 +11,4 @@ obj-$(CONFIG_MSM_QDSP6_PDR) += audio_pdr.o
 obj-$(CONFIG_MSM_QDSP6_NOTIFIER) += audio_notifier.o
 obj-$(CONFIG_MSM_CDSP_LOADER) += cdsp-loader.o
 obj-$(CONFIG_EXT_ANC) += sdsp-anc.o audio_anc.o audio-anc-dev-mgr.o
-obj-$(CONFIG_MSM_LPASS_RESOURCE_MANAGER) += lpass_resource_mgr.o
\ No newline at end of file
+obj-$(CONFIG_MSM_LPASS_RESOURCE_MANAGER) += lpass_resource_mgr.o
index 4bc1999..8215674 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/sysfs.h>
 #include <linux/device.h>
+#include <linux/of.h>
 #include <linux/slab.h>
 #include <soc/qcom/subsystem_restart.h>
 #include <soc/qcom/scm.h>
 #include <linux/qdsp6v2/dsp_debug.h>
 #include <linux/qdsp6v2/audio_notifier.h>
 #include <linux/ipc_logging.h>
+#include <linux/of_device.h>
 
 #define APR_PKT_IPC_LOG_PAGE_CNT 2
 
+static struct device *apr_dev_ptr;
 static struct apr_q6 q6;
 static struct apr_client client[APR_DEST_MAX][APR_CLIENT_MAX];
 static void *apr_pkt_ctx;
@@ -47,6 +50,7 @@ static wait_queue_head_t modem_wait;
 static bool is_modem_up;
 static bool is_initial_modem_boot;
 static bool is_initial_adsp_boot;
+static bool is_child_devices_loaded;
 /* Subsystem restart: QDSP6 data, functions */
 static struct workqueue_struct *apr_reset_workqueue;
 static void apr_reset_deregister(struct work_struct *work);
@@ -57,6 +61,7 @@ struct apr_reset_work {
 };
 
 static bool apr_cf_debug;
+static struct delayed_work add_chld_dev_work;
 
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *debugfs_apr_debug;
@@ -263,6 +268,11 @@ int apr_set_q6_state(enum apr_subsys_state state)
        if (state < APR_SUBSYS_DOWN || state > APR_SUBSYS_LOADED)
                return -EINVAL;
        atomic_set(&q6.q6_state, state);
+       if (state == APR_SUBSYS_LOADED && !is_child_devices_loaded) {
+               schedule_delayed_work(&add_chld_dev_work,
+                               msecs_to_jiffies(100));
+               is_child_devices_loaded = true;
+       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(apr_set_q6_state);
@@ -279,11 +289,27 @@ static void apr_adsp_down(unsigned long opcode)
        dispatch_event(opcode, APR_DEST_QDSP6);
 }
 
+static void apr_add_child_devices(struct work_struct *work)
+{
+       int ret;
+
+       ret = of_platform_populate(apr_dev_ptr->of_node,
+                       NULL, NULL, apr_dev_ptr);
+       if (ret)
+               dev_err(apr_dev_ptr, "%s: failed to add child nodes, ret=%d\n",
+                       __func__, ret);
+}
+
 static void apr_adsp_up(void)
 {
        if (apr_cmpxchg_q6_state(APR_SUBSYS_DOWN, APR_SUBSYS_LOADED) ==
                                                        APR_SUBSYS_DOWN)
                wake_up(&dsp_wait);
+       if (!is_child_devices_loaded) {
+               schedule_delayed_work(&add_chld_dev_work,
+                               msecs_to_jiffies(100));
+               is_child_devices_loaded = true;
+       }
 }
 
 int apr_wait_for_device_up(int dest_id)
@@ -1057,7 +1083,23 @@ static struct notifier_block modem_service_nb = {
        .priority = 0,
 };
 
-static int __init apr_init(void)
+static void apr_cleanup(void)
+{
+       int i, j, k;
+
+       if (apr_reset_workqueue)
+               destroy_workqueue(apr_reset_workqueue);
+       mutex_destroy(&q6.lock);
+       for (i = 0; i < APR_DEST_MAX; i++) {
+               for (j = 0; j < APR_CLIENT_MAX; j++) {
+                       mutex_destroy(&client[i][j].m_lock);
+                       for (k = 0; k < APR_SVC_MAX; k++)
+                               mutex_destroy(&client[i][j].svc[k].m_lock);
+               }
+       }
+}
+
+static int apr_probe(struct platform_device *pdev)
 {
        int i, j, k;
 
@@ -1087,10 +1129,50 @@ static int __init apr_init(void)
        subsys_notif_register("apr_modem", AUDIO_NOTIFIER_MODEM_DOMAIN,
                              &modem_service_nb);
 
+       apr_dev_ptr = &pdev->dev;
+       INIT_DELAYED_WORK(&add_chld_dev_work, apr_add_child_devices);
+       return 0;
+}
+
+static int apr_remove(struct platform_device *pdev)
+{
+       apr_cleanup();
+       return 0;
+}
+
+static const struct of_device_id apr_machine_of_match[]  = {
+       { .compatible = "qcom,msm-audio-apr", },
+       {},
+};
+
+static struct platform_driver apr_driver = {
+       .probe = apr_probe,
+       .remove = apr_remove,
+       .driver = {
+               .name = "audio_apr",
+               .owner = THIS_MODULE,
+               .of_match_table = apr_machine_of_match,
+       }
+};
+
+static int __init apr_init(void)
+{
+       platform_driver_register(&apr_driver);
+       apr_dummy_init();
        return 0;
 }
 device_initcall(apr_init);
 
+static void __exit apr_exit(void)
+{
+       apr_dummy_exit();
+       platform_driver_unregister(&apr_driver);
+}
+__exitcall(apr_exit);
+
+MODULE_DESCRIPTION("APR DRIVER");
+MODULE_DEVICE_TABLE(of, apr_machine_of_match);
+
 static int __init apr_late_init(void)
 {
        int ret = 0;
diff --git a/drivers/soc/qcom/qdsp6v2/apr_dummy.c b/drivers/soc/qcom/qdsp6v2/apr_dummy.c
new file mode 100644 (file)
index 0000000..517a49d
--- /dev/null
@@ -0,0 +1,58 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/qdsp6v2/apr.h>
+
+static int apr_dummy_probe(struct platform_device *pdev)
+{
+       return 0;
+}
+
+static int apr_dummy_remove(struct platform_device *pdev)
+{
+       return 0;
+}
+
+static const struct of_device_id apr_dummy_dt_match[] = {
+       {.compatible = "qcom,msm-audio-apr-dummy"},
+       {}
+};
+
+static struct platform_driver apr_dummy_driver = {
+       .driver = {
+               .name = "apr_dummy",
+               .owner = THIS_MODULE,
+               .of_match_table = apr_dummy_dt_match,
+       },
+       .probe = apr_dummy_probe,
+       .remove = apr_dummy_remove,
+};
+
+int __init apr_dummy_init(void)
+{
+       platform_driver_register(&apr_dummy_driver);
+       return 0;
+}
+
+void apr_dummy_exit(void)
+{
+       platform_driver_unregister(&apr_dummy_driver);
+}
+
+MODULE_DESCRIPTION("APR dummy module driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, apr_dummy_dt_match);
index 1a1368f..25daebd 100644 (file)
@@ -554,7 +554,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
        bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 
        /* handle all the 3-wire mode */
-       if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
+       if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
+           tfr->rx_buf != master->dummy_rx)
                cs |= BCM2835_SPI_CS_REN;
        else
                cs &= ~BCM2835_SPI_CS_REN;
index 7de6f84..ca65559 100644 (file)
@@ -181,19 +181,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
                      BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
 }
 
-static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
 {
-       struct spi_master *master = dev_id;
-       struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
-       irqreturn_t ret = IRQ_NONE;
+       u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
 
        /* check if we have data to read */
-       while (bs->rx_len &&
-              (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
-                 BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
+       for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
+            stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
                bcm2835aux_rd_fifo(bs);
-               ret = IRQ_HANDLED;
-       }
 
        /* check if we have data to write */
        while (bs->tx_len &&
@@ -201,16 +196,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
               (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
                  BCM2835_AUX_SPI_STAT_TX_FULL))) {
                bcm2835aux_wr_fifo(bs);
-               ret = IRQ_HANDLED;
        }
+}
 
-       /* and check if we have reached "done" */
-       while (bs->rx_len &&
-              (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
-                 BCM2835_AUX_SPI_STAT_BUSY))) {
-               bcm2835aux_rd_fifo(bs);
-               ret = IRQ_HANDLED;
-       }
+static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+{
+       struct spi_master *master = dev_id;
+       struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+       /* IRQ may be shared, so return if our interrupts are disabled */
+       if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+             (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+               return IRQ_NONE;
+
+       /* do common fifo handling */
+       bcm2835aux_spi_transfer_helper(bs);
 
        /* and if rx_len is 0 then wake up completion and disable spi */
        if (!bs->rx_len) {
@@ -218,8 +218,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
                complete(&master->xfer_completion);
        }
 
-       /* and return */
-       return ret;
+       return IRQ_HANDLED;
 }
 
 static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
@@ -265,7 +264,6 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
 {
        struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
        unsigned long timeout;
-       u32 stat;
 
        /* configure spi */
        bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
@@ -276,24 +274,9 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
 
        /* loop until finished the transfer */
        while (bs->rx_len) {
-               /* read status */
-               stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
 
-               /* fill in tx fifo with remaining data */
-               if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
-                       bcm2835aux_wr_fifo(bs);
-                       continue;
-               }
-
-               /* read data from fifo for both cases */
-               if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
-                       bcm2835aux_rd_fifo(bs);
-                       continue;
-               }
-               if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
-                       bcm2835aux_rd_fifo(bs);
-                       continue;
-               }
+               /* do common fifo handling */
+               bcm2835aux_spi_transfer_helper(bs);
 
                /* there is still data pending to read check the timeout */
                if (bs->rx_len && time_after(jiffies, timeout)) {
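
The bcm2835aux rework factors the FIFO filling and draining into bcm2835aux_spi_transfer_helper() and calls it from both the interrupt handler and the polling loop, while the IRQ handler now returns IRQ_NONE when its own interrupt sources are disabled, which matters on a shared interrupt line. A rough sketch of that shared-helper structure with a simulated device state (entirely illustrative, no real register access):

    #include <stdio.h>

    struct spi_state {
        int rx_len;       /* bytes still expected from the device */
        int tx_len;       /* bytes still to be pushed out */
        int irq_enabled;  /* are our interrupt sources turned on? */
    };

    /* Common FIFO handling used by both the IRQ and the polling path. */
    static void transfer_helper(struct spi_state *s)
    {
        while (s->rx_len > 0) {   /* "rx data available" in the real status reg */
            s->rx_len--;
            printf("read one rx byte, %d left\n", s->rx_len);
        }
        while (s->tx_len > 0) {   /* "tx fifo not full" */
            s->tx_len--;
            printf("wrote one tx byte, %d left\n", s->tx_len);
        }
    }

    /* Shared-IRQ style handler: bail out early if this device cannot be the
     * source, otherwise do the same work as the polling loop. */
    static int spi_interrupt(struct spi_state *s)
    {
        if (!s->irq_enabled)
            return 0;             /* IRQ_NONE: not ours */
        transfer_helper(s);
        return 1;                 /* IRQ_HANDLED */
    }

    int main(void)
    {
        struct spi_state s = { .rx_len = 2, .tx_len = 3, .irq_enabled = 1 };

        spi_interrupt(&s);        /* interrupt-driven transfer */
        s.rx_len = 1;
        s.irq_enabled = 0;
        printf("handled=%d\n", spi_interrupt(&s));   /* shared IRQ, not ours */
        return 0;
    }
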
index 6c44458..49e405d 100644 (file)
@@ -101,8 +101,6 @@ config ANDROID_VSOC
 
 source "drivers/staging/android/ion/Kconfig"
 
-source "drivers/staging/android/fiq_debugger/Kconfig"
-
 endif # if ANDROID
 
 endmenu
index 8ef8161..a4e9c01 100644 (file)
@@ -1,7 +1,6 @@
 ccflags-y += -I$(src)                  # needed for trace events
 
 obj-y                                  += ion/
-obj-$(CONFIG_FIQ_DEBUGGER)             += fiq_debugger/
 
 obj-$(CONFIG_ASHMEM)                   += ashmem.o
 obj-$(CONFIG_ANDROID_TIMED_OUTPUT)     += timed_output.o
diff --git a/drivers/staging/android/fiq_debugger/Kconfig b/drivers/staging/android/fiq_debugger/Kconfig
deleted file mode 100644 (file)
index 60fc224..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-config FIQ_DEBUGGER
-       bool "FIQ Mode Serial Debugger"
-       default n
-       depends on ARM || ARM64
-       help
-         The FIQ serial debugger can accept commands even when the
-         kernel is unresponsive due to being stuck with interrupts
-         disabled.
-
-config FIQ_DEBUGGER_NO_SLEEP
-       bool "Keep serial debugger active"
-       depends on FIQ_DEBUGGER
-       default n
-       help
-         Enables the serial debugger at boot. Passing
-         fiq_debugger.no_sleep on the kernel commandline will
-         override this config option.
-
-config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
-       bool "Don't disable wakeup IRQ when debugger is active"
-       depends on FIQ_DEBUGGER
-       default n
-       help
-         Don't disable the wakeup irq when enabling the uart clock.  This will
-         cause extra interrupts, but it makes the serial debugger usable with
-         on some MSM radio builds that ignore the uart clock request in power
-         collapse.
-
-config FIQ_DEBUGGER_CONSOLE
-       bool "Console on FIQ Serial Debugger port"
-       depends on FIQ_DEBUGGER
-       default n
-       help
-         Enables a console so that printk messages are displayed on
-         the debugger serial port as the occur.
-
-config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
-       bool "Put the FIQ debugger into console mode by default"
-       depends on FIQ_DEBUGGER_CONSOLE
-       default n
-       help
-         If enabled, this puts the fiq debugger into console mode by default.
-         Otherwise, the fiq debugger will start out in debug mode.
-
-config FIQ_DEBUGGER_UART_OVERLAY
-       bool "Install uart DT overlay"
-       depends on FIQ_DEBUGGER
-       select OF_OVERLAY
-       default n
-       help
-         If enabled, fiq debugger is calling fiq_debugger_uart_overlay()
-         that will apply overlay uart_overlay@0 to disable proper uart.
-
-config FIQ_WATCHDOG
-       bool
-       select FIQ_DEBUGGER
-       select PSTORE_RAM
-       default n
diff --git a/drivers/staging/android/fiq_debugger/Makefile b/drivers/staging/android/fiq_debugger/Makefile
deleted file mode 100644 (file)
index a7ca487..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-y                  += fiq_debugger.o
-obj-$(CONFIG_ARM)      += fiq_debugger_arm.o
-obj-$(CONFIG_ARM64)    += fiq_debugger_arm64.o
-obj-$(CONFIG_FIQ_WATCHDOG)     += fiq_watchdog.o
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
deleted file mode 100644 (file)
index 08113a3..0000000
+++ /dev/null
@@ -1,1270 +0,0 @@
-/*
- * drivers/staging/android/fiq_debugger.c
- *
- * Serial Debugger Interface accessed through an FIQ interrupt.
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <stdarg.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/console.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/kernel_stat.h>
-#include <linux/kmsg_dump.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/sysrq.h>
-#include <linux/timer.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/wakelock.h>
-
-#ifdef CONFIG_FIQ_GLUE
-#include <asm/fiq_glue.h>
-#endif
-
-#ifdef CONFIG_FIQ_DEBUGGER_UART_OVERLAY
-#include <linux/of.h>
-#endif
-
-#include <linux/uaccess.h>
-
-#include "fiq_debugger.h"
-#include "fiq_debugger_priv.h"
-#include "fiq_debugger_ringbuf.h"
-
-#define DEBUG_MAX 64
-#define MAX_UNHANDLED_FIQ_COUNT 1000000
-
-#define MAX_FIQ_DEBUGGER_PORTS 4
-
-struct fiq_debugger_state {
-#ifdef CONFIG_FIQ_GLUE
-       struct fiq_glue_handler handler;
-#endif
-       struct fiq_debugger_output output;
-
-       int fiq;
-       int uart_irq;
-       int signal_irq;
-       int wakeup_irq;
-       bool wakeup_irq_no_set_wake;
-       struct clk *clk;
-       struct fiq_debugger_pdata *pdata;
-       struct platform_device *pdev;
-
-       char debug_cmd[DEBUG_MAX];
-       int debug_busy;
-       int debug_abort;
-
-       char debug_buf[DEBUG_MAX];
-       int debug_count;
-
-       bool no_sleep;
-       bool debug_enable;
-       bool ignore_next_wakeup_irq;
-       struct timer_list sleep_timer;
-       spinlock_t sleep_timer_lock;
-       bool uart_enabled;
-       struct wake_lock debugger_wake_lock;
-       bool console_enable;
-       int current_cpu;
-       atomic_t unhandled_fiq_count;
-       bool in_fiq;
-
-       struct work_struct work;
-       spinlock_t work_lock;
-       char work_cmd[DEBUG_MAX];
-
-#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
-       spinlock_t console_lock;
-       struct console console;
-       struct tty_port tty_port;
-       struct fiq_debugger_ringbuf *tty_rbuf;
-       bool syslog_dumping;
-#endif
-
-       unsigned int last_irqs[NR_IRQS];
-       unsigned int last_local_timer_irqs[NR_CPUS];
-};
-
-#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
-struct tty_driver *fiq_tty_driver;
-#endif
-
-#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
-static bool initial_no_sleep = true;
-#else
-static bool initial_no_sleep;
-#endif
-
-#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
-static bool initial_debug_enable = true;
-static bool initial_console_enable = true;
-#else
-static bool initial_debug_enable;
-static bool initial_console_enable;
-#endif
-
-static bool fiq_kgdb_enable;
-static bool fiq_debugger_disable;
-
-module_param_named(no_sleep, initial_no_sleep, bool, 0644);
-module_param_named(debug_enable, initial_debug_enable, bool, 0644);
-module_param_named(console_enable, initial_console_enable, bool, 0644);
-module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644);
-module_param_named(disable, fiq_debugger_disable, bool, 0644);
-
-#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
-static inline
-void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) {}
-static inline
-void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) {}
-#else
-static inline
-void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state)
-{
-       if (state->wakeup_irq < 0)
-               return;
-       enable_irq(state->wakeup_irq);
-       if (!state->wakeup_irq_no_set_wake)
-               enable_irq_wake(state->wakeup_irq);
-}
-static inline
-void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state)
-{
-       if (state->wakeup_irq < 0)
-               return;
-       disable_irq_nosync(state->wakeup_irq);
-       if (!state->wakeup_irq_no_set_wake)
-               disable_irq_wake(state->wakeup_irq);
-}
-#endif
-
-static inline bool fiq_debugger_have_fiq(struct fiq_debugger_state *state)
-{
-       return (state->fiq >= 0);
-}
-
-#ifdef CONFIG_FIQ_GLUE
-static void fiq_debugger_force_irq(struct fiq_debugger_state *state)
-{
-       unsigned int irq = state->signal_irq;
-
-       if (WARN_ON(!fiq_debugger_have_fiq(state)))
-               return;
-       if (state->pdata->force_irq) {
-               state->pdata->force_irq(state->pdev, irq);
-       } else {
-               struct irq_chip *chip = irq_get_chip(irq);
-               if (chip && chip->irq_retrigger)
-                       chip->irq_retrigger(irq_get_irq_data(irq));
-       }
-}
-#endif
-
-static void fiq_debugger_uart_enable(struct fiq_debugger_state *state)
-{
-       if (state->clk)
-               clk_enable(state->clk);
-       if (state->pdata->uart_enable)
-               state->pdata->uart_enable(state->pdev);
-}
-
-static void fiq_debugger_uart_disable(struct fiq_debugger_state *state)
-{
-       if (state->pdata->uart_disable)
-               state->pdata->uart_disable(state->pdev);
-       if (state->clk)
-               clk_disable(state->clk);
-}
-
-static void fiq_debugger_uart_flush(struct fiq_debugger_state *state)
-{
-       if (state->pdata->uart_flush)
-               state->pdata->uart_flush(state->pdev);
-}
-
-static void fiq_debugger_putc(struct fiq_debugger_state *state, char c)
-{
-       state->pdata->uart_putc(state->pdev, c);
-}
-
-static void fiq_debugger_puts(struct fiq_debugger_state *state, char *s)
-{
-       unsigned c;
-       while ((c = *s++)) {
-               if (c == '\n')
-                       fiq_debugger_putc(state, '\r');
-               fiq_debugger_putc(state, c);
-       }
-}
-
-static void fiq_debugger_prompt(struct fiq_debugger_state *state)
-{
-       fiq_debugger_puts(state, "debug> ");
-}
-
-static void fiq_debugger_dump_kernel_log(struct fiq_debugger_state *state)
-{
-       char buf[512];
-       size_t len;
-       struct kmsg_dumper dumper = { .active = true };
-
-
-       kmsg_dump_rewind_nolock(&dumper);
-       while (kmsg_dump_get_line_nolock(&dumper, true, buf,
-                                        sizeof(buf) - 1, &len)) {
-               buf[len] = 0;
-               fiq_debugger_puts(state, buf);
-       }
-}
-
-static void fiq_debugger_printf(struct fiq_debugger_output *output,
-                              const char *fmt, ...)
-{
-       struct fiq_debugger_state *state;
-       char buf[256];
-       va_list ap;
-
-       state = container_of(output, struct fiq_debugger_state, output);
-       va_start(ap, fmt);
-       vsnprintf(buf, sizeof(buf), fmt, ap);
-       va_end(ap);
-
-       fiq_debugger_puts(state, buf);
-}
-
-/* Safe outside fiq context */
-static int fiq_debugger_printf_nfiq(void *cookie, const char *fmt, ...)
-{
-       struct fiq_debugger_state *state = cookie;
-       char buf[256];
-       va_list ap;
-       unsigned long irq_flags;
-
-       va_start(ap, fmt);
-       vsnprintf(buf, 128, fmt, ap);
-       va_end(ap);
-
-       local_irq_save(irq_flags);
-       fiq_debugger_puts(state, buf);
-       fiq_debugger_uart_flush(state);
-       local_irq_restore(irq_flags);
-       return state->debug_abort;
-}
-
-static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state)
-{
-       int n;
-       struct irq_desc *desc;
-
-       fiq_debugger_printf(&state->output,
-                       "irqnr       total  since-last   status  name\n");
-       for_each_irq_desc(n, desc) {
-               struct irqaction *act = desc->action;
-               if (!act && !kstat_irqs(n))
-                       continue;
-               fiq_debugger_printf(&state->output, "%5d: %10u %11u %8x  %s\n", n,
-                       kstat_irqs(n),
-                       kstat_irqs(n) - state->last_irqs[n],
-                       desc->status_use_accessors,
-                       (act && act->name) ? act->name : "???");
-               state->last_irqs[n] = kstat_irqs(n);
-       }
-}
-
-static void fiq_debugger_do_ps(struct fiq_debugger_state *state)
-{
-       struct task_struct *g;
-       struct task_struct *p;
-       unsigned task_state;
-       static const char stat_nam[] = "RSDTtZX";
-
-       fiq_debugger_printf(&state->output, "pid   ppid  prio task            pc\n");
-       read_lock(&tasklist_lock);
-       do_each_thread(g, p) {
-               task_state = p->state ? __ffs(p->state) + 1 : 0;
-               fiq_debugger_printf(&state->output,
-                            "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
-               fiq_debugger_printf(&state->output, "%-13.13s %c", p->comm,
-                            task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
-               if (task_state == TASK_RUNNING)
-                       fiq_debugger_printf(&state->output, " running\n");
-               else
-                       fiq_debugger_printf(&state->output, " %08lx\n",
-                                       thread_saved_pc(p));
-       } while_each_thread(g, p);
-       read_unlock(&tasklist_lock);
-}
-
-#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
-static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
-{
-       state->syslog_dumping = true;
-}
-
-static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
-{
-       state->syslog_dumping = false;
-}
-#else
-extern int do_syslog(int type, char __user *buf, int count);
-static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
-{
-       do_syslog(5 /* clear */, NULL, 0);
-}
-
-static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
-{
-       fiq_debugger_dump_kernel_log(state);
-}
-#endif
-
-static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq)
-{
-       if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) {
-               fiq_debugger_printf(&state->output, "sysrq-g blocked\n");
-               return;
-       }
-       fiq_debugger_begin_syslog_dump(state);
-       handle_sysrq(rq);
-       fiq_debugger_end_syslog_dump(state);
-}
-
-#ifdef CONFIG_KGDB
-static void fiq_debugger_do_kgdb(struct fiq_debugger_state *state)
-{
-       if (!fiq_kgdb_enable) {
-               fiq_debugger_printf(&state->output, "kgdb through fiq debugger not enabled\n");
-               return;
-       }
-
-       fiq_debugger_printf(&state->output, "enabling console and triggering kgdb\n");
-       state->console_enable = true;
-       handle_sysrq('g');
-}
-#endif
-
-static void fiq_debugger_schedule_work(struct fiq_debugger_state *state,
-               char *cmd)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&state->work_lock, flags);
-       if (state->work_cmd[0] != '\0') {
-               fiq_debugger_printf(&state->output, "work command processor busy\n");
-               spin_unlock_irqrestore(&state->work_lock, flags);
-               return;
-       }
-
-       strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd));
-       spin_unlock_irqrestore(&state->work_lock, flags);
-
-       schedule_work(&state->work);
-}
-
-static void fiq_debugger_work(struct work_struct *work)
-{
-       struct fiq_debugger_state *state;
-       char work_cmd[DEBUG_MAX];
-       char *cmd;
-       unsigned long flags;
-
-       state = container_of(work, struct fiq_debugger_state, work);
-
-       spin_lock_irqsave(&state->work_lock, flags);
-
-       strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd));
-       state->work_cmd[0] = '\0';
-
-       spin_unlock_irqrestore(&state->work_lock, flags);
-
-       cmd = work_cmd;
-       if (!strncmp(cmd, "reboot", 6)) {
-               cmd += 6;
-               while (*cmd == ' ')
-                       cmd++;
-               if ((*cmd != '\0') && sysrq_on())
-                       kernel_restart(cmd);
-               else
-                       kernel_restart(NULL);
-       } else {
-               fiq_debugger_printf(&state->output, "unknown work command '%s'\n",
-                               work_cmd);
-       }
-}
-
-/* This function CANNOT be called in FIQ context */
-static void fiq_debugger_irq_exec(struct fiq_debugger_state *state, char *cmd)
-{
-       if (!strcmp(cmd, "ps"))
-               fiq_debugger_do_ps(state);
-       if (!strcmp(cmd, "sysrq"))
-               fiq_debugger_do_sysrq(state, 'h');
-       if (!strncmp(cmd, "sysrq ", 6))
-               fiq_debugger_do_sysrq(state, cmd[6]);
-#ifdef CONFIG_KGDB
-       if (!strcmp(cmd, "kgdb"))
-               fiq_debugger_do_kgdb(state);
-#endif
-       if (!strncmp(cmd, "reboot", 6))
-               fiq_debugger_schedule_work(state, cmd);
-}
-
-static void fiq_debugger_help(struct fiq_debugger_state *state)
-{
-       fiq_debugger_printf(&state->output,
-                       "FIQ Debugger commands:\n");
-       if (sysrq_on()) {
-               fiq_debugger_printf(&state->output,
-                       " pc            PC status\n"
-                       " regs          Register dump\n"
-                       " allregs       Extended Register dump\n"
-                       " bt            Stack trace\n");
-               fiq_debugger_printf(&state->output,
-                       " reboot [<c>]  Reboot with command <c>\n"
-                       " reset [<c>]   Hard reset with command <c>\n"
-                       " irqs          Interrupt status\n"
-                       " kmsg          Kernel log\n"
-                       " version       Kernel version\n");
-               fiq_debugger_printf(&state->output,
-                       " cpu           Current CPU\n"
-                       " cpu <number>  Switch to CPU<number>\n"
-                       " sysrq         sysrq options\n"
-                       " sysrq <param> Execute sysrq with <param>\n");
-       } else {
-               fiq_debugger_printf(&state->output,
-                       " reboot        Reboot\n"
-                       " reset         Hard reset\n"
-                       " irqs          Interrupt status\n");
-       }
-       fiq_debugger_printf(&state->output,
-                       " sleep         Allow sleep while in FIQ\n"
-                       " nosleep       Disable sleep while in FIQ\n"
-                       " console       Switch terminal to console\n"
-                       " ps            Process list\n");
-#ifdef CONFIG_KGDB
-       if (fiq_kgdb_enable) {
-               fiq_debugger_printf(&state->output,
-                       " kgdb          Enter kernel debugger\n");
-       }
-#endif
-}
-
-static void fiq_debugger_take_affinity(void *info)
-{
-       struct fiq_debugger_state *state = info;
-       struct cpumask cpumask;
-
-       cpumask_clear(&cpumask);
-       cpumask_set_cpu(get_cpu(), &cpumask);
-
-       irq_set_affinity(state->uart_irq, &cpumask);
-}
-
-static void fiq_debugger_switch_cpu(struct fiq_debugger_state *state, int cpu)
-{
-       if (!fiq_debugger_have_fiq(state))
-               smp_call_function_single(cpu, fiq_debugger_take_affinity, state,
-                               false);
-       state->current_cpu = cpu;
-}
-
-static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state,
-                       const char *cmd, const struct pt_regs *regs,
-                       void *svc_sp)
-{
-       bool signal_helper = false;
-
-       if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
-               fiq_debugger_help(state);
-       } else if (!strcmp(cmd, "pc")) {
-               if (sysrq_on())
-                       fiq_debugger_dump_pc(&state->output, regs);
-       } else if (!strcmp(cmd, "regs")) {
-               if (sysrq_on())
-                       fiq_debugger_dump_regs(&state->output, regs);
-       } else if (!strcmp(cmd, "allregs")) {
-               if (sysrq_on())
-                       fiq_debugger_dump_allregs(&state->output, regs);
-       } else if (!strcmp(cmd, "bt")) {
-               if (sysrq_on())
-                       fiq_debugger_dump_stacktrace(&state->output, regs,
-                                                    100, svc_sp);
-       } else if (!strncmp(cmd, "reset", 5)) {
-               cmd += 5;
-               while (*cmd == ' ')
-                       cmd++;
-               if (*cmd && sysrq_on()) {
-                       char tmp_cmd[32];
-                       strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
-                       machine_restart(tmp_cmd);
-               } else {
-                       machine_restart(NULL);
-               }
-       } else if (!strcmp(cmd, "irqs")) {
-               fiq_debugger_dump_irqs(state);
-       } else if (!strcmp(cmd, "kmsg")) {
-               if (sysrq_on())
-                       fiq_debugger_dump_kernel_log(state);
-       } else if (!strcmp(cmd, "version")) {
-               if (sysrq_on())
-                       fiq_debugger_printf(&state->output, "%s\n",
-                                           linux_banner);
-       } else if (!strcmp(cmd, "sleep")) {
-               state->no_sleep = false;
-               fiq_debugger_printf(&state->output, "enabling sleep\n");
-       } else if (!strcmp(cmd, "nosleep")) {
-               state->no_sleep = true;
-               fiq_debugger_printf(&state->output, "disabling sleep\n");
-       } else if (!strcmp(cmd, "console")) {
-               fiq_debugger_printf(&state->output, "console mode\n");
-               fiq_debugger_uart_flush(state);
-               state->console_enable = true;
-       } else if (!strcmp(cmd, "cpu")) {
-               if (sysrq_on())
-                       fiq_debugger_printf(&state->output, "cpu %d\n",
-                                           state->current_cpu);
-       } else if (!strncmp(cmd, "cpu ", 4) && sysrq_on()) {
-               unsigned long cpu = 0;
-               if (kstrtoul(cmd + 4, 10, &cpu) == 0)
-                       fiq_debugger_switch_cpu(state, cpu);
-               else
-                       fiq_debugger_printf(&state->output, "invalid cpu\n");
-               fiq_debugger_printf(&state->output, "cpu %d\n",
-                                   state->current_cpu);
-       } else {
-               if (state->debug_busy) {
-                       fiq_debugger_printf(&state->output,
-                               "command processor busy. trying to abort.\n");
-                       state->debug_abort = -1;
-               } else {
-                       strcpy(state->debug_cmd, cmd);
-                       state->debug_busy = 1;
-               }
-
-               return true;
-       }
-       if (!state->console_enable)
-               fiq_debugger_prompt(state);
-
-       return signal_helper;
-}
-
-static void fiq_debugger_sleep_timer_expired(unsigned long data)
-{
-       struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
-       unsigned long flags;
-
-       spin_lock_irqsave(&state->sleep_timer_lock, flags);
-       if (state->uart_enabled && !state->no_sleep) {
-               if (state->debug_enable && !state->console_enable) {
-                       state->debug_enable = false;
-                       fiq_debugger_printf_nfiq(state,
-                                       "suspending fiq debugger\n");
-               }
-               state->ignore_next_wakeup_irq = true;
-               fiq_debugger_uart_disable(state);
-               state->uart_enabled = false;
-               fiq_debugger_enable_wakeup_irq(state);
-       }
-       wake_unlock(&state->debugger_wake_lock);
-       spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
-}
-
-static void fiq_debugger_handle_wakeup(struct fiq_debugger_state *state)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&state->sleep_timer_lock, flags);
-       if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
-               state->ignore_next_wakeup_irq = false;
-       } else if (!state->uart_enabled) {
-               wake_lock(&state->debugger_wake_lock);
-               fiq_debugger_uart_enable(state);
-               state->uart_enabled = true;
-               fiq_debugger_disable_wakeup_irq(state);
-               mod_timer(&state->sleep_timer, jiffies + HZ / 2);
-       }
-       spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
-}
-
-static irqreturn_t fiq_debugger_wakeup_irq_handler(int irq, void *dev)
-{
-       struct fiq_debugger_state *state = dev;
-
-       if (!state->no_sleep)
-               fiq_debugger_puts(state, "WAKEUP\n");
-       fiq_debugger_handle_wakeup(state);
-
-       return IRQ_HANDLED;
-}
-
-static
-void fiq_debugger_handle_console_irq_context(struct fiq_debugger_state *state)
-{
-#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
-       if (state->tty_port.ops) {
-               int i;
-               int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
-               for (i = 0; i < count; i++) {
-                       int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
-                       tty_insert_flip_char(&state->tty_port, c, TTY_NORMAL);
-                       if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
-                               pr_warn("fiq tty failed to consume byte\n");
-               }
-               tty_flip_buffer_push(&state->tty_port);
-       }
-#endif
-}
-
-static void fiq_debugger_handle_irq_context(struct fiq_debugger_state *state)
-{
-       if (!state->no_sleep) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&state->sleep_timer_lock, flags);
-               wake_lock(&state->debugger_wake_lock);
-               mod_timer(&state->sleep_timer, jiffies + HZ * 5);
-               spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
-       }
-       fiq_debugger_handle_console_irq_context(state);
-       if (state->debug_busy) {
-               fiq_debugger_irq_exec(state, state->debug_cmd);
-               if (!state->console_enable)
-                       fiq_debugger_prompt(state);
-               state->debug_busy = 0;
-       }
-}
-
-static int fiq_debugger_getc(struct fiq_debugger_state *state)
-{
-       return state->pdata->uart_getc(state->pdev);
-}
-
-static bool fiq_debugger_handle_uart_interrupt(struct fiq_debugger_state *state,
-                       int this_cpu, const struct pt_regs *regs, void *svc_sp)
-{
-       int c;
-       static int last_c;
-       int count = 0;
-       bool signal_helper = false;
-
-       if (this_cpu != state->current_cpu) {
-               if (state->in_fiq)
-                       return false;
-
-               if (atomic_inc_return(&state->unhandled_fiq_count) !=
-                                       MAX_UNHANDLED_FIQ_COUNT)
-                       return false;
-
-               fiq_debugger_printf(&state->output,
-                       "fiq_debugger: cpu %d not responding, "
-                       "reverting to cpu %d\n", state->current_cpu,
-                       this_cpu);
-
-               atomic_set(&state->unhandled_fiq_count, 0);
-               fiq_debugger_switch_cpu(state, this_cpu);
-               return false;
-       }
-
-       state->in_fiq = true;
-
-       while ((c = fiq_debugger_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
-               count++;
-               if (!state->debug_enable) {
-                       if ((c == 13) || (c == 10)) {
-                               state->debug_enable = true;
-                               state->debug_count = 0;
-                               fiq_debugger_prompt(state);
-                       }
-               } else if (c == FIQ_DEBUGGER_BREAK) {
-                       state->console_enable = false;
-                       fiq_debugger_puts(state, "fiq debugger mode\n");
-                       state->debug_count = 0;
-                       fiq_debugger_prompt(state);
-#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
-               } else if (state->console_enable && state->tty_rbuf) {
-                       fiq_debugger_ringbuf_push(state->tty_rbuf, c);
-                       signal_helper = true;
-#endif
-               } else if ((c >= ' ') && (c < 127)) {
-                       if (state->debug_count < (DEBUG_MAX - 1)) {
-                               state->debug_buf[state->debug_count++] = c;
-                               fiq_debugger_putc(state, c);
-                       }
-               } else if ((c == 8) || (c == 127)) {
-                       if (state->debug_count > 0) {
-                               state->debug_count--;
-                               fiq_debugger_putc(state, 8);
-                               fiq_debugger_putc(state, ' ');
-                               fiq_debugger_putc(state, 8);
-                       }
-               } else if ((c == 13) || (c == 10)) {
-                       if (c == '\r' || (c == '\n' && last_c != '\r')) {
-                               fiq_debugger_putc(state, '\r');
-                               fiq_debugger_putc(state, '\n');
-                       }
-                       if (state->debug_count) {
-                               state->debug_buf[state->debug_count] = 0;
-                               state->debug_count = 0;
-                               signal_helper |=
-                                       fiq_debugger_fiq_exec(state,
-                                                       state->debug_buf,
-                                                       regs, svc_sp);
-                       } else {
-                               fiq_debugger_prompt(state);
-                       }
-               }
-               last_c = c;
-       }
-       if (!state->console_enable)
-               fiq_debugger_uart_flush(state);
-       if (state->pdata->fiq_ack)
-               state->pdata->fiq_ack(state->pdev, state->fiq);
-
-       /* poke sleep timer if necessary */
-       if (state->debug_enable && !state->no_sleep)
-               signal_helper = true;
-
-       atomic_set(&state->unhandled_fiq_count, 0);
-       state->in_fiq = false;
-
-       return signal_helper;
-}
-
-#ifdef CONFIG_FIQ_GLUE
-static void fiq_debugger_fiq(struct fiq_glue_handler *h,
-               const struct pt_regs *regs, void *svc_sp)
-{
-       struct fiq_debugger_state *state =
-               container_of(h, struct fiq_debugger_state, handler);
-       unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
-       bool need_irq;
-
-       need_irq = fiq_debugger_handle_uart_interrupt(state, this_cpu, regs,
-                       svc_sp);
-       if (need_irq)
-               fiq_debugger_force_irq(state);
-}
-#endif
-
-/*
- * When not using FIQs, we only use this single interrupt as an entry point.
- * This just effectively takes over the UART interrupt and does all the work
- * in this context.
- */
-static irqreturn_t fiq_debugger_uart_irq(int irq, void *dev)
-{
-       struct fiq_debugger_state *state = dev;
-       bool not_done;
-
-       fiq_debugger_handle_wakeup(state);
-
-       /* handle the debugger irq in regular context */
-       not_done = fiq_debugger_handle_uart_interrupt(state, smp_processor_id(),
-                                             get_irq_regs(),
-                                             current_thread_info());
-       if (not_done)
-               fiq_debugger_handle_irq_context(state);
-
-       return IRQ_HANDLED;
-}
-
-/*
- * If FIQs are used, not everything can happen in fiq context.
- * FIQ handler does what it can and then signals this interrupt to finish the
- * job in irq context.
- */
-static irqreturn_t fiq_debugger_signal_irq(int irq, void *dev)
-{
-       struct fiq_debugger_state *state = dev;
-
-       if (state->pdata->force_irq_ack)
-               state->pdata->force_irq_ack(state->pdev, state->signal_irq);
-
-       fiq_debugger_handle_irq_context(state);
-
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_FIQ_GLUE
-static void fiq_debugger_resume(struct fiq_glue_handler *h)
-{
-       struct fiq_debugger_state *state =
-               container_of(h, struct fiq_debugger_state, handler);
-       if (state->pdata->uart_resume)
-               state->pdata->uart_resume(state->pdev);
-}
-#endif
-
-#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
-struct tty_driver *fiq_debugger_console_device(struct console *co, int *index)
-{
-       *index = co->index;
-       return fiq_tty_driver;
-}
-
-static void fiq_debugger_console_write(struct console *co,
-                               const char *s, unsigned int count)
-{
-       struct fiq_debugger_state *state;
-       unsigned long flags;
-
-       state = container_of(co, struct fiq_debugger_state, console);
-
-       if (!state->console_enable && !state->syslog_dumping)
-               return;
-
-       fiq_debugger_uart_enable(state);
-       spin_lock_irqsave(&state->console_lock, flags);
-       while (count--) {
-               if (*s == '\n')
-                       fiq_debugger_putc(state, '\r');
-               fiq_debugger_putc(state, *s++);
-       }
-       fiq_debugger_uart_flush(state);
-       spin_unlock_irqrestore(&state->console_lock, flags);
-       fiq_debugger_uart_disable(state);
-}
-
-static struct console fiq_debugger_console = {
-       .name = "ttyFIQ",
-       .device = fiq_debugger_console_device,
-       .write = fiq_debugger_console_write,
-       .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
-};
-
-int fiq_tty_open(struct tty_struct *tty, struct file *filp)
-{
-       int line = tty->index;
-       struct fiq_debugger_state **states = tty->driver->driver_state;
-       struct fiq_debugger_state *state = states[line];
-
-       return tty_port_open(&state->tty_port, tty, filp);
-}
-
-void fiq_tty_close(struct tty_struct *tty, struct file *filp)
-{
-       tty_port_close(tty->port, tty, filp);
-}
-
-int  fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
-       int i;
-       int line = tty->index;
-       struct fiq_debugger_state **states = tty->driver->driver_state;
-       struct fiq_debugger_state *state = states[line];
-
-       if (!state->console_enable)
-               return count;
-
-       fiq_debugger_uart_enable(state);
-       spin_lock_irq(&state->console_lock);
-       for (i = 0; i < count; i++)
-               fiq_debugger_putc(state, *buf++);
-       spin_unlock_irq(&state->console_lock);
-       fiq_debugger_uart_disable(state);
-
-       return count;
-}
-
-int  fiq_tty_write_room(struct tty_struct *tty)
-{
-       return 16;
-}
-
-#ifdef CONFIG_CONSOLE_POLL
-static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options)
-{
-       return 0;
-}
-
-static int fiq_tty_poll_get_char(struct tty_driver *driver, int line)
-{
-       struct fiq_debugger_state **states = driver->driver_state;
-       struct fiq_debugger_state *state = states[line];
-       int c = NO_POLL_CHAR;
-
-       fiq_debugger_uart_enable(state);
-       if (fiq_debugger_have_fiq(state)) {
-               int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
-               if (count > 0) {
-                       c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
-                       fiq_debugger_ringbuf_consume(state->tty_rbuf, 1);
-               }
-       } else {
-               c = fiq_debugger_getc(state);
-               if (c == FIQ_DEBUGGER_NO_CHAR)
-                       c = NO_POLL_CHAR;
-       }
-       fiq_debugger_uart_disable(state);
-
-       return c;
-}
-
-static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch)
-{
-       struct fiq_debugger_state **states = driver->driver_state;
-       struct fiq_debugger_state *state = states[line];
-       fiq_debugger_uart_enable(state);
-       fiq_debugger_putc(state, ch);
-       fiq_debugger_uart_disable(state);
-}
-#endif
-
-static const struct tty_port_operations fiq_tty_port_ops;
-
-static const struct tty_operations fiq_tty_driver_ops = {
-       .write = fiq_tty_write,
-       .write_room = fiq_tty_write_room,
-       .open = fiq_tty_open,
-       .close = fiq_tty_close,
-#ifdef CONFIG_CONSOLE_POLL
-       .poll_init = fiq_tty_poll_init,
-       .poll_get_char = fiq_tty_poll_get_char,
-       .poll_put_char = fiq_tty_poll_put_char,
-#endif
-};
-
-static int fiq_debugger_tty_init(void)
-{
-       int ret;
-       struct fiq_debugger_state **states = NULL;
-
-       states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL);
-       if (!states) {
-               pr_err("Failed to allocate fiq debugger state structures\n");
-               return -ENOMEM;
-       }
-
-       fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS);
-       if (!fiq_tty_driver) {
-               pr_err("Failed to allocate fiq debugger tty\n");
-               ret = -ENOMEM;
-               goto err_free_state;
-       }
-
-       fiq_tty_driver->owner           = THIS_MODULE;
-       fiq_tty_driver->driver_name     = "fiq-debugger";
-       fiq_tty_driver->name            = "ttyFIQ";
-       fiq_tty_driver->type            = TTY_DRIVER_TYPE_SERIAL;
-       fiq_tty_driver->subtype         = SERIAL_TYPE_NORMAL;
-       fiq_tty_driver->init_termios    = tty_std_termios;
-       fiq_tty_driver->flags           = TTY_DRIVER_REAL_RAW |
-                                         TTY_DRIVER_DYNAMIC_DEV;
-       fiq_tty_driver->driver_state    = states;
-
-       fiq_tty_driver->init_termios.c_cflag =
-                                       B115200 | CS8 | CREAD | HUPCL | CLOCAL;
-       fiq_tty_driver->init_termios.c_ispeed = 115200;
-       fiq_tty_driver->init_termios.c_ospeed = 115200;
-
-       tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops);
-
-       ret = tty_register_driver(fiq_tty_driver);
-       if (ret) {
-               pr_err("Failed to register fiq tty: %d\n", ret);
-               goto err_free_tty;
-       }
-
-       pr_info("Registered FIQ tty driver\n");
-       return 0;
-
-err_free_tty:
-       put_tty_driver(fiq_tty_driver);
-       fiq_tty_driver = NULL;
-err_free_state:
-       kfree(states);
-       return ret;
-}
-
-static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state)
-{
-       int ret;
-       struct device *tty_dev;
-       struct fiq_debugger_state **states = fiq_tty_driver->driver_state;
-
-       states[state->pdev->id] = state;
-
-       state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
-       if (!state->tty_rbuf) {
-               pr_err("Failed to allocate fiq debugger ringbuf\n");
-               ret = -ENOMEM;
-               goto err;
-       }
-
-       tty_port_init(&state->tty_port);
-       state->tty_port.ops = &fiq_tty_port_ops;
-
-       tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver,
-                                          state->pdev->id, &state->pdev->dev);
-       if (IS_ERR(tty_dev)) {
-               pr_err("Failed to register fiq debugger tty device\n");
-               ret = PTR_ERR(tty_dev);
-               goto err;
-       }
-
-       device_set_wakeup_capable(tty_dev, 1);
-
-       pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id);
-
-       return 0;
-
-err:
-       fiq_debugger_ringbuf_free(state->tty_rbuf);
-       state->tty_rbuf = NULL;
-       return ret;
-}
-#endif
-
-static int fiq_debugger_dev_suspend(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct fiq_debugger_state *state = platform_get_drvdata(pdev);
-
-       if (state->pdata->uart_dev_suspend)
-               return state->pdata->uart_dev_suspend(pdev);
-       return 0;
-}
-
-static int fiq_debugger_dev_resume(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct fiq_debugger_state *state = platform_get_drvdata(pdev);
-
-       if (state->pdata->uart_dev_resume)
-               return state->pdata->uart_dev_resume(pdev);
-       return 0;
-}
-
-static int fiq_debugger_probe(struct platform_device *pdev)
-{
-       int ret;
-       struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
-       struct fiq_debugger_state *state;
-       int fiq;
-       int uart_irq;
-
-       if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS)
-               return -EINVAL;
-
-       if (!pdata->uart_getc || !pdata->uart_putc)
-               return -EINVAL;
-       if ((pdata->uart_enable && !pdata->uart_disable) ||
-           (!pdata->uart_enable && pdata->uart_disable))
-               return -EINVAL;
-
-       fiq = platform_get_irq_byname(pdev, "fiq");
-       uart_irq = platform_get_irq_byname(pdev, "uart_irq");
-
-       /* uart_irq mode and fiq mode are mutually exclusive, but one of them
-        * is required */
-       if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
-               return -EINVAL;
-       if (fiq >= 0 && !pdata->fiq_enable)
-               return -EINVAL;
-
-       state = kzalloc(sizeof(*state), GFP_KERNEL);
-       state->output.printf = fiq_debugger_printf;
-       setup_timer(&state->sleep_timer, fiq_debugger_sleep_timer_expired,
-                   (unsigned long)state);
-       state->pdata = pdata;
-       state->pdev = pdev;
-       state->no_sleep = initial_no_sleep;
-       state->debug_enable = initial_debug_enable;
-       state->console_enable = initial_console_enable;
-
-       state->fiq = fiq;
-       state->uart_irq = uart_irq;
-       state->signal_irq = platform_get_irq_byname(pdev, "signal");
-       state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
-
-       INIT_WORK(&state->work, fiq_debugger_work);
-       spin_lock_init(&state->work_lock);
-
-       platform_set_drvdata(pdev, state);
-
-       spin_lock_init(&state->sleep_timer_lock);
-
-       if (state->wakeup_irq < 0 && fiq_debugger_have_fiq(state))
-               state->no_sleep = true;
-       state->ignore_next_wakeup_irq = !state->no_sleep;
-
-       wake_lock_init(&state->debugger_wake_lock,
-                       WAKE_LOCK_SUSPEND, "serial-debug");
-
-       state->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(state->clk))
-               state->clk = NULL;
-
-       /* do not call pdata->uart_enable here since uart_init may still
-        * need to do some initialization before uart_enable can work.
-        * So, only try to manage the clock during init.
-        */
-       if (state->clk)
-               clk_enable(state->clk);
-
-       if (pdata->uart_init) {
-               ret = pdata->uart_init(pdev);
-               if (ret)
-                       goto err_uart_init;
-       }
-
-       fiq_debugger_printf_nfiq(state,
-                               "<hit enter %sto activate fiq debugger>\n",
-                               state->no_sleep ? "" : "twice ");
-
-#ifdef CONFIG_FIQ_GLUE
-       if (fiq_debugger_have_fiq(state)) {
-               state->handler.fiq = fiq_debugger_fiq;
-               state->handler.resume = fiq_debugger_resume;
-               ret = fiq_glue_register_handler(&state->handler);
-               if (ret) {
-                       pr_err("%s: could not install fiq handler\n", __func__);
-                       goto err_register_irq;
-               }
-
-               pdata->fiq_enable(pdev, state->fiq, 1);
-       } else
-#endif
-       {
-               ret = request_irq(state->uart_irq, fiq_debugger_uart_irq,
-                                 IRQF_NO_SUSPEND, "debug", state);
-               if (ret) {
-                       pr_err("%s: could not install irq handler\n", __func__);
-                       goto err_register_irq;
-               }
-
-               /* for irq-only mode, we want this irq to wake us up, if it
-                * can.
-                */
-               enable_irq_wake(state->uart_irq);
-       }
-
-       if (state->clk)
-               clk_disable(state->clk);
-
-       if (state->signal_irq >= 0) {
-               ret = request_irq(state->signal_irq, fiq_debugger_signal_irq,
-                         IRQF_TRIGGER_RISING, "debug-signal", state);
-               if (ret)
-                       pr_err("serial_debugger: could not install signal_irq");
-       }
-
-       if (state->wakeup_irq >= 0) {
-               ret = request_irq(state->wakeup_irq,
-                                 fiq_debugger_wakeup_irq_handler,
-                                 IRQF_TRIGGER_FALLING,
-                                 "debug-wakeup", state);
-               if (ret) {
-                       pr_err("serial_debugger: "
-                               "could not install wakeup irq\n");
-                       state->wakeup_irq = -1;
-               } else {
-                       ret = enable_irq_wake(state->wakeup_irq);
-                       if (ret) {
-                               pr_err("serial_debugger: "
-                                       "could not enable wakeup\n");
-                               state->wakeup_irq_no_set_wake = true;
-                       }
-               }
-       }
-       if (state->no_sleep)
-               fiq_debugger_handle_wakeup(state);
-
-#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
-       spin_lock_init(&state->console_lock);
-       state->console = fiq_debugger_console;
-       state->console.index = pdev->id;
-       if (!console_set_on_cmdline)
-               add_preferred_console(state->console.name,
-                       state->console.index, NULL);
-       register_console(&state->console);
-       fiq_debugger_tty_init_one(state);
-#endif
-       return 0;
-
-err_register_irq:
-       if (pdata->uart_free)
-               pdata->uart_free(pdev);
-err_uart_init:
-       if (state->clk)
-               clk_disable(state->clk);
-       if (state->clk)
-               clk_put(state->clk);
-       wake_lock_destroy(&state->debugger_wake_lock);
-       platform_set_drvdata(pdev, NULL);
-       kfree(state);
-       return ret;
-}
-
-static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
-       .suspend        = fiq_debugger_dev_suspend,
-       .resume         = fiq_debugger_dev_resume,
-};
-
-static struct platform_driver fiq_debugger_driver = {
-       .probe  = fiq_debugger_probe,
-       .driver = {
-               .name   = "fiq_debugger",
-               .pm     = &fiq_debugger_dev_pm_ops,
-       },
-};
-
-#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY)
-int fiq_debugger_uart_overlay(void)
-{
-       struct device_node *onp = of_find_node_by_path("/uart_overlay@0");
-       int ret;
-
-       if (!onp) {
-               pr_err("serial_debugger: uart overlay not found\n");
-               return -ENODEV;
-       }
-
-       ret = of_overlay_create(onp);
-       if (ret < 0) {
-               pr_err("serial_debugger: fail to create overlay: %d\n", ret);
-               of_node_put(onp);
-               return ret;
-       }
-
-       pr_info("serial_debugger: uart overlay applied\n");
-       return 0;
-}
-#endif
-
-static int __init fiq_debugger_init(void)
-{
-       if (fiq_debugger_disable) {
-               pr_err("serial_debugger: disabled\n");
-               return -ENODEV;
-       }
-#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
-       fiq_debugger_tty_init();
-#endif
-#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY)
-       fiq_debugger_uart_overlay();
-#endif
-       return platform_driver_register(&fiq_debugger_driver);
-}
-
-postcore_initcall(fiq_debugger_init);
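For context, a hedged sketch of the two callbacks the probe above treats as mandatory, pdata->uart_getc and pdata->uart_putc. Everything below is hypothetical glue for an imaginary memory-mapped uart (the register offsets and bit positions are invented); only the contract comes from the driver: uart_getc() returns FIQ_DEBUGGER_NO_CHAR when nothing is pending, uart_putc() emits a single character, and uart_enable/uart_disable may be left unset as long as neither is provided without the other.

#include <linux/io.h>
#include <linux/platform_device.h>
#include "fiq_debugger.h"

/* hypothetical mmio uart; base address and register layout are invented */
static void __iomem *example_uart_base;

static int example_uart_getc(struct platform_device *pdev)
{
	/* return NO_CHAR when the rx fifo is empty, else one byte */
	if (!(readl(example_uart_base + 0x18) & BIT(0)))
		return FIQ_DEBUGGER_NO_CHAR;
	return readl(example_uart_base + 0x00) & 0xff;
}

static void example_uart_putc(struct platform_device *pdev, unsigned int c)
{
	/* busy-wait for tx fifo space, then write the character */
	while (readl(example_uart_base + 0x18) & BIT(5))
		cpu_relax();
	writel(c, example_uart_base + 0x00);
}

static struct fiq_debugger_pdata example_fiq_debugger_pdata = {
	.uart_getc	= example_uart_getc,
	.uart_putc	= example_uart_putc,
};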
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.h b/drivers/staging/android/fiq_debugger/fiq_debugger.h
deleted file mode 100644 (file)
index c9ec4f8..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * drivers/staging/android/fiq_debugger/fiq_debugger.h
- *
- * Copyright (C) 2010 Google, Inc.
- * Author: Colin Cross <ccross@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
-#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
-
-#include <linux/serial_core.h>
-
-#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
-#define FIQ_DEBUGGER_BREAK 0x00ff0100
-
-#define FIQ_DEBUGGER_FIQ_IRQ_NAME      "fiq"
-#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME   "signal"
-#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME   "wakeup"
-
-/**
- * struct fiq_debugger_pdata - fiq debugger platform data
- * @uart_resume:       used to restore uart state right before enabling
- *                     the fiq.
- * @uart_enable:       Do the work necessary to communicate with the uart
- *                     hw (enable clocks, etc.). This must be ref-counted.
- * @uart_disable:      Do the work necessary to disable the uart hw
- *                     (disable clocks, etc.). This must be ref-counted.
- * @uart_dev_suspend:  called during PM suspend, generally not needed
- *                     for real fiq mode debugger.
- * @uart_dev_resume:   called during PM resume, generally not needed
- *                     for real fiq mode debugger.
- */
-struct fiq_debugger_pdata {
-       int (*uart_init)(struct platform_device *pdev);
-       void (*uart_free)(struct platform_device *pdev);
-       int (*uart_resume)(struct platform_device *pdev);
-       int (*uart_getc)(struct platform_device *pdev);
-       void (*uart_putc)(struct platform_device *pdev, unsigned int c);
-       void (*uart_flush)(struct platform_device *pdev);
-       void (*uart_enable)(struct platform_device *pdev);
-       void (*uart_disable)(struct platform_device *pdev);
-
-       int (*uart_dev_suspend)(struct platform_device *pdev);
-       int (*uart_dev_resume)(struct platform_device *pdev);
-
-       void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
-                                                               bool enable);
-       void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
-
-       void (*force_irq)(struct platform_device *pdev, unsigned int irq);
-       void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
-};
-
-#endif
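And a matching, equally hypothetical board-file fragment showing how such a pdata would have been bound to the driver: the device name has to match the "fiq_debugger" platform driver, the id selects ttyFIQ<id> (and must stay below MAX_FIQ_DEBUGGER_PORTS), and the IRQ resources carry the names the probe looks up. Only "uart_irq" and "wakeup" are wired here, i.e. irq-only mode, since "fiq" and "uart_irq" are mutually exclusive; the IRQ numbers are invented.

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include "fiq_debugger.h"

/* the callback table sketched after fiq_debugger.c above */
extern struct fiq_debugger_pdata example_fiq_debugger_pdata;

static struct resource example_fiq_debugger_resources[] = {
	DEFINE_RES_IRQ_NAMED(42, "uart_irq"),	/* hypothetical irq numbers */
	DEFINE_RES_IRQ_NAMED(43, "wakeup"),
};

static struct platform_device example_fiq_debugger_device = {
	.name		= "fiq_debugger",
	.id		= 0,			/* probes as ttyFIQ0 */
	.resource	= example_fiq_debugger_resources,
	.num_resources	= ARRAY_SIZE(example_fiq_debugger_resources),
	.dev = {
		.platform_data = &example_fiq_debugger_pdata,
	},
};

Registering it from board init code would then be a single call:

	platform_device_register(&example_fiq_debugger_device);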
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c
deleted file mode 100644 (file)
index 8b3e013..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (C) 2014 Google, Inc.
- * Author: Colin Cross <ccross@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/ptrace.h>
-#include <linux/uaccess.h>
-
-#include <asm/stacktrace.h>
-
-#include "fiq_debugger_priv.h"
-
-static char *mode_name(unsigned cpsr)
-{
-       switch (cpsr & MODE_MASK) {
-       case USR_MODE: return "USR";
-       case FIQ_MODE: return "FIQ";
-       case IRQ_MODE: return "IRQ";
-       case SVC_MODE: return "SVC";
-       case ABT_MODE: return "ABT";
-       case UND_MODE: return "UND";
-       case SYSTEM_MODE: return "SYS";
-       default: return "???";
-       }
-}
-
-void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
-               const struct pt_regs *regs)
-{
-       output->printf(output, " pc %08x cpsr %08x mode %s\n",
-               regs->ARM_pc, regs->ARM_cpsr, mode_name(regs->ARM_cpsr));
-}
-
-void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
-               const struct pt_regs *regs)
-{
-       output->printf(output,
-                       " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
-                       regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
-       output->printf(output,
-                       " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
-                       regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7);
-       output->printf(output,
-                       " r8 %08x  r9 %08x r10 %08x r11 %08x  mode %s\n",
-                       regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp,
-                       mode_name(regs->ARM_cpsr));
-       output->printf(output,
-                       " ip %08x  sp %08x  lr %08x  pc %08x cpsr %08x\n",
-                       regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc,
-                       regs->ARM_cpsr);
-}
-
-struct mode_regs {
-       unsigned long sp_svc;
-       unsigned long lr_svc;
-       unsigned long spsr_svc;
-
-       unsigned long sp_abt;
-       unsigned long lr_abt;
-       unsigned long spsr_abt;
-
-       unsigned long sp_und;
-       unsigned long lr_und;
-       unsigned long spsr_und;
-
-       unsigned long sp_irq;
-       unsigned long lr_irq;
-       unsigned long spsr_irq;
-
-       unsigned long r8_fiq;
-       unsigned long r9_fiq;
-       unsigned long r10_fiq;
-       unsigned long r11_fiq;
-       unsigned long r12_fiq;
-       unsigned long sp_fiq;
-       unsigned long lr_fiq;
-       unsigned long spsr_fiq;
-};
-
-static void __naked get_mode_regs(struct mode_regs *regs)
-{
-       asm volatile (
-       "mrs    r1, cpsr\n"
-       "msr    cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
-       "stmia  r0!, {r13 - r14}\n"
-       "mrs    r2, spsr\n"
-       "msr    cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
-       "stmia  r0!, {r2, r13 - r14}\n"
-       "mrs    r2, spsr\n"
-       "msr    cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
-       "stmia  r0!, {r2, r13 - r14}\n"
-       "mrs    r2, spsr\n"
-       "msr    cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
-       "stmia  r0!, {r2, r13 - r14}\n"
-       "mrs    r2, spsr\n"
-       "msr    cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
-       "stmia  r0!, {r2, r8 - r14}\n"
-       "mrs    r2, spsr\n"
-       "stmia  r0!, {r2}\n"
-       "msr    cpsr_c, r1\n"
-       "bx     lr\n");
-}
-
-
-void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
-               const struct pt_regs *regs)
-{
-       struct mode_regs mode_regs;
-       unsigned long mode = regs->ARM_cpsr & MODE_MASK;
-
-       fiq_debugger_dump_regs(output, regs);
-       get_mode_regs(&mode_regs);
-
-       output->printf(output,
-                       "%csvc: sp %08x  lr %08x  spsr %08x\n",
-                       mode == SVC_MODE ? '*' : ' ',
-                       mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
-       output->printf(output,
-                       "%cabt: sp %08x  lr %08x  spsr %08x\n",
-                       mode == ABT_MODE ? '*' : ' ',
-                       mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
-       output->printf(output,
-                       "%cund: sp %08x  lr %08x  spsr %08x\n",
-                       mode == UND_MODE ? '*' : ' ',
-                       mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
-       output->printf(output,
-                       "%cirq: sp %08x  lr %08x  spsr %08x\n",
-                       mode == IRQ_MODE ? '*' : ' ',
-                       mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
-       output->printf(output,
-                       "%cfiq: r8 %08x  r9 %08x  r10 %08x  r11 %08x  r12 %08x\n",
-                       mode == FIQ_MODE ? '*' : ' ',
-                       mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
-                       mode_regs.r11_fiq, mode_regs.r12_fiq);
-       output->printf(output,
-                       " fiq: sp %08x  lr %08x  spsr %08x\n",
-                       mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
-}
-
-struct stacktrace_state {
-       struct fiq_debugger_output *output;
-       unsigned int depth;
-};
-
-static int report_trace(struct stackframe *frame, void *d)
-{
-       struct stacktrace_state *sts = d;
-
-       if (sts->depth) {
-               sts->output->printf(sts->output,
-                       "  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
-                       frame->pc, frame->pc, frame->lr, frame->lr,
-                       frame->sp, frame->fp);
-               sts->depth--;
-               return 0;
-       }
-       sts->output->printf(sts->output, "  ...\n");
-
-       return sts->depth == 0;
-}
-
-struct frame_tail {
-       struct frame_tail *fp;
-       unsigned long sp;
-       unsigned long lr;
-} __attribute__((packed));
-
-static struct frame_tail *user_backtrace(struct fiq_debugger_output *output,
-                                       struct frame_tail *tail)
-{
-       struct frame_tail buftail[2];
-
-       /* Also check accessibility of one struct frame_tail beyond */
-       if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
-               output->printf(output, "  invalid frame pointer %p\n",
-                               tail);
-               return NULL;
-       }
-       if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
-               output->printf(output,
-                       "  failed to copy frame pointer %p\n", tail);
-               return NULL;
-       }
-
-       output->printf(output, "  %p\n", buftail[0].lr);
-
-       /* frame pointers should strictly progress back up the stack
-        * (towards higher addresses) */
-       if (tail >= buftail[0].fp)
-               return NULL;
-
-       return buftail[0].fp-1;
-}
-
-void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
-               const struct pt_regs *regs, unsigned int depth, void *ssp)
-{
-       struct frame_tail *tail;
-       struct thread_info *real_thread_info = THREAD_INFO(ssp);
-       struct stacktrace_state sts;
-
-       sts.depth = depth;
-       sts.output = output;
-       *current_thread_info() = *real_thread_info;
-
-       if (!current)
-               output->printf(output, "current NULL\n");
-       else
-               output->printf(output, "pid: %d  comm: %s\n",
-                       current->pid, current->comm);
-       fiq_debugger_dump_regs(output, regs);
-
-       if (!user_mode(regs)) {
-               struct stackframe frame;
-               frame.fp = regs->ARM_fp;
-               frame.sp = regs->ARM_sp;
-               frame.lr = regs->ARM_lr;
-               frame.pc = regs->ARM_pc;
-               output->printf(output,
-                       "  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
-                       regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
-                       regs->ARM_sp, regs->ARM_fp);
-               walk_stackframe(&frame, report_trace, &sts);
-               return;
-       }
-
-       tail = ((struct frame_tail *) regs->ARM_fp) - 1;
-       while (depth-- && tail && !((unsigned long) tail & 3))
-               tail = user_backtrace(output, tail);
-}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c
deleted file mode 100644 (file)
index 97246bc..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2014 Google, Inc.
- * Author: Colin Cross <ccross@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/ptrace.h>
-#include <asm/stacktrace.h>
-
-#include "fiq_debugger_priv.h"
-
-static char *mode_name(const struct pt_regs *regs)
-{
-       if (compat_user_mode(regs)) {
-               return "USR";
-       } else {
-               switch (processor_mode(regs)) {
-               case PSR_MODE_EL0t: return "EL0t";
-               case PSR_MODE_EL1t: return "EL1t";
-               case PSR_MODE_EL1h: return "EL1h";
-               case PSR_MODE_EL2t: return "EL2t";
-               case PSR_MODE_EL2h: return "EL2h";
-               default: return "???";
-               }
-       }
-}
-
-void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
-               const struct pt_regs *regs)
-{
-       output->printf(output, " pc %016lx cpsr %08lx mode %s\n",
-               regs->pc, regs->pstate, mode_name(regs));
-}
-
-void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output,
-               const struct pt_regs *regs)
-{
-       output->printf(output, " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
-                       regs->compat_usr(0), regs->compat_usr(1),
-                       regs->compat_usr(2), regs->compat_usr(3));
-       output->printf(output, " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
-                       regs->compat_usr(4), regs->compat_usr(5),
-                       regs->compat_usr(6), regs->compat_usr(7));
-       output->printf(output, " r8 %08x  r9 %08x r10 %08x r11 %08x\n",
-                       regs->compat_usr(8), regs->compat_usr(9),
-                       regs->compat_usr(10), regs->compat_usr(11));
-       output->printf(output, " ip %08x  sp %08x  lr %08x  pc %08x\n",
-                       regs->compat_usr(12), regs->compat_sp,
-                       regs->compat_lr, regs->pc);
-       output->printf(output, " cpsr %08x (%s)\n",
-                       regs->pstate, mode_name(regs));
-}
-
-void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output,
-               const struct pt_regs *regs)
-{
-
-       output->printf(output, "  x0 %016lx   x1 %016lx\n",
-                       regs->regs[0], regs->regs[1]);
-       output->printf(output, "  x2 %016lx   x3 %016lx\n",
-                       regs->regs[2], regs->regs[3]);
-       output->printf(output, "  x4 %016lx   x5 %016lx\n",
-                       regs->regs[4], regs->regs[5]);
-       output->printf(output, "  x6 %016lx   x7 %016lx\n",
-                       regs->regs[6], regs->regs[7]);
-       output->printf(output, "  x8 %016lx   x9 %016lx\n",
-                       regs->regs[8], regs->regs[9]);
-       output->printf(output, " x10 %016lx  x11 %016lx\n",
-                       regs->regs[10], regs->regs[11]);
-       output->printf(output, " x12 %016lx  x13 %016lx\n",
-                       regs->regs[12], regs->regs[13]);
-       output->printf(output, " x14 %016lx  x15 %016lx\n",
-                       regs->regs[14], regs->regs[15]);
-       output->printf(output, " x16 %016lx  x17 %016lx\n",
-                       regs->regs[16], regs->regs[17]);
-       output->printf(output, " x18 %016lx  x19 %016lx\n",
-                       regs->regs[18], regs->regs[19]);
-       output->printf(output, " x20 %016lx  x21 %016lx\n",
-                       regs->regs[20], regs->regs[21]);
-       output->printf(output, " x22 %016lx  x23 %016lx\n",
-                       regs->regs[22], regs->regs[23]);
-       output->printf(output, " x24 %016lx  x25 %016lx\n",
-                       regs->regs[24], regs->regs[25]);
-       output->printf(output, " x26 %016lx  x27 %016lx\n",
-                       regs->regs[26], regs->regs[27]);
-       output->printf(output, " x28 %016lx  x29 %016lx\n",
-                       regs->regs[28], regs->regs[29]);
-       output->printf(output, " x30 %016lx   sp %016lx\n",
-                       regs->regs[30], regs->sp);
-       output->printf(output, "  pc %016lx cpsr %08x (%s)\n",
-                       regs->pc, regs->pstate, mode_name(regs));
-}
-
-void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
-               const struct pt_regs *regs)
-{
-       if (compat_user_mode(regs))
-               fiq_debugger_dump_regs_aarch32(output, regs);
-       else
-               fiq_debugger_dump_regs_aarch64(output, regs);
-}
-
-#define READ_SPECIAL_REG(x) ({ \
-       u64 val; \
-       asm volatile ("mrs %0, " # x : "=r"(val)); \
-       val; \
-})
-
-void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
-               const struct pt_regs *regs)
-{
-       u32 pstate = READ_SPECIAL_REG(CurrentEl);
-       bool in_el2 = (pstate & PSR_MODE_MASK) >= PSR_MODE_EL2t;
-
-       fiq_debugger_dump_regs(output, regs);
-
-       output->printf(output, " sp_el0   %016lx\n",
-                       READ_SPECIAL_REG(sp_el0));
-
-       if (in_el2)
-               output->printf(output, " sp_el1   %016lx\n",
-                               READ_SPECIAL_REG(sp_el1));
-
-       output->printf(output, " elr_el1  %016lx\n",
-                       READ_SPECIAL_REG(elr_el1));
-
-       output->printf(output, " spsr_el1 %08lx\n",
-                       READ_SPECIAL_REG(spsr_el1));
-
-       if (in_el2) {
-               output->printf(output, " spsr_irq %08lx\n",
-                               READ_SPECIAL_REG(spsr_irq));
-               output->printf(output, " spsr_abt %08lx\n",
-                               READ_SPECIAL_REG(spsr_abt));
-               output->printf(output, " spsr_und %08lx\n",
-                               READ_SPECIAL_REG(spsr_und));
-               output->printf(output, " spsr_fiq %08lx\n",
-                               READ_SPECIAL_REG(spsr_fiq));
-               output->printf(output, " spsr_el2 %08lx\n",
-                               READ_SPECIAL_REG(elr_el2));
-               output->printf(output, " spsr_el2 %08lx\n",
-                               READ_SPECIAL_REG(spsr_el2));
-       }
-}
-
-struct stacktrace_state {
-       struct fiq_debugger_output *output;
-       unsigned int depth;
-};
-
-static int report_trace(struct stackframe *frame, void *d)
-{
-       struct stacktrace_state *sts = d;
-
-       if (sts->depth) {
-               sts->output->printf(sts->output, "%pF:\n", frame->pc);
-               sts->output->printf(sts->output,
-                               "  pc %016lx   sp %016lx   fp %016lx\n",
-                               frame->pc, frame->sp, frame->fp);
-               sts->depth--;
-               return 0;
-       }
-       sts->output->printf(sts->output, "  ...\n");
-
-       return sts->depth == 0;
-}
-
-void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
-               const struct pt_regs *regs, unsigned int depth, void *ssp)
-{
-       struct thread_info *real_thread_info = THREAD_INFO(ssp);
-       struct stacktrace_state sts;
-
-       sts.depth = depth;
-       sts.output = output;
-       *current_thread_info() = *real_thread_info;
-
-       if (!current)
-               output->printf(output, "current NULL\n");
-       else
-               output->printf(output, "pid: %d  comm: %s\n",
-                       current->pid, current->comm);
-       fiq_debugger_dump_regs(output, regs);
-
-       if (!user_mode(regs)) {
-               struct stackframe frame;
-               frame.fp = regs->regs[29];
-               frame.sp = regs->sp;
-               frame.pc = regs->pc;
-               output->printf(output, "\n");
-               walk_stackframe(current, &frame, report_trace, &sts);
-       }
-}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
deleted file mode 100644
index d5d051f..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2014 Google, Inc.
- * Author: Colin Cross <ccross@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _FIQ_DEBUGGER_PRIV_H_
-#define _FIQ_DEBUGGER_PRIV_H_
-
-#define THREAD_INFO(sp) ((struct thread_info *) \
-               ((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
-
-struct fiq_debugger_output {
-       void (*printf)(struct fiq_debugger_output *output, const char *fmt, ...);
-};
-
-struct pt_regs;
-
-void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
-               const struct pt_regs *regs);
-void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
-               const struct pt_regs *regs);
-void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
-               const struct pt_regs *regs);
-void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
-               const struct pt_regs *regs, unsigned int depth, void *ssp);
-
-#endif
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
deleted file mode 100644
index 10c3c5d..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
- *
- * simple lockless ringbuffer
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-struct fiq_debugger_ringbuf {
-       int len;
-       int head;
-       int tail;
-       u8 buf[];
-};
-
-
-static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
-{
-       struct fiq_debugger_ringbuf *rbuf;
-
-       rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
-       if (rbuf == NULL)
-               return NULL;
-
-       rbuf->len = len;
-       rbuf->head = 0;
-       rbuf->tail = 0;
-       smp_mb();
-
-       return rbuf;
-}
-
-static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
-{
-       kfree(rbuf);
-}
-
-static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
-{
-       int level = rbuf->head - rbuf->tail;
-
-       if (level < 0)
-               level = rbuf->len + level;
-
-       return level;
-}
-
-static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
-{
-       return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
-}
-
-static inline u8
-fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
-{
-       return rbuf->buf[(rbuf->tail + i) % rbuf->len];
-}
-
-static inline int
-fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
-{
-       count = min(count, fiq_debugger_ringbuf_level(rbuf));
-
-       rbuf->tail = (rbuf->tail + count) % rbuf->len;
-       smp_mb();
-
-       return count;
-}
-
-static inline int
-fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
-{
-       if (fiq_debugger_ringbuf_room(rbuf) == 0)
-               return 0;
-
-       rbuf->buf[rbuf->head] = datum;
-       smp_mb();
-       rbuf->head = (rbuf->head + 1) % rbuf->len;
-       smp_mb();
-
-       return 1;
-}
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.c b/drivers/staging/android/fiq_debugger/fiq_watchdog.c
deleted file mode 100644
index 194b541..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2014 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/pstore_ram.h>
-
-#include "fiq_watchdog.h"
-#include "fiq_debugger_priv.h"
-
-static DEFINE_RAW_SPINLOCK(fiq_watchdog_lock);
-
-static void fiq_watchdog_printf(struct fiq_debugger_output *output,
-                               const char *fmt, ...)
-{
-       char buf[256];
-       va_list ap;
-       int len;
-
-       va_start(ap, fmt);
-       len = vscnprintf(buf, sizeof(buf), fmt, ap);
-       va_end(ap);
-
-       ramoops_console_write_buf(buf, len);
-}
-
-struct fiq_debugger_output fiq_watchdog_output = {
-       .printf = fiq_watchdog_printf,
-};
-
-void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp)
-{
-       char msg[24];
-       int len;
-
-       raw_spin_lock(&fiq_watchdog_lock);
-
-       len = scnprintf(msg, sizeof(msg), "watchdog fiq cpu %d\n",
-                       THREAD_INFO(svc_sp)->cpu);
-       ramoops_console_write_buf(msg, len);
-
-       fiq_debugger_dump_stacktrace(&fiq_watchdog_output, regs, 100, svc_sp);
-
-       raw_spin_unlock(&fiq_watchdog_lock);
-}
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.h b/drivers/staging/android/fiq_debugger/fiq_watchdog.h
deleted file mode 100644
index c6b507f..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) 2014 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _FIQ_WATCHDOG_H_
-#define _FIQ_WATCHDOG_H_
-
-void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp);
-
-#endif
index ab7a332..6404cd4 100644
@@ -351,9 +351,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
 static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
                            unsigned int flags)
 {
-       int divider, base, prescale;
+       unsigned int divider, base, prescale;
 
-       /* This function needs improvment */
+       /* This function needs improvement */
        /* Don't know if divider==0 works. */
 
        for (prescale = 0; prescale < 16; prescale++) {
@@ -367,7 +367,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
                        divider = (*nanosec) / base;
                        break;
                case CMDF_ROUND_UP:
-                       divider = (*nanosec) / base;
+                       divider = DIV_ROUND_UP(*nanosec, base);
                        break;
                }
                if (divider < 65536) {
@@ -377,7 +377,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
        }
 
        prescale = 15;
-       base = timer_base * (1 << prescale);
+       base = timer_base * (prescale + 1);
        divider = 65535;
        *nanosec = divider * base;
        return (prescale << 16) | (divider);
index 0fdff91..43474f5 100644
@@ -406,6 +406,9 @@ static int vpfe_open(struct file *file)
        /* If decoder is not initialized. initialize it */
        if (!video->initialized && vpfe_update_pipe_state(video)) {
                mutex_unlock(&video->lock);
+               v4l2_fh_del(&handle->vfh);
+               v4l2_fh_exit(&handle->vfh);
+               kfree(handle);
                return -ENODEV;
        }
        /* Increment device users counter */
index fc46c8c..3bd19de 100644
@@ -1275,7 +1275,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
 
                        atmel_port->hd_start_rx = false;
                        atmel_start_rx(port);
-                       return;
                }
 
                tasklet_schedule(&atmel_port->tasklet);
index 0040c29..b9e137c 100644
@@ -421,7 +421,16 @@ static int cpm_uart_startup(struct uart_port *port)
                        clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
                }
                cpm_uart_initbd(pinfo);
-               cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+               if (IS_SMC(pinfo)) {
+                       out_be32(&pinfo->smcup->smc_rstate, 0);
+                       out_be32(&pinfo->smcup->smc_tstate, 0);
+                       out_be16(&pinfo->smcup->smc_rbptr,
+                                in_be16(&pinfo->smcup->smc_rbase));
+                       out_be16(&pinfo->smcup->smc_tbptr,
+                                in_be16(&pinfo->smcup->smc_tbase));
+               } else {
+                       cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+               }
        }
        /* Install interrupt handler. */
        retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port);
@@ -875,16 +884,14 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
                 (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE);
 
 /*
- *  In case SMC1 is being relocated...
+ *  In case SMC is being relocated...
  */
-#if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
        out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase));
        out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase));
        out_be32(&up->smc_rstate, 0);
        out_be32(&up->smc_tstate, 0);
        out_be16(&up->smc_brkcr, 1);              /* number of break chars */
        out_be16(&up->smc_brkec, 0);
-#endif
 
        /* Set up the uart parameters in the
         * parameter ram.
@@ -898,8 +905,6 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
        out_be16(&up->smc_brkec, 0);
        out_be16(&up->smc_brkcr, 1);
 
-       cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
-
        /* Set UART mode, 8 bit, no parity, one stop.
         * Enable receive and transmit.
         */
index a80cdad..d8cb949 100644
@@ -544,7 +544,11 @@ static int __init digicolor_uart_init(void)
        if (ret)
                return ret;
 
-       return platform_driver_register(&digicolor_uart_platform);
+       ret = platform_driver_register(&digicolor_uart_platform);
+       if (ret)
+               uart_unregister_driver(&digicolor_uart);
+
+       return ret;
 }
 module_init(digicolor_uart_init);
 
index 0ac0c61..a66fb7a 100644
@@ -486,37 +486,48 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
 
 static int max310x_set_baud(struct uart_port *port, int baud)
 {
-       unsigned int mode = 0, clk = port->uartclk, div = clk / baud;
+       unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
 
-       /* Check for minimal value for divider */
-       if (div < 16)
-               div = 16;
-
-       if (clk % baud && (div / 16) < 0x8000) {
+       /*
+        * Calculate the integer divisor first. Select a proper mode
+        * in case if the requested baud is too high for the pre-defined
+        * clocks frequency.
+        */
+       div = port->uartclk / baud;
+       if (div < 8) {
+               /* Mode x4 */
+               c = 4;
+               mode = MAX310X_BRGCFG_4XMODE_BIT;
+       } else if (div < 16) {
                /* Mode x2 */
+               c = 8;
                mode = MAX310X_BRGCFG_2XMODE_BIT;
-               clk = port->uartclk * 2;
-               div = clk / baud;
-
-               if (clk % baud && (div / 16) < 0x8000) {
-                       /* Mode x4 */
-                       mode = MAX310X_BRGCFG_4XMODE_BIT;
-                       clk = port->uartclk * 4;
-                       div = clk / baud;
-               }
+       } else {
+               c = 16;
        }
 
-       max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8);
-       max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16);
-       max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode);
+       /* Calculate the divisor in accordance with the fraction coefficient */
+       div /= c;
+       F = c*baud;
+
+       /* Calculate the baud rate fraction */
+       if (div > 0)
+               frac = (16*(port->uartclk % F)) / F;
+       else
+               div = 1;
+
+       max310x_port_write(port, MAX310X_BRGDIVMSB_REG, div >> 8);
+       max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div);
+       max310x_port_write(port, MAX310X_BRGCFG_REG, frac | mode);
 
-       return DIV_ROUND_CLOSEST(clk, div);
+       /* Return the actual baud rate we just programmed */
+       return (16*port->uartclk) / (c*(16*div + frac));
 }
 
 static int max310x_update_best_err(unsigned long f, long *besterr)
 {
        /* Use baudrate 115200 for calculate error */
-       long err = f % (115200 * 16);
+       long err = f % (460800 * 16);
 
        if ((*besterr < 0) || (*besterr > err)) {
                *besterr = err;
index eb66f7a..924c50d 100644
@@ -395,10 +395,14 @@ no_rx:
 
 static inline void msm_wait_for_xmitr(struct uart_port *port)
 {
+       unsigned int timeout = 500000;
+
        while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
                if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
                        break;
                udelay(1);
+               if (!timeout--)
+                       break;
        }
        msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
 }
index 669134e..c450e32 100644
@@ -1203,6 +1203,7 @@ static void work_fn_tx(struct work_struct *work)
        struct uart_port *port = &s->port;
        struct circ_buf *xmit = &port->state->xmit;
        dma_addr_t buf;
+       int head, tail;
 
        /*
         * DMA is idle now.
@@ -1212,16 +1213,23 @@ static void work_fn_tx(struct work_struct *work)
         * consistent xmit buffer state.
         */
        spin_lock_irq(&port->lock);
-       buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
+       head = xmit->head;
+       tail = xmit->tail;
+       buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
        s->tx_dma_len = min_t(unsigned int,
-               CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
-               CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
-       spin_unlock_irq(&port->lock);
+               CIRC_CNT(head, tail, UART_XMIT_SIZE),
+               CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
+       if (!s->tx_dma_len) {
+               /* Transmit buffer has been flushed */
+               spin_unlock_irq(&port->lock);
+               return;
+       }
 
        desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
                                           DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
+               spin_unlock_irq(&port->lock);
                dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
                /* switch to PIO */
                sci_tx_dma_release(s, true);
@@ -1231,20 +1239,20 @@ static void work_fn_tx(struct work_struct *work)
        dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
                                   DMA_TO_DEVICE);
 
-       spin_lock_irq(&port->lock);
        desc->callback = sci_dma_tx_complete;
        desc->callback_param = s;
-       spin_unlock_irq(&port->lock);
        s->cookie_tx = dmaengine_submit(desc);
        if (dma_submit_error(s->cookie_tx)) {
+               spin_unlock_irq(&port->lock);
                dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
                /* switch to PIO */
                sci_tx_dma_release(s, true);
                return;
        }
 
+       spin_unlock_irq(&port->lock);
        dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
-               __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+               __func__, xmit->buf, tail, head, s->cookie_tx);
 
        dma_async_issue_pending(chan);
 }
index ba9374e..f8f9927 100644
@@ -240,7 +240,7 @@ static inline void sprd_rx(struct uart_port *port)
 
                if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
                        SPRD_LSR_FE | SPRD_LSR_OE))
-                       if (handle_lsr_errors(port, &lsr, &flag))
+                       if (handle_lsr_errors(port, &flag, &lsr))
                                continue;
                if (uart_handle_sysrq_char(port, ch))
                        continue;
index 34234c2..656c2ad 100644
@@ -137,8 +137,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
 
        list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
                tsk = waiter->task;
-               smp_mb();
-               waiter->task = NULL;
+               smp_store_release(&waiter->task, NULL);
                wake_up_process(tsk);
                put_task_struct(tsk);
        }
@@ -234,7 +233,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
        for (;;) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
-               if (!waiter.task)
+               if (!smp_load_acquire(&waiter.task))
                        break;
                if (!timeout)
                        break;
index 736de10..1930a8e 100644
@@ -1319,13 +1319,6 @@ made_compressed_probe:
        if (acm == NULL)
                goto alloc_fail;
 
-       minor = acm_alloc_minor(acm);
-       if (minor < 0) {
-               dev_err(&intf->dev, "no more free acm devices\n");
-               kfree(acm);
-               return -ENODEV;
-       }
-
        ctrlsize = usb_endpoint_maxp(epctrl);
        readsize = usb_endpoint_maxp(epread) *
                                (quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1333,6 +1326,16 @@ made_compressed_probe:
        acm->writesize = usb_endpoint_maxp(epwrite) * 20;
        acm->control = control_interface;
        acm->data = data_interface;
+
+       usb_get_intf(acm->control); /* undone in destruct() */
+
+       minor = acm_alloc_minor(acm);
+       if (minor < 0) {
+               dev_err(&intf->dev, "no more free acm devices\n");
+               kfree(acm);
+               return -ENODEV;
+       }
+
        acm->minor = minor;
        acm->dev = usb_dev;
        acm->ctrl_caps = ac_management_function;
@@ -1474,7 +1477,6 @@ skip_countries:
        usb_driver_claim_interface(&acm_driver, data_interface, acm);
        usb_set_intfdata(data_interface, acm);
 
-       usb_get_intf(control_interface);
        tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
                        &control_interface->dev);
        if (IS_ERR(tty_dev)) {
index 61ea879..71ad04d 100644
@@ -577,10 +577,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
 {
        struct wdm_device *desc = file->private_data;
 
-       wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
+       wait_event(desc->wait,
+                       /*
+                        * needs both flags. We cannot do with one
+                        * because resetting it would cause a race
+                        * with write() yet we need to signal
+                        * a disconnect
+                        */
+                       !test_bit(WDM_IN_USE, &desc->flags) ||
+                       test_bit(WDM_DISCONNECTING, &desc->flags));
 
        /* cannot dereference desc->intf if WDM_DISCONNECTING */
-       if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
+       if (test_bit(WDM_DISCONNECTING, &desc->flags))
+               return -ENODEV;
+       if (desc->werr < 0)
                dev_err(&desc->intf->dev, "Error in flush path: %d\n",
                        desc->werr);
 
@@ -968,8 +978,6 @@ static void wdm_disconnect(struct usb_interface *intf)
        spin_lock_irqsave(&desc->iuspin, flags);
        set_bit(WDM_DISCONNECTING, &desc->flags);
        set_bit(WDM_READ, &desc->flags);
-       /* to terminate pending flushes */
-       clear_bit(WDM_IN_USE, &desc->flags);
        spin_unlock_irqrestore(&desc->iuspin, flags);
        wake_up_all(&desc->wait);
        mutex_lock(&desc->rlock);
index 421f1d3..126987c 100644
@@ -894,7 +894,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
        struct usb_bos_descriptor *bos;
        struct usb_dev_cap_header *cap;
        struct usb_ssp_cap_descriptor *ssp_cap;
-       unsigned char *buffer;
+       unsigned char *buffer, *buffer0;
        int length, total_len, num, i, ssac;
        __u8 cap_type;
        int ret;
@@ -939,10 +939,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                        ret = -ENOMSG;
                goto err;
        }
+
+       buffer0 = buffer;
        total_len -= length;
+       buffer += length;
 
        for (i = 0; i < num; i++) {
-               buffer += length;
                cap = (struct usb_dev_cap_header *)buffer;
 
                if (total_len < sizeof(*cap) || total_len < cap->bLength) {
@@ -956,8 +958,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                        break;
                }
 
-               total_len -= length;
-
                if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
                        dev_warn(ddev, "descriptor type invalid, skip\n");
                        continue;
@@ -1001,7 +1001,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                default:
                        break;
                }
+
+               total_len -= length;
+               buffer += length;
        }
+       dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
 
        return 0;
 
index b3de806..097977c 100644
@@ -191,9 +191,10 @@ int usb_register_dev(struct usb_interface *intf,
                intf->minor = minor;
                break;
        }
-       up_write(&minor_rwsem);
-       if (intf->minor < 0)
+       if (intf->minor < 0) {
+               up_write(&minor_rwsem);
                return -EXFULL;
+       }
 
        /* create a usb class device for this usb interface */
        snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -206,12 +207,11 @@ int usb_register_dev(struct usb_interface *intf,
                                      MKDEV(USB_MAJOR, minor), class_driver,
                                      "%s", temp);
        if (IS_ERR(intf->usb_dev)) {
-               down_write(&minor_rwsem);
                usb_minors[minor] = NULL;
                intf->minor = -1;
-               up_write(&minor_rwsem);
                retval = PTR_ERR(intf->usb_dev);
        }
+       up_write(&minor_rwsem);
        return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -237,12 +237,12 @@ void usb_deregister_dev(struct usb_interface *intf,
                return;
 
        dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+       device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
 
        down_write(&minor_rwsem);
        usb_minors[intf->minor] = NULL;
        up_write(&minor_rwsem);
 
-       device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
        intf->usb_dev = NULL;
        intf->minor = -1;
        destroy_usb_class();
index 9d46cb7..63f7364 100644
@@ -3492,6 +3492,7 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
        struct usb_device *hdev;
        struct usb_device *udev;
        int connect_change = 0;
+       u16 link_state;
        int ret;
 
        hdev = hub->hdev;
@@ -3501,9 +3502,11 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
                        return 0;
                usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
        } else {
+               link_state = portstatus & USB_PORT_STAT_LINK_STATE;
                if (!udev || udev->state != USB_STATE_SUSPENDED ||
-                                (portstatus & USB_PORT_STAT_LINK_STATE) !=
-                                USB_SS_PORT_LS_U0)
+                               (link_state != USB_SS_PORT_LS_U0 &&
+                                link_state != USB_SS_PORT_LS_U1 &&
+                                link_state != USB_SS_PORT_LS_U2))
                        return 0;
        }
 
@@ -3833,6 +3836,9 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
  * control transfers to set the hub timeout or enable device-initiated U1/U2
  * will be successful.
  *
+ * If the control transfer to enable device-initiated U1/U2 entry fails, then
+ * hub-initiated U1/U2 will be disabled.
+ *
  * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI
  * driver know about it.  If that call fails, it should be harmless, and just
  * take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency.
@@ -3887,23 +3893,24 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
                 * host know that this link state won't be enabled.
                 */
                hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
-       } else {
-               /* Only a configured device will accept the Set Feature
-                * U1/U2_ENABLE
-                */
-               if (udev->actconfig)
-                       usb_set_device_initiated_lpm(udev, state, true);
+               return;
+       }
 
-               /* As soon as usb_set_lpm_timeout(timeout) returns 0, the
-                * hub-initiated LPM is enabled. Thus, LPM is enabled no
-                * matter the result of usb_set_device_initiated_lpm().
-                * The only difference is whether device is able to initiate
-                * LPM.
-                */
+       /* Only a configured device will accept the Set Feature
+        * U1/U2_ENABLE
+        */
+       if (udev->actconfig &&
+           usb_set_device_initiated_lpm(udev, state, true) == 0) {
                if (state == USB3_LPM_U1)
                        udev->usb3_lpm_u1_enabled = 1;
                else if (state == USB3_LPM_U2)
                        udev->usb3_lpm_u2_enabled = 1;
+       } else {
+               /* Don't request U1/U2 entry if the device
+                * cannot transition to U1/U2.
+                */
+               usb_set_lpm_timeout(udev, state, 0);
+               hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
        }
 }
 
index 0650f0b..b6b25c7 100644
@@ -276,6 +276,8 @@ struct dwc3_msm {
        struct mutex suspend_resume_mutex;
 
        enum usb_device_speed override_usb_speed;
+
+       bool core_init_failed;
 };
 
 #define USB_HSPHY_3P3_VOL_MIN          3050000 /* uV */
@@ -1953,12 +1955,20 @@ static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
                ret = dwc3_core_pre_init(dwc);
                if (ret) {
                        dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
+                       mdwc->core_init_failed = true;
                        return;
                }
                mdwc->init = true;
        }
 
-       dwc3_core_init(dwc);
+       ret = dwc3_core_init(dwc);
+       if (ret) {
+               dev_err(mdwc->dev, "dwc3_core_init failed\n");
+               mdwc->core_init_failed = true;
+               return;
+       }
+
+       mdwc->core_init_failed = false;
        /* Re-configure event buffers */
        dwc3_event_buffers_setup(dwc);
 
@@ -2229,7 +2239,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool hibernation)
 
        /* kick_sm if it is waiting for lpm sequence to finish */
        if (test_and_clear_bit(WAIT_FOR_LPM, &mdwc->inputs))
-               schedule_delayed_work(&mdwc->sm_work, 0);
+               queue_delayed_work(mdwc->sm_usb_wq, &mdwc->sm_work, 0);
 
        mutex_unlock(&mdwc->suspend_resume_mutex);
 
@@ -2325,6 +2335,12 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
        /* Resume HS PHY */
        usb_phy_set_suspend(mdwc->hs_phy, 0);
 
+       /* Disable HSPHY auto suspend */
+       dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
+               dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
+                               ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
+                                       DWC3_GUSB2PHYCFG_SUSPHY));
+
        /* Recover from controller power collapse */
        if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
                dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
@@ -2339,12 +2355,6 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
        /* enable power evt irq for IN P3 detection */
        enable_irq(mdwc->pwr_event_irq);
 
-       /* Disable HSPHY auto suspend */
-       dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
-               dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
-                               ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
-                                       DWC3_GUSB2PHYCFG_SUSPHY));
-
        /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
        if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
                if (!mdwc->no_wakeup_src_in_hostmode)
@@ -3602,6 +3612,12 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
                dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
 
                pm_runtime_get_sync(mdwc->dev);
+               if (mdwc->core_init_failed) {
+                       dev_err(mdwc->dev, "%s: Core init failed\n", __func__);
+                       pm_runtime_put_sync_suspend(mdwc->dev);
+                       return -EAGAIN;
+               }
+
                mdwc->hs_phy->flags |= PHY_HOST_MODE;
                if (dwc->maximum_speed == USB_SPEED_SUPER) {
                        mdwc->ss_phy->flags |= PHY_HOST_MODE;
@@ -3748,8 +3764,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
                /* wait for LPM, to ensure h/w is reset after stop_host */
                set_bit(WAIT_FOR_LPM, &mdwc->inputs);
 
-               pm_runtime_mark_last_busy(mdwc->dev);
-               pm_runtime_put_sync_autosuspend(mdwc->dev);
+               pm_runtime_put_sync_suspend(mdwc->dev);
                dbg_event(0xFF, "StopHost psync",
                        atomic_read(&mdwc->dev->power.usage_count));
        }
@@ -4076,6 +4091,10 @@ static void dwc3_otg_sm_work(struct work_struct *w)
                                delay = VBUS_REG_CHECK_DELAY;
                                work = 1;
                                mdwc->vbus_retry_count++;
+                       } else if (ret == -EAGAIN) {
+                               mdwc->drd_state = DRD_STATE_HOST_IDLE;
+                               dev_dbg(mdwc->dev, "Core init failed. Retrying...\n");
+                               work = 1;
                        } else if (ret) {
                                dev_err(mdwc->dev, "unable to start host\n");
                                mdwc->drd_state = DRD_STATE_HOST_IDLE;
index c2dac01..f1648e8 100644
@@ -2082,6 +2082,7 @@ void composite_disconnect(struct usb_gadget *gadget)
         * disconnect callbacks?
         */
        spin_lock_irqsave(&cdev->lock, flags);
+       cdev->suspended = 0;
        if (cdev->config)
                reset_config(cdev);
        if (cdev->driver->disconnect)
index fd2150a..d2a119b 100644
@@ -1167,11 +1167,12 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
        ffs_log("enter");
 
        if (!is_sync_kiocb(kiocb)) {
-               p = kmalloc(sizeof(io_data), GFP_KERNEL);
+               p = kzalloc(sizeof(io_data), GFP_KERNEL);
                if (unlikely(!p))
                        return -ENOMEM;
                p->aio = true;
        } else {
+               memset(p, 0, sizeof(*p));
                p->aio = false;
        }
 
@@ -1208,11 +1209,12 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
        ffs_log("enter");
 
        if (!is_sync_kiocb(kiocb)) {
-               p = kmalloc(sizeof(io_data), GFP_KERNEL);
+               p = kzalloc(sizeof(io_data), GFP_KERNEL);
                if (unlikely(!p))
                        return -ENOMEM;
                p->aio = true;
        } else {
+               memset(p, 0, sizeof(*p));
                p->aio = false;
        }
 
index 1b142f5..b942f38 100644
@@ -373,9 +373,11 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
                req->complete = f_midi_complete;
                err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC);
                if (err) {
-                       ERROR(midi, "%s queue req: %d\n",
+                       ERROR(midi, "%s: couldn't enqueue request: %d\n",
                                    midi->out_ep->name, err);
-                       free_ep_req(midi->out_ep, req);
+                       if (req->buf != NULL)
+                               free_ep_req(midi->out_ep, req);
+                       return err;
                }
        }
 
index 7d53a47..2f03334 100644
@@ -64,7 +64,9 @@ struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len);
 /* Frees a usb_request previously allocated by alloc_ep_req() */
 static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
 {
+       WARN_ON(req->buf == NULL);
        kfree(req->buf);
+       req->buf = NULL;
        usb_ep_free_request(ep, req);
 }
 
index 2341af4..11b3a8c 100644
@@ -1653,6 +1653,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        /* see what we found out */
                        temp = check_reset_complete(fotg210, wIndex, status_reg,
                                        fotg210_readl(fotg210, status_reg));
+
+                       /* restart schedule */
+                       fotg210->command |= CMD_RUN;
+                       fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
                }
 
                if (!(temp & (PORT_RESUME|PORT_RESET))) {
index 97750f1..c14e4a6 100644
@@ -173,7 +173,7 @@ out:
        return result;
 
 error_set_cluster_id:
-       wusb_cluster_id_put(wusbhc->cluster_id);
+       wusb_cluster_id_put(addr);
 error_cluster_id_get:
        goto out;
 
index be8c618..228960d 100644
@@ -415,8 +415,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
  * other cases where the next software may expect clean state from the
  * "firmware".  this is bus-neutral, unlike shutdown() methods.
  */
-static void
-ohci_shutdown (struct usb_hcd *hcd)
+static void _ohci_shutdown(struct usb_hcd *hcd)
 {
        struct ohci_hcd *ohci;
 
@@ -432,6 +431,16 @@ ohci_shutdown (struct usb_hcd *hcd)
        ohci->rh_state = OHCI_RH_HALTED;
 }
 
+static void ohci_shutdown(struct usb_hcd *hcd)
+{
+       struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ohci->lock, flags);
+       _ohci_shutdown(hcd);
+       spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
 /*-------------------------------------------------------------------------*
  * HC functions
  *-------------------------------------------------------------------------*/
@@ -750,7 +759,7 @@ static void io_watchdog_func(unsigned long _ohci)
  died:
                        usb_hc_died(ohci_to_hcd(ohci));
                        ohci_dump(ohci);
-                       ohci_shutdown(ohci_to_hcd(ohci));
+                       _ohci_shutdown(ohci_to_hcd(ohci));
                        goto done;
                } else {
                        /* No write back because the done queue was empty */
index 89e9494..3ea435c 100644
@@ -178,7 +178,7 @@ int usb_amd_find_chipset_info(void)
 {
        unsigned long flags;
        struct amd_chipset_info info;
-       int ret;
+       int need_pll_quirk = 0;
 
        spin_lock_irqsave(&amd_lock, flags);
 
@@ -192,21 +192,28 @@ int usb_amd_find_chipset_info(void)
        spin_unlock_irqrestore(&amd_lock, flags);
 
        if (!amd_chipset_sb_type_init(&info)) {
-               ret = 0;
                goto commit;
        }
 
-       /* Below chipset generations needn't enable AMD PLL quirk */
-       if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
-                       info.sb_type.gen == AMD_CHIPSET_SB600 ||
-                       info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
-                       (info.sb_type.gen == AMD_CHIPSET_SB700 &&
-                       info.sb_type.rev > 0x3b)) {
+       switch (info.sb_type.gen) {
+       case AMD_CHIPSET_SB700:
+               need_pll_quirk = info.sb_type.rev <= 0x3B;
+               break;
+       case AMD_CHIPSET_SB800:
+       case AMD_CHIPSET_HUDSON2:
+       case AMD_CHIPSET_BOLTON:
+               need_pll_quirk = 1;
+               break;
+       default:
+               need_pll_quirk = 0;
+               break;
+       }
+
+       if (!need_pll_quirk) {
                if (info.smbus_dev) {
                        pci_dev_put(info.smbus_dev);
                        info.smbus_dev = NULL;
                }
-               ret = 0;
                goto commit;
        }
 
@@ -225,7 +232,7 @@ int usb_amd_find_chipset_info(void)
                }
        }
 
-       ret = info.probe_result = 1;
+       need_pll_quirk = info.probe_result = 1;
        printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 
 commit:
@@ -236,7 +243,7 @@ commit:
 
                /* Mark that we where here */
                amd_chipset.probe_count++;
-               ret = amd_chipset.probe_result;
+               need_pll_quirk = amd_chipset.probe_result;
 
                spin_unlock_irqrestore(&amd_lock, flags);
 
@@ -250,7 +257,7 @@ commit:
                spin_unlock_irqrestore(&amd_lock, flags);
        }
 
-       return ret;
+       return need_pll_quirk;
 }
 EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
 
index 5e43fd8..836fb65 100644
@@ -898,19 +898,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
        dev = usb_get_intfdata(interface);
        mutex_lock(&iowarrior_open_disc_lock);
        usb_set_intfdata(interface, NULL);
+       /* prevent device read, write and ioctl */
+       dev->present = 0;
 
        minor = dev->minor;
+       mutex_unlock(&iowarrior_open_disc_lock);
+       /* give back our minor - this will call close() locks need to be dropped at this point*/
 
-       /* give back our minor */
        usb_deregister_dev(interface, &iowarrior_class);
 
        mutex_lock(&dev->mutex);
 
        /* prevent device read, write and ioctl */
-       dev->present = 0;
 
        mutex_unlock(&dev->mutex);
-       mutex_unlock(&iowarrior_open_disc_lock);
 
        if (dev->opened) {
                /* There is a process that holds a filedescriptor to the device ,
index 56e6fba..3c8badc 100644
@@ -422,11 +422,35 @@ static unsigned int ksb_fs_poll(struct file *file, poll_table *wait)
 static int ksb_fs_release(struct inode *ip, struct file *fp)
 {
        struct ks_bridge        *ksb = fp->private_data;
+       struct data_pkt *pkt;
+       unsigned long flags;
 
        if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
                dev_dbg(ksb->device, ":%s", ksb->id_info.name);
        dbg_log_event(ksb, "FS-RELEASE", 0, 0);
 
+       usb_kill_anchored_urbs(&ksb->submitted);
+
+       wait_event_interruptible_timeout(
+                                       ksb->pending_urb_wait,
+                                       !atomic_read(&ksb->tx_pending_cnt) &&
+                                       !atomic_read(&ksb->rx_pending_cnt),
+                                       msecs_to_jiffies(PENDING_URB_TIMEOUT));
+
+       spin_lock_irqsave(&ksb->lock, flags);
+       while (!list_empty(&ksb->to_ks_list)) {
+               pkt = list_first_entry(&ksb->to_ks_list,
+                               struct data_pkt, list);
+               list_del_init(&pkt->list);
+               ksb_free_data_pkt(pkt);
+       }
+       while (!list_empty(&ksb->to_mdm_list)) {
+               pkt = list_first_entry(&ksb->to_mdm_list,
+                               struct data_pkt, list);
+               list_del_init(&pkt->list);
+               ksb_free_data_pkt(pkt);
+       }
+       spin_unlock_irqrestore(&ksb->lock, flags);
        clear_bit(FILE_OPENED, &ksb->flags);
        fp->private_data = NULL;
 
index a8b6d00..2222ec2 100644
@@ -96,7 +96,6 @@ static void yurex_delete(struct kref *kref)
 
        dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
-       usb_put_dev(dev->udev);
        if (dev->cntl_urb) {
                usb_kill_urb(dev->cntl_urb);
                kfree(dev->cntl_req);
@@ -112,6 +111,7 @@ static void yurex_delete(struct kref *kref)
                                dev->int_buffer, dev->urb->transfer_dma);
                usb_free_urb(dev->urb);
        }
+       usb_put_dev(dev->udev);
        kfree(dev);
 }
 
index ae72ec6..1b09c02 100644
@@ -862,7 +862,7 @@ static int qusb_phy_init(struct usb_phy *phy)
                wmb();
 
                /* Required to get PHY PLL lock successfully */
-               usleep_range(100, 110);
+               usleep_range(50000, 51000);
        }
 
        if (qphy->major_rev < 2) {
@@ -881,6 +881,7 @@ static int qusb_phy_init(struct usb_phy *phy)
        if (pll_lock_fail) {
                dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
                WARN_ON(1);
+               return -ETIMEDOUT;
        }
 
        return 0;
index d7b31fd..1bceb11 100644
@@ -967,6 +967,11 @@ static const struct usb_device_id option_ids[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
+       /* Motorola devices */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) },    /* mdm6600 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) },    /* mdm9600 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) },    /* mdm ram dl */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) },    /* mdm qc dl */
 
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1544,6 +1549,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
          .driver_info = RSVD(2) },
        { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },    /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1949,11 +1955,15 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),                     /* D-Link DWM-222 */
          .driver_info = RSVD(4) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff),                     /* D-Link DWM-222 A2 */
+         .driver_info = RSVD(4) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },    /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/A3 */
        { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),                     /* Olicard 600 */
          .driver_info = RSVD(4) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff),                     /* BroadMobi BM818 */
+         .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                   /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
index be432be..f0ca9fe 100644
@@ -50,7 +50,7 @@ MODULE_VERSION("1.03");
 
 static int auto_delink_en = 1;
 module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
 
 #ifdef CONFIG_REALTEK_AUTOPM
 static int ss_en = 1;
@@ -1006,12 +1006,15 @@ static int init_realtek_cr(struct us_data *us)
                        goto INIT_FAIL;
        }
 
-       if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
-           CHECK_FW_VER(chip, 0x5901))
-               SET_AUTO_DELINK(chip);
-       if (STATUS_LEN(chip) == 16) {
-               if (SUPPORT_AUTO_DELINK(chip))
+       if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
+           CHECK_PID(chip, 0x0159)) {
+               if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
+                               CHECK_FW_VER(chip, 0x5901))
                        SET_AUTO_DELINK(chip);
+               if (STATUS_LEN(chip) == 16) {
+                       if (SUPPORT_AUTO_DELINK(chip))
+                               SET_AUTO_DELINK(chip);
+               }
        }
 #ifdef CONFIG_REALTEK_AUTOPM
        if (ss_en)
index d92b974..a98259e 100644
@@ -2006,7 +2006,7 @@ UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
                US_FL_IGNORE_RESIDUE ),
 
 /* Reported by Michael Büsch <m@bues.ch> */
-UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0116,
+UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0117,
                "JMicron",
                "USB to ATA/ATAPI Bridge",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
index 53cf130..6b72be1 100644
@@ -30,7 +30,7 @@
 
 #include "vhost.h"
 
-static int experimental_zcopytx = 1;
+static int experimental_zcopytx = 0;
 module_param(experimental_zcopytx, int, 0444);
 MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
                                       " 1 -Enable; 0 - Disable");
@@ -39,6 +39,12 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
  * Using this limit prevents one virtqueue from starving others. */
 #define VHOST_NET_WEIGHT 0x80000
 
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with small
+ * pkts.
+ */
+#define VHOST_NET_PKT_WEIGHT 256
+
 /* MAX number of TX used buffers for outstanding zerocopy */
 #define VHOST_MAX_PEND 128
 #define VHOST_GOODCOPY_LEN 256
@@ -372,6 +378,7 @@ static void handle_tx(struct vhost_net *net)
        struct socket *sock;
        struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
        bool zcopy, zcopy_used;
+       int sent_pkts = 0;
 
        mutex_lock(&vq->mutex);
        sock = vq->private_data;
@@ -386,7 +393,7 @@ static void handle_tx(struct vhost_net *net)
        hdr_size = nvq->vhost_hlen;
        zcopy = nvq->ubufs;
 
-       for (;;) {
+       do {
                /* Release DMAs done buffers first */
                if (zcopy)
                        vhost_zerocopy_signal_used(net, vq);
@@ -474,11 +481,7 @@ static void handle_tx(struct vhost_net *net)
                        vhost_zerocopy_signal_used(net, vq);
                total_len += len;
                vhost_net_tx_packet(net);
-               if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
-                       vhost_poll_queue(&vq->poll);
-                       break;
-               }
-       }
+       } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 out:
        mutex_unlock(&vq->mutex);
 }
@@ -642,6 +645,7 @@ static void handle_rx(struct vhost_net *net)
        struct socket *sock;
        struct iov_iter fixup;
        __virtio16 num_buffers;
+       int recv_pkts = 0;
 
        mutex_lock_nested(&vq->mutex, 0);
        sock = vq->private_data;
@@ -661,7 +665,10 @@ static void handle_rx(struct vhost_net *net)
                vq->log : NULL;
        mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
-       while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
+       do {
+               sock_len = vhost_net_rx_peek_head_len(net, sock->sk);
+               if (!sock_len)
+                       break;
                sock_len += sock_hlen;
                vhost_len = sock_len + vhost_hlen;
                headcount = get_rx_bufs(vq, vq->heads, vhost_len,
@@ -739,12 +746,10 @@ static void handle_rx(struct vhost_net *net)
                if (unlikely(vq_log))
                        vhost_log_write(vq, vq_log, log, vhost_len);
                total_len += vhost_len;
-               if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
-                       vhost_poll_queue(&vq->poll);
-                       goto out;
-               }
-       }
+       } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
        vhost_net_enable_vq(net, vq);
+
 out:
        mutex_unlock(&vq->mutex);
 }
@@ -813,7 +818,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
                n->vqs[i].vhost_hlen = 0;
                n->vqs[i].sock_hlen = 0;
        }
-       vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
+       vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+                      VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
 
        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
index 009315f..456572e 100644 (file)
 #define VHOST_SCSI_PREALLOC_UPAGES 2048
 #define VHOST_SCSI_PREALLOC_PROT_SGLS 512
 
+/* Max number of requests before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * request.
+ */
+#define VHOST_SCSI_WEIGHT 256
+
 struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
@@ -855,7 +861,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
        u64 tag;
        u32 exp_data_len, data_direction;
        unsigned out, in;
-       int head, ret, prot_bytes;
+       int head, ret, prot_bytes, c = 0;
        size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
        size_t out_size, in_size;
        u16 lun;
@@ -874,7 +880,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
        vhost_disable_notify(&vs->dev, vq);
 
-       for (;;) {
+       do {
                head = vhost_get_vq_desc(vq, vq->iov,
                                         ARRAY_SIZE(vq->iov), &out, &in,
                                         NULL, NULL);
@@ -1090,7 +1096,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                 */
                INIT_WORK(&cmd->work, vhost_scsi_submission_work);
                queue_work(vhost_scsi_workqueue, &cmd->work);
-       }
+       } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 out:
        mutex_unlock(&vq->mutex);
 }
@@ -1443,7 +1449,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
                vqs[i] = &vs->vqs[i].vq;
                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
-       vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+       vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ,
+                      VHOST_SCSI_WEIGHT, 0);
 
        vhost_scsi_init_inflight(vs, NULL);
 
index 388eec4..accc88c 100644 (file)
  * Using this limit prevents one virtqueue from starving others. */
 #define VHOST_TEST_WEIGHT 0x80000
 
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * pkts.
+ */
+#define VHOST_TEST_PKT_WEIGHT 256
+
 enum {
        VHOST_TEST_VQ = 0,
        VHOST_TEST_VQ_MAX = 1,
@@ -81,10 +87,8 @@ static void handle_vq(struct vhost_test *n)
                }
                vhost_add_used_and_signal(&n->dev, vq, head, 0);
                total_len += len;
-               if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
-                       vhost_poll_queue(&vq->poll);
+               if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
                        break;
-               }
        }
 
        mutex_unlock(&vq->mutex);
@@ -116,7 +120,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
        dev = &n->dev;
        vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
        n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
-       vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
+       vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX,
+                      VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
 
        f->private_data = n;
 
index 53b1b3c..310a779 100644 (file)
@@ -393,8 +393,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
                vhost_vq_free_iovecs(dev->vqs[i]);
 }
 
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
+                         int pkts, int total_len)
+{
+       struct vhost_dev *dev = vq->dev;
+
+       if ((dev->byte_weight && total_len >= dev->byte_weight) ||
+           pkts >= dev->weight) {
+               vhost_poll_queue(&vq->poll);
+               return true;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
+
 void vhost_dev_init(struct vhost_dev *dev,
-                   struct vhost_virtqueue **vqs, int nvqs)
+                   struct vhost_virtqueue **vqs, int nvqs,
+                   int weight, int byte_weight)
 {
        struct vhost_virtqueue *vq;
        int i;
@@ -408,13 +424,14 @@ void vhost_dev_init(struct vhost_dev *dev,
        dev->iotlb = NULL;
        dev->mm = NULL;
        dev->worker = NULL;
+       dev->weight = weight;
+       dev->byte_weight = byte_weight;
        init_llist_head(&dev->work_list);
        init_waitqueue_head(&dev->wait);
        INIT_LIST_HEAD(&dev->read_list);
        INIT_LIST_HEAD(&dev->pending_list);
        spin_lock_init(&dev->iotlb_lock);
 
-
        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
                vq->log = NULL;
@@ -1893,7 +1910,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
                /* If this is an input descriptor, increment that count. */
                if (access == VHOST_ACCESS_WO) {
                        *in_num += ret;
-                       if (unlikely(log)) {
+                       if (unlikely(log && ret)) {
                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
                                ++*log_num;
@@ -2029,7 +2046,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
                        /* If this is an input descriptor,
                         * increment that count. */
                        *in_num += ret;
-                       if (unlikely(log)) {
+                       if (unlikely(log && ret)) {
                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
                                ++*log_num;
index 78f3c5f..ddb052a 100644 (file)
@@ -158,6 +158,8 @@ struct vhost_dev {
        struct eventfd_ctx *log_ctx;
        struct llist_head work_list;
        struct task_struct *worker;
+       int weight;
+       int byte_weight;
        struct vhost_umem *umem;
        struct vhost_umem *iotlb;
        spinlock_t iotlb_lock;
@@ -166,7 +168,9 @@ struct vhost_dev {
        wait_queue_head_t wait;
 };
 
-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+                   int nvqs, int weight, int byte_weight);
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
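
The vhost hunks above bound each worker pass by both a packet weight and a byte weight, and funnel every backend (net, scsi, test) through the shared vhost_exceeds_weight() helper so one busy virtqueue can no longer starve its siblings. A minimal user-space sketch of that fairness pattern, using hypothetical names and constants rather than the kernel API, could look like this:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the per-device limits passed to vhost_dev_init(). */
#define PKT_WEIGHT  4        /* max packets handled per pass */
#define BYTE_WEIGHT 4096     /* max bytes handled per pass   */

/* Same shape as the weight check above: true once either budget is
 * spent, meaning the loop should stop and requeue itself. */
static bool exceeds_weight(int pkts, int total_len)
{
        return (BYTE_WEIGHT && total_len >= BYTE_WEIGHT) || pkts >= PKT_WEIGHT;
}

/* One service pass over a queue of packet lengths; returns how many
 * packets were consumed before the budget ran out. */
static int service_pass(const int *lens, int n)
{
        int sent = 0, total_len = 0;

        do {
                if (sent == n)          /* queue drained */
                        break;
                total_len += lens[sent];
                /* ...transmit lens[sent] bytes here... */
        } while (!exceeds_weight(++sent, total_len));

        return sent;
}

int main(void)
{
        int lens[] = { 100, 200, 3000, 1500, 64, 64 };

        printf("handled %d packets this pass\n", service_pass(lens, 6));
        return 0;
}

With these toy limits the pass stops after four packets (the byte budget is exceeded), which is exactly the point of the kernel change: a bounded do/while replaces the unbounded for(;;).
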
index 8a5ce5b..199b1fb 100644 (file)
@@ -248,6 +248,7 @@ module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
+MODULE_ALIAS("platform:bcm2835-wdt");
 MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
 MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
 MODULE_LICENSE("GPL");
index a8a3883..80e2922 100644 (file)
@@ -365,8 +365,8 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        /* Convert the size to actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);
 
-       if (((dev_addr + size - 1 <= dma_mask)) ||
-           range_straddles_page_boundary(phys, size))
+       if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+                    range_straddles_page_boundary(phys, size)))
                xen_destroy_contiguous_region(phys, order);
 
        xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
index 7f83e90..b1a1d7d 100644 (file)
@@ -115,13 +115,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
 {
        int err;
        u16 old_value;
-       pci_power_t new_state, old_state;
+       pci_power_t new_state;
 
        err = pci_read_config_word(dev, offset, &old_value);
        if (err)
                goto out;
 
-       old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
        new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
 
        new_value &= PM_OK_BITS;
index e9e0437..e80ad0c 100644 (file)
@@ -49,8 +49,9 @@
  * @page: structure to page
  *
  */
-static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
+static int v9fs_fid_readpage(void *data, struct page *page)
 {
+       struct p9_fid *fid = data;
        struct inode *inode = page->mapping->host;
        struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
        struct iov_iter to;
@@ -121,7 +122,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
        if (ret == 0)
                return ret;
 
-       ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
+       ret = read_cache_pages(mapping, pages, v9fs_fid_readpage,
+                       filp->private_data);
        p9_debug(P9_DEBUG_VFS, "  = %d\n", ret);
        return ret;
 }
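
The 9p change above makes the readpage helper match read_cache_pages()'s filler prototype and passes the fid through the opaque data argument instead of casting a mismatched function pointer. A small stand-alone illustration of that callback-plus-context idiom (hypothetical names, not the VFS interface):

#include <stdio.h>

/* A per-mount handle, standing in for struct p9_fid. */
struct fid {
        int id;
};

/* The iterator takes a generic filler callback plus an opaque pointer,
 * much like read_cache_pages(mapping, pages, filler, data). */
static int for_each_page(int npages,
                         int (*filler)(void *data, int page),
                         void *data)
{
        int i, err;

        for (i = 0; i < npages; i++) {
                err = filler(data, i);
                if (err)
                        return err;
        }
        return 0;
}

/* Matches the callback prototype exactly, then recovers its real
 * context from the void pointer - no function-pointer casts needed. */
static int fill_page(void *data, int page)
{
        struct fid *fid = data;

        printf("reading page %d via fid %d\n", page, fid->id);
        return 0;
}

int main(void)
{
        struct fid fid = { .id = 42 };

        return for_each_page(3, fill_page, &fid);
}
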
index 4d4a0df..b00ae92 100644 (file)
@@ -368,6 +368,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
        struct buffer_head *bh;
        struct object_info root_obj;
        unsigned char *b_data;
+       unsigned int blocksize;
        struct adfs_sb_info *asb;
        struct inode *root;
        int ret = -EINVAL;
@@ -419,8 +420,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
                goto error_free_bh;
        }
 
+       blocksize = 1 << dr->log2secsize;
        brelse(bh);
-       if (sb_set_blocksize(sb, 1 << dr->log2secsize)) {
+
+       if (sb_set_blocksize(sb, blocksize)) {
                bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
                if (!bh) {
                        adfs_error(sb, "couldn't read superblock on "
index 57a4609..f9c3907 100644 (file)
@@ -5133,7 +5133,7 @@ process_leaf:
                        }
 
                        if (btrfs_inode_in_log(di_inode, trans->transid)) {
-                               iput(di_inode);
+                               btrfs_add_delayed_iput(di_inode);
                                continue;
                        }
 
@@ -5143,7 +5143,7 @@ process_leaf:
                        btrfs_release_path(path);
                        ret = btrfs_log_inode(trans, root, di_inode,
                                              log_mode, 0, LLONG_MAX, ctx);
-                       iput(di_inode);
+                       btrfs_add_delayed_iput(di_inode);
                        if (ret)
                                goto next_dir_inode;
                        if (ctx->log_new_dentries) {
@@ -5281,7 +5281,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
 
                        ret = btrfs_log_inode(trans, root, dir_inode,
                                              LOG_INODE_ALL, 0, LLONG_MAX, ctx);
-                       iput(dir_inode);
+                       btrfs_add_delayed_iput(dir_inode);
                        if (ret)
                                goto out;
                }
index 4eb7a6b..55ce654 100644 (file)
@@ -4942,8 +4942,7 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
 
        if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                         BTRFS_BLOCK_GROUP_RAID10 |
-                        BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_DUP)) {
+                        BTRFS_BLOCK_GROUP_RAID5)) {
                max_errors = 1;
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
                max_errors = 2;
index e7b5451..e137ff6 100644 (file)
@@ -1072,20 +1072,23 @@ static int send_cap_msg(struct ceph_mds_session *session,
 }
 
 /*
- * Queue cap releases when an inode is dropped from our cache.  Since
- * inode is about to be destroyed, there is no need for i_ceph_lock.
+ * Queue cap releases when an inode is dropped from our cache.
  */
 void ceph_queue_caps_release(struct inode *inode)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct rb_node *p;
 
+       /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
+        * may call __ceph_caps_issued_mask() on a freeing inode. */
+       spin_lock(&ci->i_ceph_lock);
        p = rb_first(&ci->i_caps);
        while (p) {
                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
                p = rb_next(p);
                __ceph_remove_cap(cap, true);
        }
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
index 8c8cb8f..5d05c77 100644 (file)
@@ -474,7 +474,12 @@ static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
                                           long long release_count,
                                           long long ordered_count)
 {
-       smp_mb__before_atomic();
+       /*
+        * Makes sure operations that setup readdir cache (update page
+        * cache and i_size) are strongly ordered w.r.t. the following
+        * atomic64_set() operations.
+        */
+       smp_mb();
        atomic64_set(&ci->i_complete_seq[0], release_count);
        atomic64_set(&ci->i_complete_seq[1], ordered_count);
 }
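
The reworked comment above explains why a full barrier is needed: the readdir-cache setup (page cache and i_size updates) has to be globally visible before the completion counters are published with atomic64_set(). A rough C11 model of that publish ordering, with hypothetical names and atomic_thread_fence() standing in loosely for smp_mb():

#include <stdatomic.h>
#include <stdio.h>

static long long readdir_cache;           /* data being published   */
static _Atomic long long complete_seq;    /* the "is it ready" flag */

/* Writer: fill the cache, then fence, then publish the sequence,
 * mirroring "update page cache / i_size; smp_mb(); atomic64_set()". */
static void writer(long long seq)
{
        readdir_cache = seq * 100;                       /* setup   */
        atomic_thread_fence(memory_order_seq_cst);       /* barrier */
        atomic_store_explicit(&complete_seq, seq,
                              memory_order_relaxed);     /* publish */
}

/* Reader: only trust the cache once the published sequence matches;
 * the fence pairs with the writer's fence. */
static void reader(long long want)
{
        if (atomic_load_explicit(&complete_seq,
                                 memory_order_relaxed) == want) {
                atomic_thread_fence(memory_order_seq_cst);
                printf("cache for seq %lld = %lld\n", want, readdir_cache);
        }
}

int main(void)
{
        writer(1);
        reader(1);
        return 0;
}
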
index b24275e..22e5f34 100644 (file)
@@ -916,6 +916,7 @@ int __ceph_setxattr(struct dentry *dentry, const char *name,
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
        struct ceph_cap_flush *prealloc_cf = NULL;
+       struct ceph_buffer *old_blob = NULL;
        int issued;
        int err;
        int dirty = 0;
@@ -984,13 +985,15 @@ retry:
                struct ceph_buffer *blob;
 
                spin_unlock(&ci->i_ceph_lock);
-               dout(" preaallocating new blob size=%d\n", required_blob_size);
+               ceph_buffer_put(old_blob); /* Shouldn't be required */
+               dout(" pre-allocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
                        goto do_sync_unlocked;
                spin_lock(&ci->i_ceph_lock);
+               /* prealloc_blob can't be released while holding i_ceph_lock */
                if (ci->i_xattrs.prealloc_blob)
-                       ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+                       old_blob = ci->i_xattrs.prealloc_blob;
                ci->i_xattrs.prealloc_blob = blob;
                goto retry;
        }
@@ -1006,6 +1009,7 @@ retry:
        }
 
        spin_unlock(&ci->i_ceph_lock);
+       ceph_buffer_put(old_blob);
        if (lock_snap_rwsem)
                up_read(&mdsc->snap_rwsem);
        if (dirty)
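
The xattr hunk above stops dropping the old prealloc_blob while i_ceph_lock is held: the pointer is only detached under the lock and released after the unlock. That detach-then-release pattern in isolation (hypothetical names, plain C, a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *prealloc_buf;       /* protected by lock */

/* Stand-in for ceph_buffer_put(): not safe to call under the lock in
 * this model, so callers detach the pointer and release it later. */
static void buffer_put(char *buf)
{
        free(buf);               /* free(NULL) is a no-op */
}

/* Swap in a freshly allocated buffer; the old one is detached under
 * the lock and freed only once the lock has been dropped. */
static void install_buffer(char *new_buf)
{
        char *old_buf;

        pthread_mutex_lock(&lock);
        old_buf = prealloc_buf;          /* detach, do not free here  */
        prealloc_buf = new_buf;
        pthread_mutex_unlock(&lock);

        buffer_put(old_buf);             /* safe: lock no longer held */
}

int main(void)
{
        install_buffer(malloc(16));
        install_buffer(malloc(32));
        install_buffer(NULL);            /* drop the last buffer too */
        printf("done\n");
        return 0;
}
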
index 9cb72fd..6310834 100644 (file)
@@ -2466,6 +2466,7 @@ static int
 cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
 {
        int rc = 0;
+       int is_domain = 0;
        const char *delim, *payload;
        char *desc;
        ssize_t len;
@@ -2513,6 +2514,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
                        rc = PTR_ERR(key);
                        goto out_err;
                }
+               is_domain = 1;
        }
 
        down_read(&key->sem);
@@ -2570,6 +2572,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
                goto out_key_put;
        }
 
+       /*
+        * If we have a domain key then we must set the domainName in the
+        * for the request.
+        */
+       if (is_domain && ses->domainName) {
+               vol->domainname = kstrndup(ses->domainName,
+                                          strlen(ses->domainName),
+                                          GFP_KERNEL);
+               if (!vol->domainname) {
+                       cifs_dbg(FYI, "Unable to allocate %zd bytes for "
+                                "domain\n", len);
+                       rc = -ENOMEM;
+                       kfree(vol->username);
+                       vol->username = NULL;
+                       kzfree(vol->password);
+                       vol->password = NULL;
+                       goto out_key_put;
+               }
+       }
+
 out_key_put:
        up_read(&key->sem);
        key_put(key);
index 5e21d58..84e60b3 100644 (file)
@@ -171,7 +171,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
        if (tcon == NULL)
                return 0;
 
-       if (smb2_command == SMB2_TREE_CONNECT)
+       if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
                return 0;
 
        if (tcon->tidStatus == CifsExiting) {
@@ -677,7 +677,12 @@ ssetup_ntlmssp_authenticate:
        else
                req->SecurityMode = 0;
 
+#ifdef CONFIG_CIFS_DFS_UPCALL
+       req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
+#else
        req->Capabilities = 0;
+#endif /* DFS_UPCALL */
+
        req->Channel = 0; /* MBZ */
 
        iov[0].iov_base = (char *)req;
index 1da3805..cbbd76f 100644 (file)
@@ -81,6 +81,41 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
        return ret;
 }
 
+struct coda_vm_ops {
+       atomic_t refcnt;
+       struct file *coda_file;
+       const struct vm_operations_struct *host_vm_ops;
+       struct vm_operations_struct vm_ops;
+};
+
+static void
+coda_vm_open(struct vm_area_struct *vma)
+{
+       struct coda_vm_ops *cvm_ops =
+               container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
+
+       atomic_inc(&cvm_ops->refcnt);
+
+       if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
+               cvm_ops->host_vm_ops->open(vma);
+}
+
+static void
+coda_vm_close(struct vm_area_struct *vma)
+{
+       struct coda_vm_ops *cvm_ops =
+               container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
+
+       if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
+               cvm_ops->host_vm_ops->close(vma);
+
+       if (atomic_dec_and_test(&cvm_ops->refcnt)) {
+               vma->vm_ops = cvm_ops->host_vm_ops;
+               fput(cvm_ops->coda_file);
+               kfree(cvm_ops);
+       }
+}
+
 static int
 coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
 {
@@ -88,6 +123,8 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
        struct coda_inode_info *cii;
        struct file *host_file;
        struct inode *coda_inode, *host_inode;
+       struct coda_vm_ops *cvm_ops;
+       int ret;
 
        cfi = CODA_FTOC(coda_file);
        BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
@@ -96,6 +133,13 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
        if (!host_file->f_op->mmap)
                return -ENODEV;
 
+       if (WARN_ON(coda_file != vma->vm_file))
+               return -EIO;
+
+       cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
+       if (!cvm_ops)
+               return -ENOMEM;
+
        coda_inode = file_inode(coda_file);
        host_inode = file_inode(host_file);
 
@@ -109,6 +153,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
         * the container file on us! */
        else if (coda_inode->i_mapping != host_inode->i_mapping) {
                spin_unlock(&cii->c_lock);
+               kfree(cvm_ops);
                return -EBUSY;
        }
 
@@ -117,7 +162,29 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
        cfi->cfi_mapcount++;
        spin_unlock(&cii->c_lock);
 
-       return host_file->f_op->mmap(host_file, vma);
+       vma->vm_file = get_file(host_file);
+       ret = host_file->f_op->mmap(host_file, vma);
+
+       if (ret) {
+               /* if call_mmap fails, our caller will put coda_file so we
+                * should drop the reference to the host_file that we got.
+                */
+               fput(host_file);
+               kfree(cvm_ops);
+       } else {
+               /* here we add redirects for the open/close vm_operations */
+               cvm_ops->host_vm_ops = vma->vm_ops;
+               if (vma->vm_ops)
+                       cvm_ops->vm_ops = *vma->vm_ops;
+
+               cvm_ops->vm_ops.open = coda_vm_open;
+               cvm_ops->vm_ops.close = coda_vm_close;
+               cvm_ops->coda_file = coda_file;
+               atomic_set(&cvm_ops->refcnt, 1);
+
+               vma->vm_ops = &cvm_ops->vm_ops;
+       }
+       return ret;
 }
 
 int coda_open(struct inode *coda_inode, struct file *coda_file)
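
The coda mmap path above clones the host file's vm_operations into a refcounted wrapper, so the file reference taken at mmap time is dropped only when the open/close balance reaches zero, and each callback finds its wrapper again with container_of(). A stand-alone sketch of that embed-and-recover pattern (hypothetical names, simplified callback signatures, user-space C):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* The callback table a mapping points at, like vm_operations_struct. */
struct vm_ops {
        void (*open)(struct vm_ops *ops);
        void (*close)(struct vm_ops *ops);
};

/* Wrapper that embeds a private copy of the ops plus extra state. */
struct wrapped_ops {
        int refcnt;
        struct vm_ops ops;       /* the table handed back to callers */
};

static void wrapped_open(struct vm_ops *ops)
{
        struct wrapped_ops *w = container_of(ops, struct wrapped_ops, ops);

        w->refcnt++;
        printf("open: refcnt=%d\n", w->refcnt);
}

static void wrapped_close(struct vm_ops *ops)
{
        struct wrapped_ops *w = container_of(ops, struct wrapped_ops, ops);

        if (--w->refcnt == 0) {
                printf("last close: releasing wrapper\n");
                free(w);
                return;
        }
        printf("close: refcnt=%d\n", w->refcnt);
}

int main(void)
{
        struct wrapped_ops *w = malloc(sizeof(*w));

        w->refcnt = 1;                       /* reference from mmap()  */
        w->ops.open = wrapped_open;
        w->ops.close = wrapped_close;

        w->ops.open(&w->ops);                /* mapping duplicated     */
        w->ops.close(&w->ops);               /* one mapping torn down  */
        w->ops.close(&w->ops);               /* last mapping goes away */
        return 0;
}
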
index 8226291..ff9b5cf 100644 (file)
@@ -187,8 +187,11 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
        if (req->uc_opcode == CODA_OPEN_BY_FD) {
                struct coda_open_by_fd_out *outp =
                        (struct coda_open_by_fd_out *)req->uc_data;
-               if (!outp->oh.result)
+               if (!outp->oh.result) {
                        outp->fh = fget(outp->fd);
+                       if (!outp->fh)
+                               return -EBADF;
+               }
        }
 
         wake_up(&req->uc_sleep);
index 42e014c..2acdc6d 100644 (file)
@@ -1019,9 +1019,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
 COMPATIBLE_IOCTL(PPPIOCATTCHAN)
 COMPATIBLE_IOCTL(PPPIOCGCHAN)
 COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
-/* PPPOX */
-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
 /* ppdev */
 COMPATIBLE_IOCTL(PPSETMODE)
 COMPATIBLE_IOCTL(PPRSTATUS)
index cf0186f..27d1db5 100644 (file)
@@ -1171,8 +1171,10 @@ int ecryptfs_read_and_validate_header_region(struct inode *inode)
        lower_file->f_ra.ra_pages = ra_pages_org;
        /* restore read a head mechanism */
 
-       if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
-               return rc >= 0 ? -EINVAL : rc;
+       if (rc < 0)
+               return rc;
+       else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+               return -EINVAL;
        rc = ecryptfs_validate_marker(marker);
        if (!rc)
                ecryptfs_i_size_init(file_size, inode);
@@ -1530,8 +1532,10 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
        rc = ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry),
                                     ECRYPTFS_XATTR_NAME, file_size,
                                     ECRYPTFS_SIZE_AND_MARKER_BYTES);
-       if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
-               return rc >= 0 ? -EINVAL : rc;
+       if (rc < 0)
+               return rc;
+       else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+               return -EINVAL;
        rc = ecryptfs_validate_marker(marker);
        if (!rc)
                ecryptfs_i_size_init(file_size, inode);
index ab6e34e..eba4024 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1644,7 +1644,7 @@ static int do_execveat_common(int fd, struct filename *filename,
        current->fs->in_exec = 0;
        current->in_execve = 0;
        acct_update_integrals(current);
-       task_numa_free(current);
+       task_numa_free(current, false);
        free_bprm(bprm);
        kfree(pathbuf);
        putname(filename);
index b3cd53a..1e27252 100644 (file)
@@ -146,8 +146,8 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
 
        exist = f2fs_test_bit(offset, se->cur_valid_map);
        if (!exist && type == DATA_GENERIC_ENHANCE) {
-               f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
-                       "blkaddr:%u, sit bitmap:%d", blkaddr, exist);
+               f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
+                        blkaddr, exist);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                WARN_ON(1);
        }
@@ -184,8 +184,8 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
        case DATA_GENERIC_ENHANCE_READ:
                if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
                                blkaddr < MAIN_BLKADDR(sbi))) {
-                       f2fs_msg(sbi->sb, KERN_WARNING,
-                               "access invalid blkaddr:%u", blkaddr);
+                       f2fs_warn(sbi, "access invalid blkaddr:%u",
+                                 blkaddr);
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        WARN_ON(1);
                        return false;
@@ -658,9 +658,8 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 
 err_out:
        set_sbi_flag(sbi, SBI_NEED_FSCK);
-       f2fs_msg(sbi->sb, KERN_WARNING,
-                       "%s: orphan failed (ino=%x), run fsck to fix.",
-                       __func__, ino);
+       f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.",
+                 __func__, ino);
        return err;
 }
 
@@ -677,13 +676,12 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
                return 0;
 
        if (bdev_read_only(sbi->sb->s_bdev)) {
-               f2fs_msg(sbi->sb, KERN_INFO, "write access "
-                       "unavailable, skipping orphan cleanup");
+               f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
                return 0;
        }
 
        if (s_flags & MS_RDONLY) {
-               f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
+               f2fs_info(sbi, "orphan cleanup on readonly fs");
                sbi->sb->s_flags &= ~MS_RDONLY;
        }
 
@@ -828,26 +826,14 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
        if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
                        crc_offset > CP_CHKSUM_OFFSET) {
                f2fs_put_page(*cp_page, 1);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "invalid crc_offset: %zu", crc_offset);
+               f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
                return -EINVAL;
        }
 
-       if (__is_set_ckpt_flags(*cp_block, CP_LARGE_NAT_BITMAP_FLAG)) {
-               if (crc_offset != CP_MIN_CHKSUM_OFFSET) {
-                       f2fs_put_page(*cp_page, 1);
-                       f2fs_msg(sbi->sb, KERN_WARNING,
-                               "layout of large_nat_bitmap is deprecated, "
-                               "run fsck to repair, chksum_offset: %zu",
-                               crc_offset);
-                       return -EINVAL;
-               }
-       }
-
        crc = f2fs_checkpoint_chksum(sbi, *cp_block);
        if (crc != cur_cp_crc(*cp_block)) {
                f2fs_put_page(*cp_page, 1);
-               f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
+               f2fs_warn(sbi, "invalid crc value");
                return -EINVAL;
        }
 
@@ -870,9 +856,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
 
        if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
                                        sbi->blocks_per_seg) {
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "invalid cp_pack_total_block_count:%u",
-                       le32_to_cpu(cp_block->cp_pack_total_block_count));
+               f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
+                         le32_to_cpu(cp_block->cp_pack_total_block_count));
                goto invalid_cp;
        }
        pre_version = *version;
@@ -906,6 +891,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
        unsigned int cp_blks = 1 + __cp_payload(sbi);
        block_t cp_blk_no;
        int i;
+       int err;
 
        sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
                                 GFP_KERNEL);
@@ -933,6 +919,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
        } else if (cp2) {
                cur_page = cp2;
        } else {
+               err = -EFSCORRUPTED;
                goto fail_no_cp;
        }
 
@@ -945,8 +932,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
                sbi->cur_cp_pack = 2;
 
        /* Sanity checking of checkpoint */
-       if (f2fs_sanity_check_ckpt(sbi))
+       if (f2fs_sanity_check_ckpt(sbi)) {
+               err = -EFSCORRUPTED;
                goto free_fail_no_cp;
+       }
 
        if (cp_blks <= 1)
                goto done;
@@ -960,8 +949,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
                unsigned char *ckpt = (unsigned char *)sbi->ckpt;
 
                cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
-               if (IS_ERR(cur_page))
+               if (IS_ERR(cur_page)) {
+                       err = PTR_ERR(cur_page);
                        goto free_fail_no_cp;
+               }
                sit_bitmap_ptr = page_address(cur_page);
                memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
                f2fs_put_page(cur_page, 1);
@@ -976,7 +967,7 @@ free_fail_no_cp:
        f2fs_put_page(cp2, 1);
 fail_no_cp:
        kvfree(sbi->ckpt);
-       return -EINVAL;
+       return err;
 }
 
 static void __add_dirty_inode(struct inode *inode, enum inode_type type)
@@ -1143,17 +1134,24 @@ static void __prepare_cp_block(struct f2fs_sb_info *sbi)
 
 static bool __need_flush_quota(struct f2fs_sb_info *sbi)
 {
+       bool ret = false;
+
        if (!is_journalled_quota(sbi))
                return false;
-       if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
-               return false;
-       if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
-               return false;
-       if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH))
-               return true;
-       if (get_pages(sbi, F2FS_DIRTY_QDATA))
-               return true;
-       return false;
+
+       down_write(&sbi->quota_sem);
+       if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
+               ret = false;
+       } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
+               ret = false;
+       } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
+               clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
+               ret = true;
+       } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
+               ret = true;
+       }
+       up_write(&sbi->quota_sem);
+       return ret;
 }
 
 /*
@@ -1172,26 +1170,22 @@ static int block_operations(struct f2fs_sb_info *sbi)
        blk_start_plug(&plug);
 
 retry_flush_quotas:
+       f2fs_lock_all(sbi);
        if (__need_flush_quota(sbi)) {
                int locked;
 
                if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
                        set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
-                       f2fs_lock_all(sbi);
+                       set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
                        goto retry_flush_dents;
                }
-               clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
+               f2fs_unlock_all(sbi);
 
                /* only failed during mount/umount/freeze/quotactl */
                locked = down_read_trylock(&sbi->sb->s_umount);
                f2fs_quota_sync(sbi->sb, -1);
                if (locked)
                        up_read(&sbi->sb->s_umount);
-       }
-
-       f2fs_lock_all(sbi);
-       if (__need_flush_quota(sbi)) {
-               f2fs_unlock_all(sbi);
                cond_resched();
                goto retry_flush_quotas;
        }
@@ -1213,12 +1207,6 @@ retry_flush_dents:
         */
        down_write(&sbi->node_change);
 
-       if (__need_flush_quota(sbi)) {
-               up_write(&sbi->node_change);
-               f2fs_unlock_all(sbi);
-               goto retry_flush_quotas;
-       }
-
        if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
                up_write(&sbi->node_change);
                f2fs_unlock_all(sbi);
@@ -1314,7 +1302,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        else
                __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
 
-       if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+       if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
+               is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
                __set_ckpt_flags(ckpt, CP_FSCK_FLAG);
 
        if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
@@ -1570,8 +1559,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
                if (cpc->reason != CP_PAUSE)
                        return 0;
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                               "Start checkpoint disabled!");
+               f2fs_warn(sbi, "Start checkpoint disabled!");
        }
        mutex_lock(&sbi->cp_mutex);
 
@@ -1637,8 +1625,7 @@ stop:
        stat_inc_cp_count(sbi->stat_info);
 
        if (cpc->reason & CP_RECOVERY)
-               f2fs_msg(sbi->sb, KERN_NOTICE,
-                       "checkpoint: version = %llx", ckpt_ver);
+               f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
 
        /* do checkpoint periodically */
        f2fs_update_time(sbi, CP_TIME);
index 380d5b5..568aaba 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
 #include <linux/bio.h>
+#include <linux/swap.h>
 #include <linux/prefetch.h>
 #include <linux/uio.h>
 #include <linux/cleancache.h>
@@ -54,7 +55,7 @@ static bool __is_cp_guaranteed(struct page *page)
 
 static enum count_type __read_io_type(struct page *page)
 {
-       struct address_space *mapping = page->mapping;
+       struct address_space *mapping = page_file_mapping(page);
 
        if (mapping) {
                struct inode *inode = mapping->host;
@@ -380,20 +381,20 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
        io->bio = NULL;
 }
 
-static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
+static bool __has_merged_page(struct bio *bio, struct inode *inode,
                                                struct page *page, nid_t ino)
 {
        struct bio_vec *bvec;
        struct page *target;
        int i;
 
-       if (!io->bio)
+       if (!bio)
                return false;
 
        if (!inode && !page && !ino)
                return true;
 
-       bio_for_each_segment_all(bvec, io->bio, i) {
+       bio_for_each_segment_all(bvec, bio, i) {
 
                if (bvec->bv_page->mapping)
                        target = bvec->bv_page;
@@ -444,7 +445,7 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
                        struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
 
                        down_read(&io->io_rwsem);
-                       ret = __has_merged_page(io, inode, page, ino);
+                       ret = __has_merged_page(io->bio, inode, page, ino);
                        up_read(&io->io_rwsem);
                }
                if (ret)
@@ -488,7 +489,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
        if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                        fio->is_por ? META_POR : (__is_meta_io(fio) ?
                        META_GENERIC : DATA_GENERIC_ENHANCE)))
-               return -EFAULT;
+               return -EFSCORRUPTED;
 
        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);
@@ -514,6 +515,61 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
        return 0;
 }
 
+int f2fs_merge_page_bio(struct f2fs_io_info *fio)
+{
+       struct bio *bio = *fio->bio;
+       struct page *page = fio->encrypted_page ?
+                       fio->encrypted_page : fio->page;
+
+       if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
+                       __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+               return -EFSCORRUPTED;
+
+       trace_f2fs_submit_page_bio(page, fio);
+       f2fs_trace_ios(fio, 0);
+
+       if (bio && (*fio->last_block + 1 != fio->new_blkaddr ||
+                       !__same_bdev(fio->sbi, fio->new_blkaddr, bio))) {
+               __submit_bio(fio->sbi, bio, fio->type);
+               bio = NULL;
+       }
+alloc_new:
+       if (!bio) {
+               bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
+                               BIO_MAX_PAGES, false, fio->type, fio->temp);
+               bio_set_op_attrs(bio, fio->op, fio->op_flags);
+       }
+
+       if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+               __submit_bio(fio->sbi, bio, fio->type);
+               bio = NULL;
+               goto alloc_new;
+       }
+
+       if (fio->io_wbc)
+               wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
+
+       inc_page_count(fio->sbi, WB_DATA_TYPE(page));
+
+       *fio->last_block = fio->new_blkaddr;
+       *fio->bio = bio;
+
+       return 0;
+}
+
+static void f2fs_submit_ipu_bio(struct f2fs_sb_info *sbi, struct bio **bio,
+                                                       struct page *page)
+{
+       if (!bio)
+               return;
+
+       if (!__has_merged_page(*bio, NULL, page, 0))
+               return;
+
+       __submit_bio(sbi, *bio, DATA);
+       *bio = NULL;
+}
+
 void f2fs_submit_page_write(struct f2fs_io_info *fio)
 {
        struct f2fs_sb_info *sbi = fio->sbi;
@@ -767,7 +823,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE_READ)) {
-                       err = -EFAULT;
+                       err = -EFSCORRUPTED;
                        goto put_err;
                }
                goto got_it;
@@ -787,7 +843,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
                        !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
                                                dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE)) {
-               err = -EFAULT;
+               err = -EFSCORRUPTED;
                goto put_err;
        }
 got_it:
@@ -1133,7 +1189,7 @@ next_block:
 
        if (__is_valid_data_blkaddr(blkaddr) &&
                !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
-               err = -EFAULT;
+               err = -EFSCORRUPTED;
                goto sync_out;
        }
 
@@ -1563,7 +1619,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
        sector_t block_nr;
        int ret = 0;
 
-       block_in_file = (sector_t)page->index;
+       block_in_file = (sector_t)page_index(page);
        last_block = block_in_file + nr_pages;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
                                                        blkbits;
@@ -1596,14 +1652,15 @@ got_it:
                block_nr = map->m_pblk + block_in_file - map->m_lblk;
                SetPageMappedToDisk(page);
 
-               if (!PageUptodate(page) && !cleancache_get_page(page)) {
+               if (!PageUptodate(page) && (!PageSwapCache(page) &&
+                                       !cleancache_get_page(page))) {
                        SetPageUptodate(page);
                        goto confused;
                }
 
                if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
                                                DATA_GENERIC_ENHANCE_READ)) {
-                       ret = -EFAULT;
+                       ret = -EFSCORRUPTED;
                        goto out;
                }
        } else {
@@ -1694,7 +1751,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
                        prefetchw(&page->flags);
                        list_del(&page->lru);
                        if (add_to_page_cache_lru(page, mapping,
-                                                 page->index, GFP_KERNEL))
+                                                 page_index(page), GFP_KERNEL))
                                goto next_page;
                }
 
@@ -1717,7 +1774,7 @@ next_page:
 
 static int f2fs_read_data_page(struct file *file, struct page *page)
 {
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = page_file_mapping(page)->host;
        int ret = -EAGAIN;
 
        trace_f2fs_readpage(page, DATA);
@@ -1726,7 +1783,8 @@ static int f2fs_read_data_page(struct file *file, struct page *page)
        if (f2fs_has_inline_data(inode))
                ret = f2fs_read_inline_data(inode, page);
        if (ret == -EAGAIN)
-               ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
+               ret = f2fs_mpage_readpages(page_file_mapping(page),
+                                               NULL, page, 1, false);
        return ret;
 }
 
@@ -1883,7 +1941,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 
                if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
                                                DATA_GENERIC_ENHANCE))
-                       return -EFAULT;
+                       return -EFSCORRUPTED;
 
                ipu_force = true;
                fio->need_lock = LOCK_DONE;
@@ -1910,7 +1968,7 @@ got_it:
        if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
                !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
                                                DATA_GENERIC_ENHANCE)) {
-               err = -EFAULT;
+               err = -EFSCORRUPTED;
                goto out_writepage;
        }
        /*
@@ -1979,6 +2037,8 @@ out:
 }
 
 static int __write_data_page(struct page *page, bool *submitted,
+                               struct bio **bio,
+                               sector_t *last_block,
                                struct writeback_control *wbc,
                                enum iostat_type io_type)
 {
@@ -2004,6 +2064,8 @@ static int __write_data_page(struct page *page, bool *submitted,
                .need_lock = LOCK_RETRY,
                .io_type = io_type,
                .io_wbc = wbc,
+               .bio = bio,
+               .last_block = last_block,
        };
 
        trace_f2fs_writepage(page, DATA);
@@ -2102,10 +2164,13 @@ out:
 
        unlock_page(page);
        if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
-                                       !F2FS_I(inode)->cp_task)
+                                       !F2FS_I(inode)->cp_task) {
+               f2fs_submit_ipu_bio(sbi, bio, page);
                f2fs_balance_fs(sbi, need_balance_fs);
+       }
 
        if (unlikely(f2fs_cp_error(sbi))) {
+               f2fs_submit_ipu_bio(sbi, bio, page);
                f2fs_submit_merged_write(sbi, DATA);
                submitted = NULL;
        }
@@ -2132,7 +2197,7 @@ redirty_out:
 static int f2fs_write_data_page(struct page *page,
                                        struct writeback_control *wbc)
 {
-       return __write_data_page(page, NULL, wbc, FS_DATA_IO);
+       return __write_data_page(page, NULL, NULL, NULL, wbc, FS_DATA_IO);
 }
 
 /*
@@ -2148,6 +2213,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
        int done = 0;
        struct pagevec pvec;
        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+       struct bio *bio = NULL;
+       sector_t last_block;
        int nr_pages;
        pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
@@ -2224,17 +2291,20 @@ continue_unlock:
                        }
 
                        if (PageWriteback(page)) {
-                               if (wbc->sync_mode != WB_SYNC_NONE)
+                               if (wbc->sync_mode != WB_SYNC_NONE) {
                                        f2fs_wait_on_page_writeback(page,
                                                        DATA, true, true);
-                               else
+                                       f2fs_submit_ipu_bio(sbi, &bio, page);
+                               } else {
                                        goto continue_unlock;
+                               }
                        }
 
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;
 
-                       ret = __write_data_page(page, &submitted, wbc, io_type);
+                       ret = __write_data_page(page, &submitted, &bio,
+                                       &last_block, wbc, io_type);
                        if (unlikely(ret)) {
                                /*
                                 * keep nr_to_write, since vfs uses this to
@@ -2283,6 +2353,9 @@ continue_unlock:
        if (nwritten)
                f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
                                                                NULL, 0, DATA);
+       /* submit cached bio of IPU write */
+       if (bio)
+               __submit_bio(sbi, bio, DATA);
 
        return ret;
 }
@@ -2294,6 +2367,9 @@ static inline bool __should_serialize_io(struct inode *inode,
                return false;
        if (IS_NOQUOTA(inode))
                return false;
+       /* to avoid deadlock in path of data flush */
+       if (F2FS_I(inode)->cp_task)
+               return false;
        if (wbc->sync_mode != WB_SYNC_ALL)
                return true;
        if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
@@ -2575,7 +2651,7 @@ repeat:
        } else {
                if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
                                DATA_GENERIC_ENHANCE_READ)) {
-                       err = -EFAULT;
+                       err = -EFSCORRUPTED;
                        goto fail;
                }
                err = f2fs_submit_page_read(inode, page, blkaddr);
@@ -2851,13 +2927,14 @@ int f2fs_release_page(struct page *page, gfp_t wait)
 
 static int f2fs_set_data_page_dirty(struct page *page)
 {
-       struct address_space *mapping = page->mapping;
-       struct inode *inode = mapping->host;
+       struct inode *inode = page_file_mapping(page)->host;
 
        trace_f2fs_set_page_dirty(page, DATA);
 
        if (!PageUptodate(page))
                SetPageUptodate(page);
+       if (PageSwapCache(page))
+               return __set_page_dirty_nobuffers(page);
 
        if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
                if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
@@ -2946,6 +3023,126 @@ int f2fs_migrate_page(struct address_space *mapping,
 }
 #endif
 
+#ifdef CONFIG_SWAP
+/* Copied from generic_swapfile_activate() to check any holes */
+static int check_swap_activate(struct file *swap_file, unsigned int max)
+{
+       struct address_space *mapping = swap_file->f_mapping;
+       struct inode *inode = mapping->host;
+       unsigned blocks_per_page;
+       unsigned long page_no;
+       unsigned blkbits;
+       sector_t probe_block;
+       sector_t last_block;
+       sector_t lowest_block = -1;
+       sector_t highest_block = 0;
+
+       blkbits = inode->i_blkbits;
+       blocks_per_page = PAGE_SIZE >> blkbits;
+
+       /*
+        * Map all the blocks into the extent list.  This code doesn't try
+        * to be very smart.
+        */
+       probe_block = 0;
+       page_no = 0;
+       last_block = i_size_read(inode) >> blkbits;
+       while ((probe_block + blocks_per_page) <= last_block && page_no < max) {
+               unsigned block_in_page;
+               sector_t first_block;
+
+               cond_resched();
+
+               first_block = bmap(inode, probe_block);
+               if (first_block == 0)
+                       goto bad_bmap;
+
+               /*
+                * It must be PAGE_SIZE aligned on-disk
+                */
+               if (first_block & (blocks_per_page - 1)) {
+                       probe_block++;
+                       goto reprobe;
+               }
+
+               for (block_in_page = 1; block_in_page < blocks_per_page;
+                                       block_in_page++) {
+                       sector_t block;
+
+                       block = bmap(inode, probe_block + block_in_page);
+                       if (block == 0)
+                               goto bad_bmap;
+                       if (block != first_block + block_in_page) {
+                               /* Discontiguity */
+                               probe_block++;
+                               goto reprobe;
+                       }
+               }
+
+               first_block >>= (PAGE_SHIFT - blkbits);
+               if (page_no) {  /* exclude the header page */
+                       if (first_block < lowest_block)
+                               lowest_block = first_block;
+                       if (first_block > highest_block)
+                               highest_block = first_block;
+               }
+
+               page_no++;
+               probe_block += blocks_per_page;
+reprobe:
+               continue;
+       }
+       return 0;
+
+bad_bmap:
+       pr_err("swapon: swapfile has holes\n");
+       return -EINVAL;
+}
+
+static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
+                               sector_t *span)
+{
+       struct inode *inode = file_inode(file);
+       int ret;
+
+       if (!S_ISREG(inode->i_mode))
+               return -EINVAL;
+
+       if (f2fs_readonly(F2FS_I_SB(inode)->sb))
+               return -EROFS;
+
+       ret = f2fs_convert_inline_inode(inode);
+       if (ret)
+               return ret;
+
+       ret = check_swap_activate(file, sis->max);
+       if (ret)
+               return ret;
+
+       set_inode_flag(inode, FI_PIN_FILE);
+       f2fs_precache_extents(inode);
+       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+       return 0;
+}
+
+static void f2fs_swap_deactivate(struct file *file)
+{
+       struct inode *inode = file_inode(file);
+
+       clear_inode_flag(inode, FI_PIN_FILE);
+}
+#else
+static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
+                               sector_t *span)
+{
+       return -EOPNOTSUPP;
+}
+
+static void f2fs_swap_deactivate(struct file *file)
+{
+}
+#endif
+
 const struct address_space_operations f2fs_dblock_aops = {
        .readpage       = f2fs_read_data_page,
        .readpages      = f2fs_read_data_pages,
@@ -2958,6 +3155,8 @@ const struct address_space_operations f2fs_dblock_aops = {
        .releasepage    = f2fs_release_page,
        .direct_IO      = f2fs_direct_IO,
        .bmap           = f2fs_bmap,
+       .swap_activate  = f2fs_swap_activate,
+       .swap_deactivate = f2fs_swap_deactivate,
 #ifdef CONFIG_MIGRATION
        .migratepage    = f2fs_migrate_page,
 #endif
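
The swapfile hooks added above only accept files whose data maps to page-aligned, physically contiguous, hole-free runs, which check_swap_activate() verifies block by block via bmap(). The toy walk below checks the same three conditions over a hypothetical block-mapping table; it merely labels each page-sized run, whereas the kernel code re-probes misaligned or discontiguous runs and hard-fails only on holes.

#include <stdbool.h>
#include <stdio.h>

#define BLOCKS_PER_PAGE 4      /* e.g. 4 KiB page / 1 KiB block */

/* A toy logical-to-physical block map; 0 means "hole". */
static const unsigned int blkmap[] = {
        100, 101, 102, 103,    /* page 0: contiguous and aligned */
        104, 105, 106, 107,    /* page 1: contiguous             */
        200, 201, 203, 204,    /* page 2: discontiguous          */
};

/* A page-sized run is usable if it is hole-free, starts on a
 * page-aligned block and is physically contiguous. */
static bool page_run_ok(const unsigned int *map)
{
        unsigned int first = map[0];
        int i;

        if (first == 0)                         /* hole              */
                return false;
        if (first % BLOCKS_PER_PAGE)            /* not page aligned  */
                return false;
        for (i = 1; i < BLOCKS_PER_PAGE; i++)
                if (map[i] != first + i)        /* discontiguity     */
                        return false;
        return true;
}

int main(void)
{
        int pages = sizeof(blkmap) / sizeof(blkmap[0]) / BLOCKS_PER_PAGE;
        int i;

        for (i = 0; i < pages; i++)
                printf("page %d: %s\n", i,
                       page_run_ok(blkmap + i * BLOCKS_PER_PAGE) ?
                       "usable" : "rejected");
        return 0;
}
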
index d00ba9b..9cadcf9 100644 (file)
@@ -27,8 +27,15 @@ static DEFINE_MUTEX(f2fs_stat_mutex);
 static void update_general_status(struct f2fs_sb_info *sbi)
 {
        struct f2fs_stat_info *si = F2FS_STAT(sbi);
+       struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        int i;
 
+       /* these will be changed if online resize is done */
+       si->main_area_segs = le32_to_cpu(raw_super->segment_count_main);
+       si->main_area_sections = le32_to_cpu(raw_super->section_count);
+       si->main_area_zones = si->main_area_sections /
+                               le32_to_cpu(raw_super->secs_per_zone);
+
        /* validation check of the segment numbers */
        si->hit_largest = atomic64_read(&sbi->read_hit_largest);
        si->hit_cached = atomic64_read(&sbi->read_hit_cached);
index 988af0b..d01f376 100644 (file)
@@ -218,9 +218,8 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
 
        max_depth = F2FS_I(dir)->i_current_depth;
        if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
-               f2fs_msg(F2FS_I_SB(dir)->sb, KERN_WARNING,
-                               "Corrupted max_depth of %lu: %u",
-                               dir->i_ino, max_depth);
+               f2fs_warn(F2FS_I_SB(dir), "Corrupted max_depth of %lu: %u",
+                         dir->i_ino, max_depth);
                max_depth = MAX_DIR_HASH_DEPTH;
                f2fs_i_depth_write(dir, max_depth);
        }
@@ -816,11 +815,10 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
                bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
                if (unlikely(bit_pos > d->max ||
                                le16_to_cpu(de->name_len) > F2FS_NAME_LEN)) {
-                       f2fs_msg(sbi->sb, KERN_WARNING,
-                               "%s: corrupted namelen=%d, run fsck to fix.",
-                               __func__, le16_to_cpu(de->name_len));
+                       f2fs_warn(sbi, "%s: corrupted namelen=%d, run fsck to fix.",
+                                 __func__, le16_to_cpu(de->name_len));
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
-                       err = -EINVAL;
+                       err = -EFSCORRUPTED;
                        goto out;
                }
 
@@ -828,8 +826,8 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
                        int save_len = fstr->len;
 
                        err = fscrypt_fname_disk_to_usr(d->inode,
-                                               (u32)de->hash_code, 0,
-                                               &de_name, fstr);
+                                               (u32)le32_to_cpu(de->hash_code),
+                                               0, &de_name, fstr);
                        if (err)
                                goto out;
 
index 904ad7b..a770226 100644 (file)
@@ -175,10 +175,9 @@ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
                next_re = rb_entry(next, struct rb_entry, rb_node);
 
                if (cur_re->ofs + cur_re->len > next_re->ofs) {
-                       f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
-                               "cur(%u, %u) next(%u, %u)",
-                               cur_re->ofs, cur_re->len,
-                               next_re->ofs, next_re->len);
+                       f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
+                                 cur_re->ofs, cur_re->len,
+                                 next_re->ofs, next_re->len);
                        return false;
                }
 
index b70d99f..ae98cdf 100644 (file)
@@ -139,6 +139,9 @@ struct f2fs_mount_info {
        int alloc_mode;                 /* segment allocation policy */
        int fsync_mode;                 /* fsync policy */
        bool test_dummy_encryption;     /* test dummy encryption */
+       block_t unusable_cap;           /* Amount of space allowed to be
+                                        * unusable when disabling checkpoint
+                                        */
 };
 
 #define F2FS_FEATURE_ENCRYPT           0x0001
@@ -476,6 +479,7 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
 #define F2FS_IOC_SET_PIN_FILE          _IOW(F2FS_IOCTL_MAGIC, 13, __u32)
 #define F2FS_IOC_GET_PIN_FILE          _IOR(F2FS_IOCTL_MAGIC, 14, __u32)
 #define F2FS_IOC_PRECACHE_EXTENTS      _IO(F2FS_IOCTL_MAGIC, 15)
+#define F2FS_IOC_RESIZE_FS             _IOW(F2FS_IOCTL_MAGIC, 16, __u64)
 
 #define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
 #define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
@@ -537,8 +541,8 @@ static inline int get_inline_xattr_addrs(struct inode *inode);
 #define NR_INLINE_DENTRY(inode)        (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
                                ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
                                BITS_PER_BYTE + 1))
-#define INLINE_DENTRY_BITMAP_SIZE(inode)       ((NR_INLINE_DENTRY(inode) + \
-                                       BITS_PER_BYTE - 1) / BITS_PER_BYTE)
+#define INLINE_DENTRY_BITMAP_SIZE(inode) \
+       DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
 #define INLINE_RESERVED_SIZE(inode)    (MAX_INLINE_DATA(inode) - \
                                ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
                                NR_INLINE_DENTRY(inode) + \
@@ -1113,6 +1117,8 @@ struct f2fs_io_info {
        bool retry;             /* need to reallocate block address */
        enum iostat_type io_type;       /* io type */
        struct writeback_control *io_wbc; /* writeback control */
+       struct bio **bio;               /* bio for ipu */
+       sector_t *last_block;           /* last block number in bio */
        unsigned char version;          /* version of the node */
 };
 
@@ -1172,6 +1178,7 @@ enum {
        SBI_QUOTA_NEED_FLUSH,                   /* need to flush quota info in CP */
        SBI_QUOTA_SKIP_FLUSH,                   /* skip flushing quota in current CP */
        SBI_QUOTA_NEED_REPAIR,                  /* quota file may be corrupted */
+       SBI_IS_RESIZEFS,                        /* resizefs is in process */
 };
 
 enum {
@@ -1268,6 +1275,7 @@ struct f2fs_sb_info {
        /* for inode management */
        struct list_head inode_list[NR_INODE_TYPE];     /* dirty inode list */
        spinlock_t inode_lock[NR_INODE_TYPE];   /* for dirty inode list lock */
+       struct mutex flush_lock;                /* for flush exclusion */
 
        /* for extent tree cache */
        struct radix_tree_root extent_tree_root;/* cache extent cache entries */
@@ -1291,6 +1299,7 @@ struct f2fs_sb_info {
        unsigned int segs_per_sec;              /* segments per section */
        unsigned int secs_per_zone;             /* sections per zone */
        unsigned int total_sections;            /* total section count */
+       struct mutex resize_mutex;              /* for resize exclusion */
        unsigned int total_node_count;          /* total node block count */
        unsigned int total_valid_node_count;    /* valid node block count */
        loff_t max_file_blocks;                 /* max block index of file */
@@ -1308,6 +1317,7 @@ struct f2fs_sb_info {
        block_t unusable_block_count;           /* # of blocks saved by last cp */
 
        unsigned int nquota_files;              /* # of quota sysfile */
+       struct rw_semaphore quota_sem;          /* blocking cp for flags */
 
        /* # of pages, see count_type */
        atomic_t nr_pages[NR_COUNT_TYPE];
@@ -1550,7 +1560,7 @@ static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
 
 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
 {
-       return F2FS_M_SB(page->mapping);
+       return F2FS_M_SB(page_file_mapping(page));
 }
 
 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
@@ -1828,8 +1838,12 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
 
        if (!__allow_reserved_blocks(sbi, inode, true))
                avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
-       if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
-               avail_user_block_count -= sbi->unusable_block_count;
+       if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+               if (avail_user_block_count > sbi->unusable_block_count)
+                       avail_user_block_count -= sbi->unusable_block_count;
+               else
+                       avail_user_block_count = 0;
+       }
        if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
                diff = sbi->total_valid_block_count - avail_user_block_count;
                if (diff > *count)
@@ -1857,7 +1871,20 @@ enospc:
        return -ENOSPC;
 }
 
-void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+__printf(2, 3)
+void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
+
+#define f2fs_err(sbi, fmt, ...)                                                \
+       f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
+#define f2fs_warn(sbi, fmt, ...)                                       \
+       f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
+#define f2fs_notice(sbi, fmt, ...)                                     \
+       f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
+#define f2fs_info(sbi, fmt, ...)                                       \
+       f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
+#define f2fs_debug(sbi, fmt, ...)                                      \
+       f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
+
 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
                                                struct inode *inode,
                                                block_t count)
@@ -1873,11 +1900,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
                                        sbi->current_reserved_blocks + count);
        spin_unlock(&sbi->stat_lock);
        if (unlikely(inode->i_blocks < sectors)) {
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
-                       inode->i_ino,
-                       (unsigned long long)inode->i_blocks,
-                       (unsigned long long)sectors);
+               f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
+                         inode->i_ino,
+                         (unsigned long long)inode->i_blocks,
+                         (unsigned long long)sectors);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                return;
        }
@@ -2029,7 +2055,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
                                        struct inode *inode, bool is_inode)
 {
        block_t valid_block_count;
-       unsigned int valid_node_count;
+       unsigned int valid_node_count, user_block_count;
        int err;
 
        if (is_inode) {
@@ -2056,10 +2082,11 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
 
        if (!__allow_reserved_blocks(sbi, inode, false))
                valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
+       user_block_count = sbi->user_block_count;
        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
-               valid_block_count += sbi->unusable_block_count;
+               user_block_count -= sbi->unusable_block_count;
 
-       if (unlikely(valid_block_count > sbi->user_block_count)) {
+       if (unlikely(valid_block_count > user_block_count)) {
                spin_unlock(&sbi->stat_lock);
                goto enospc;
        }
@@ -2114,10 +2141,9 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
                dquot_free_inode(inode);
        } else {
                if (unlikely(inode->i_blocks == 0)) {
-                       f2fs_msg(sbi->sb, KERN_WARNING,
-                               "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
-                               inode->i_ino,
-                               (unsigned long long)inode->i_blocks);
+                       f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
+                                 inode->i_ino,
+                                 (unsigned long long)inode->i_blocks);
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        return;
                }
@@ -2253,6 +2279,9 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
 
 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
 {
+       if (sbi->gc_mode == GC_URGENT)
+               return true;
+
        if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
                get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
                get_pages(sbi, F2FS_WB_CP_DATA) ||
@@ -2260,7 +2289,7 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
                get_pages(sbi, F2FS_DIO_WRITE))
                return false;
 
-       if (SM_I(sbi) && SM_I(sbi)->dcc_info &&
+       if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
                        atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
                return false;
 
@@ -2382,57 +2411,23 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
 }
 
 /*
- * Inode flags
+ * On-disk inode flags (f2fs_inode::i_flags)
  */
-#define F2FS_SECRM_FL                  0x00000001 /* Secure deletion */
-#define F2FS_UNRM_FL                   0x00000002 /* Undelete */
-#define F2FS_COMPR_FL                  0x00000004 /* Compress file */
 #define F2FS_SYNC_FL                   0x00000008 /* Synchronous updates */
 #define F2FS_IMMUTABLE_FL              0x00000010 /* Immutable file */
 #define F2FS_APPEND_FL                 0x00000020 /* writes to file may only append */
 #define F2FS_NODUMP_FL                 0x00000040 /* do not dump file */
 #define F2FS_NOATIME_FL                        0x00000080 /* do not update atime */
-/* Reserved for compression usage... */
-#define F2FS_DIRTY_FL                  0x00000100
-#define F2FS_COMPRBLK_FL               0x00000200 /* One or more compressed clusters */
-#define F2FS_NOCOMPR_FL                        0x00000400 /* Don't compress */
-#define F2FS_ENCRYPT_FL                        0x00000800 /* encrypted file */
-/* End compression flags --- maybe not all used */
 #define F2FS_INDEX_FL                  0x00001000 /* hash-indexed directory */
-#define F2FS_IMAGIC_FL                 0x00002000 /* AFS directory */
-#define F2FS_JOURNAL_DATA_FL           0x00004000 /* file data should be journaled */
-#define F2FS_NOTAIL_FL                 0x00008000 /* file tail should not be merged */
 #define F2FS_DIRSYNC_FL                        0x00010000 /* dirsync behaviour (directories only) */
-#define F2FS_TOPDIR_FL                 0x00020000 /* Top of directory hierarchies*/
-#define F2FS_HUGE_FILE_FL               0x00040000 /* Set to each huge file */
-#define F2FS_EXTENTS_FL                        0x00080000 /* Inode uses extents */
-#define F2FS_EA_INODE_FL               0x00200000 /* Inode used for large EA */
-#define F2FS_EOFBLOCKS_FL              0x00400000 /* Blocks allocated beyond EOF */
-#define F2FS_NOCOW_FL                  0x00800000 /* Do not cow file */
-#define F2FS_INLINE_DATA_FL            0x10000000 /* Inode has inline data. */
 #define F2FS_PROJINHERIT_FL            0x20000000 /* Create with parents projid */
-#define F2FS_RESERVED_FL               0x80000000 /* reserved for ext4 lib */
-
-#define F2FS_FL_USER_VISIBLE           0x30CBDFFF /* User visible flags */
-#define F2FS_FL_USER_MODIFIABLE                0x204BC0FF /* User modifiable flags */
-
-/* Flags we can manipulate with through F2FS_IOC_FSSETXATTR */
-#define F2FS_FL_XFLAG_VISIBLE          (F2FS_SYNC_FL | \
-                                        F2FS_IMMUTABLE_FL | \
-                                        F2FS_APPEND_FL | \
-                                        F2FS_NODUMP_FL | \
-                                        F2FS_NOATIME_FL | \
-                                        F2FS_PROJINHERIT_FL)
 
 /* Flags that should be inherited by new inodes from their parent. */
-#define F2FS_FL_INHERITED (F2FS_SECRM_FL | F2FS_UNRM_FL | F2FS_COMPR_FL |\
-                          F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL |\
-                          F2FS_NOCOMPR_FL | F2FS_JOURNAL_DATA_FL |\
-                          F2FS_NOTAIL_FL | F2FS_DIRSYNC_FL |\
-                          F2FS_PROJINHERIT_FL)
+#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
+                          F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL)
 
 /* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define F2FS_REG_FLMASK                (~(F2FS_DIRSYNC_FL | F2FS_TOPDIR_FL))
+#define F2FS_REG_FLMASK                (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL))
 
 /* Flags that are appropriate for non-directories/regular files. */
 #define F2FS_OTHER_FLMASK      (F2FS_NODUMP_FL | F2FS_NOATIME_FL)
@@ -2940,9 +2935,8 @@ static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
                                        block_t blkaddr, int type)
 {
        if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                       "invalid blkaddr: %u, type: %d, run fsck to fix.",
-                       blkaddr, type);
+               f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
+                        blkaddr, type);
                f2fs_bug_on(sbi, 1);
        }
 }
@@ -3072,8 +3066,6 @@ int f2fs_quota_sync(struct super_block *sb, int type);
 void f2fs_quota_off_umount(struct super_block *sb);
 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
 int f2fs_sync_fs(struct super_block *sb, int sync);
-extern __printf(3, 4)
-void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
 
 /*
@@ -3157,9 +3149,12 @@ bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
                                        struct cp_control *cpc);
 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
-int f2fs_disable_cp_again(struct f2fs_sb_info *sbi);
+block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
+int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
+void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+                                       unsigned int start, unsigned int end);
 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
@@ -3252,6 +3247,7 @@ void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
                                nid_t ino, enum page_type type);
 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
+int f2fs_merge_page_bio(struct f2fs_io_info *fio);
 void f2fs_submit_page_write(struct f2fs_io_info *fio);
 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
                        block_t blk_addr, struct bio *bio);
@@ -3297,6 +3293,7 @@ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
                        unsigned int segno);
 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
+int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
 
 /*
  * recovery.c
@@ -3777,7 +3774,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
        if (test_opt(sbi, LFS) && (rw == WRITE) &&
                                block_unaligned_IO(inode, iocb, iter))
                return true;
-       if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
+       if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
+                                       !(inode->i_flags & S_SWAPFILE))
                return true;
 
        return false;
@@ -3803,4 +3801,7 @@ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
        return false;
 }
 
+#define EFSBADCRC      EBADMSG         /* Bad CRC detected */
+#define EFSCORRUPTED   EUCLEAN         /* Filesystem is corrupted */
+
 #endif /* _LINUX_F2FS_H */
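
The header changes above replace f2fs_msg(sb, level, fmt, ...) with a single f2fs_printk(sbi, fmt, ...) whose per-level wrappers (f2fs_err, f2fs_warn, ...) glue a KERN_* prefix onto the format string, while the __printf(2, 3) annotation keeps compile-time format checking. A minimal user-space sketch of that pattern, assuming made-up "<n>" level prefixes and demo_* names (neither is the kernel API):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical level prefixes standing in for KERN_ERR / KERN_WARNING / KERN_INFO. */
#define LVL_ERR  "<3>"
#define LVL_WARN "<4>"
#define LVL_INFO "<6>"

/* Format checking, like __printf(1, 2) on the kernel side. */
__attribute__((format(printf, 1, 2)))
static void demo_printk(const char *fmt, ...)
{
        const char *level = "info";
        va_list args;

        /* Peel the level prefix off the front of the format string. */
        if (strncmp(fmt, LVL_ERR, 3) == 0)       { level = "err";  fmt += 3; }
        else if (strncmp(fmt, LVL_WARN, 3) == 0) { level = "warn"; fmt += 3; }
        else if (strncmp(fmt, LVL_INFO, 3) == 0) { fmt += 3; }

        fprintf(stderr, "F2FS-fs: [%s] ", level);
        va_start(args, fmt);
        vfprintf(stderr, fmt, args);
        va_end(args);
        fputc('\n', stderr);
}

/* Callers concatenate the level onto the format, as f2fs_err()/f2fs_warn() do. */
#define demo_err(fmt, ...)  demo_printk(LVL_ERR fmt, ##__VA_ARGS__)
#define demo_warn(fmt, ...) demo_printk(LVL_WARN fmt, ##__VA_ARGS__)

int main(void)
{
        demo_warn("Inconsistent i_blocks, ino:%lu", 42UL);
        demo_err("invalid blkaddr: %u, type: %d", 7u, 1);
        return 0;
}
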
index 8a1ad6c..c51d0c5 100644 (file)
@@ -711,12 +711,12 @@ int f2fs_getattr(struct vfsmount *mnt,
                stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
        }
 
-       flags = fi->i_flags & F2FS_FL_USER_VISIBLE;
+       flags = fi->i_flags;
        if (flags & F2FS_APPEND_FL)
                stat->attributes |= STATX_ATTR_APPEND;
        if (flags & F2FS_COMPR_FL)
                stat->attributes |= STATX_ATTR_COMPRESSED;
-       if (f2fs_encrypted_inode(inode))
+       if (IS_ENCRYPTED(inode))
                stat->attributes |= STATX_ATTR_ENCRYPTED;
        if (flags & F2FS_IMMUTABLE_FL)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
@@ -724,7 +724,6 @@ int f2fs_getattr(struct vfsmount *mnt,
                stat->attributes |= STATX_ATTR_NODUMP;
 
        stat->attributes_mask |= (STATX_ATTR_APPEND |
-                                 STATX_ATTR_COMPRESSED |
                                  STATX_ATTR_ENCRYPTED |
                                  STATX_ATTR_IMMUTABLE |
                                  STATX_ATTR_NODUMP);
@@ -1033,7 +1032,7 @@ next_dnode:
                        !f2fs_is_valid_blkaddr(sbi, *blkaddr,
                                        DATA_GENERIC_ENHANCE)) {
                        f2fs_put_dnode(&dn);
-                       return -EFAULT;
+                       return -EFSCORRUPTED;
                }
 
                if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
@@ -1221,7 +1220,7 @@ roll_back:
 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-       pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+       pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        pgoff_t start = offset >> PAGE_SHIFT;
        pgoff_t end = (offset + len) >> PAGE_SHIFT;
        int ret;
@@ -1474,7 +1473,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
        pg_start = offset >> PAGE_SHIFT;
        pg_end = (offset + len) >> PAGE_SHIFT;
        delta = pg_end - pg_start;
-       idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+       idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
        /* avoid gc operation during block exchange */
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -1538,7 +1537,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
        if (off_end)
                map.m_len++;
 
-       err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+       if (f2fs_is_pinned_file(inode))
+               map.m_seg_type = CURSEG_COLD_DATA;
+
+       err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
+                                               F2FS_GET_BLOCK_PRE_DIO :
+                                               F2FS_GET_BLOCK_PRE_AIO));
        if (err) {
                pgoff_t last_off;
 
@@ -1655,69 +1659,152 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
        return 0;
 }
 
+static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
+{
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+
+       /* Is it quota file? Do not allow user to mess with it */
+       if (IS_NOQUOTA(inode))
+               return -EPERM;
+
+       fi->i_flags = iflags | (fi->i_flags & ~mask);
+
+       if (fi->i_flags & F2FS_PROJINHERIT_FL)
+               set_inode_flag(inode, FI_PROJ_INHERIT);
+       else
+               clear_inode_flag(inode, FI_PROJ_INHERIT);
+
+       inode->i_ctime = current_time(inode);
+       f2fs_set_inode_flags(inode);
+       f2fs_mark_inode_dirty_sync(inode, true);
+       return 0;
+}
+
+/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
+
+/*
+ * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
+ * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
+ * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
+ * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
+ */
+
+static const struct {
+       u32 iflag;
+       u32 fsflag;
+} f2fs_fsflags_map[] = {
+       { F2FS_SYNC_FL,         FS_SYNC_FL },
+       { F2FS_IMMUTABLE_FL,    FS_IMMUTABLE_FL },
+       { F2FS_APPEND_FL,       FS_APPEND_FL },
+       { F2FS_NODUMP_FL,       FS_NODUMP_FL },
+       { F2FS_NOATIME_FL,      FS_NOATIME_FL },
+       { F2FS_INDEX_FL,        FS_INDEX_FL },
+       { F2FS_DIRSYNC_FL,      FS_DIRSYNC_FL },
+       { F2FS_PROJINHERIT_FL,  FS_PROJINHERIT_FL },
+};
+
+#define F2FS_GETTABLE_FS_FL (          \
+               FS_SYNC_FL |            \
+               FS_IMMUTABLE_FL |       \
+               FS_APPEND_FL |          \
+               FS_NODUMP_FL |          \
+               FS_NOATIME_FL |         \
+               FS_INDEX_FL |           \
+               FS_DIRSYNC_FL |         \
+               FS_PROJINHERIT_FL |     \
+               FS_ENCRYPT_FL |         \
+               FS_INLINE_DATA_FL |     \
+               FS_NOCOW_FL)
+
+#define F2FS_SETTABLE_FS_FL (          \
+               FS_SYNC_FL |            \
+               FS_IMMUTABLE_FL |       \
+               FS_APPEND_FL |          \
+               FS_NODUMP_FL |          \
+               FS_NOATIME_FL |         \
+               FS_DIRSYNC_FL |         \
+               FS_PROJINHERIT_FL)
+
+/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
+static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
+{
+       u32 fsflags = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
+               if (iflags & f2fs_fsflags_map[i].iflag)
+                       fsflags |= f2fs_fsflags_map[i].fsflag;
+
+       return fsflags;
+}
+
+/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
+static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
+{
+       u32 iflags = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
+               if (fsflags & f2fs_fsflags_map[i].fsflag)
+                       iflags |= f2fs_fsflags_map[i].iflag;
+
+       return iflags;
+}
+
 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
        struct f2fs_inode_info *fi = F2FS_I(inode);
-       unsigned int flags = fi->i_flags;
+       u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
 
-       if (f2fs_encrypted_inode(inode))
-               flags |= F2FS_ENCRYPT_FL;
+       if (IS_ENCRYPTED(inode))
+               fsflags |= FS_ENCRYPT_FL;
        if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
-               flags |= F2FS_INLINE_DATA_FL;
+               fsflags |= FS_INLINE_DATA_FL;
        if (is_inode_flag_set(inode, FI_PIN_FILE))
-               flags |= F2FS_NOCOW_FL;
+               fsflags |= FS_NOCOW_FL;
 
-       flags &= F2FS_FL_USER_VISIBLE;
+       fsflags &= F2FS_GETTABLE_FS_FL;
 
-       return put_user(flags, (int __user *)arg);
+       return put_user(fsflags, (int __user *)arg);
 }
 
 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
        struct f2fs_inode_info *fi = F2FS_I(inode);
-       unsigned int flags;
-       unsigned int oldflags;
+       u32 fsflags, old_fsflags;
+       u32 iflags;
        int ret;
 
        if (!inode_owner_or_capable(inode))
                return -EACCES;
 
-       if (get_user(flags, (int __user *)arg))
+       if (get_user(fsflags, (int __user *)arg))
                return -EFAULT;
 
+       if (fsflags & ~F2FS_GETTABLE_FS_FL)
+               return -EOPNOTSUPP;
+       fsflags &= F2FS_SETTABLE_FS_FL;
+
+       iflags = f2fs_fsflags_to_iflags(fsflags);
+       if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
+               return -EOPNOTSUPP;
+
        ret = mnt_want_write_file(filp);
        if (ret)
                return ret;
 
        inode_lock(inode);
 
-       /* Is it quota file? Do not allow user to mess with it */
-       if (IS_NOQUOTA(inode)) {
-               ret = -EPERM;
-               goto unlock_out;
-       }
-
-       flags = f2fs_mask_flags(inode->i_mode, flags);
-
-       oldflags = fi->i_flags;
-
-       if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL)) {
-               if (!capable(CAP_LINUX_IMMUTABLE)) {
-                       ret = -EPERM;
-                       goto unlock_out;
-               }
-       }
-
-       flags = flags & (F2FS_FL_USER_MODIFIABLE);
-       flags |= oldflags & ~(F2FS_FL_USER_MODIFIABLE);
-       fi->i_flags = flags;
+       old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
+       ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
+       if (ret)
+               goto out;
 
-       inode->i_ctime = current_time(inode);
-       f2fs_set_inode_flags(inode);
-       f2fs_mark_inode_dirty_sync(inode, false);
-unlock_out:
+       ret = f2fs_setflags_common(inode, iflags,
+                       f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
+out:
        inode_unlock(inode);
        mnt_drop_write_file(filp);
        return ret;
@@ -1764,9 +1851,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
         * f2fs_is_atomic_file.
         */
        if (get_dirty_pages(inode))
-               f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
-               "Unexpected flush for atomic writes: ino=%lu, npages=%u",
-                                       inode->i_ino, get_dirty_pages(inode));
+               f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
+                         inode->i_ino, get_dirty_pages(inode));
        ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
        if (ret) {
                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -2201,8 +2287,7 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
                return -EROFS;
 
        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
-               f2fs_msg(sbi->sb, KERN_INFO,
-                       "Skipping Checkpoint. Checkpoints currently disabled.");
+               f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
                return -EINVAL;
        }
 
@@ -2291,7 +2376,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
        if (!fragmented)
                goto out;
 
-       sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);
+       sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
 
        /*
         * make sure there are enough free section for LFS allocation, this can
@@ -2587,10 +2672,8 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
 
        if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
                        __is_large_section(sbi)) {
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "Can't flush %u in %d for segs_per_sec %u != 1",
-                               range.dev_num, sbi->s_ndevs,
-                               sbi->segs_per_sec);
+               f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
+                         range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
                return -EINVAL;
        }
 
@@ -2649,10 +2732,9 @@ int f2fs_pin_file_control(struct inode *inode, bool inc)
                                fi->i_gc_failures[GC_FAILURE_PIN] + 1);
 
        if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "%s: Enable GC = ino %lx after %x GC trials",
-                       __func__, inode->i_ino,
-                       fi->i_gc_failures[GC_FAILURE_PIN]);
+               f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
+                         __func__, inode->i_ino,
+                         fi->i_gc_failures[GC_FAILURE_PIN]);
                clear_inode_flag(inode, FI_PIN_FILE);
                return -EAGAIN;
        }
@@ -2665,9 +2747,6 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
        __u32 pin;
        int ret = 0;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
        if (get_user(pin, (__u32 __user *)arg))
                return -EFAULT;
 
@@ -2760,6 +2839,27 @@ static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
        return f2fs_precache_extents(file_inode(filp));
 }
 
+static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
+       __u64 block_count;
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (f2fs_readonly(sbi->sb))
+               return -EROFS;
+
+       if (copy_from_user(&block_count, (void __user *)arg,
+                          sizeof(block_count)))
+               return -EFAULT;
+
+       ret = f2fs_resize_fs(sbi, block_count);
+
+       return ret;
+}
+
 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
@@ -2812,6 +2912,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                return f2fs_ioc_set_pin_file(filp, arg);
        case F2FS_IOC_PRECACHE_EXTENTS:
                return f2fs_ioc_precache_extents(filp, arg);
+       case F2FS_IOC_RESIZE_FS:
+               return f2fs_ioc_resize_fs(filp, arg);
        default:
                return -ENOTTY;
        }
@@ -2928,6 +3030,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case F2FS_IOC_GET_PIN_FILE:
        case F2FS_IOC_SET_PIN_FILE:
        case F2FS_IOC_PRECACHE_EXTENTS:
+       case F2FS_IOC_RESIZE_FS:
                break;
        default:
                return -ENOIOCTLCMD;
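
The new f2fs_fsflags_map[] above keeps the on-disk F2FS_*_FL bits and the generic FS_IOC_{GET,SET}FLAGS bits as separate namespaces and translates between them entry by entry, so only flags listed in the table (and allowed by F2FS_GETTABLE_FS_FL / F2FS_SETTABLE_FS_FL) pass through. A stand-alone sketch of that table-driven translation, using invented ONDISK_*/GENERIC_* bit values rather than the real constants:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative on-disk flag bits (stand-ins for F2FS_*_FL). */
#define ONDISK_SYNC      0x00000008u
#define ONDISK_IMMUTABLE 0x00000010u
#define ONDISK_NOATIME   0x00000080u

/* Illustrative generic ioctl flag bits (stand-ins for FS_*_FL). */
#define GENERIC_SYNC      0x00000001u
#define GENERIC_IMMUTABLE 0x00000002u
#define GENERIC_NOATIME   0x00000004u

static const struct {
        uint32_t ondisk;
        uint32_t generic;
} flag_map[] = {
        { ONDISK_SYNC,      GENERIC_SYNC },
        { ONDISK_IMMUTABLE, GENERIC_IMMUTABLE },
        { ONDISK_NOATIME,   GENERIC_NOATIME },
};

/* Translate on-disk bits to the generic representation. */
static uint32_t ondisk_to_generic(uint32_t ondisk)
{
        uint32_t out = 0;

        for (size_t i = 0; i < ARRAY_SIZE(flag_map); i++)
                if (ondisk & flag_map[i].ondisk)
                        out |= flag_map[i].generic;
        return out;
}

/* ...and back again; only bits present in the table survive either direction. */
static uint32_t generic_to_ondisk(uint32_t generic)
{
        uint32_t out = 0;

        for (size_t i = 0; i < ARRAY_SIZE(flag_map); i++)
                if (generic & flag_map[i].generic)
                        out |= flag_map[i].ondisk;
        return out;
}

int main(void)
{
        uint32_t ondisk = ONDISK_IMMUTABLE | ONDISK_NOATIME;
        uint32_t generic = ondisk_to_generic(ondisk);

        printf("generic=0x%x, back=0x%x\n", generic, generic_to_ondisk(generic));
        return 0;
}
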
index e610a94..bac6051 100644 (file)
@@ -311,10 +311,11 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
        struct sit_info *sm = SIT_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, last_victim;
-       unsigned int last_segment = MAIN_SEGS(sbi);
+       unsigned int last_segment;
        unsigned int nsearched = 0;
 
        mutex_lock(&dirty_i->seglist_lock);
+       last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
 
        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);
@@ -387,7 +388,8 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                        goto next;
                /* Don't touch checkpointed data */
                if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
-                                       get_ckpt_valid_blocks(sbi, segno)))
+                                       get_ckpt_valid_blocks(sbi, segno) &&
+                                       p.alloc_mode != SSR))
                        goto next;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        goto next;
@@ -404,7 +406,8 @@ next:
                                sm->last_victim[p.gc_mode] = last_victim + 1;
                        else
                                sm->last_victim[p.gc_mode] = segno + 1;
-                       sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
+                       sm->last_victim[p.gc_mode] %=
+                               (MAIN_SECS(sbi) * sbi->segs_per_sec);
                        break;
                }
        }
@@ -615,9 +618,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
        }
 
        if (sum->version != dni->version) {
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                               "%s: valid data with mismatched node version.",
-                               __func__);
+               f2fs_warn(sbi, "%s: valid data with mismatched node version.",
+                         __func__);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
        }
 
@@ -658,7 +660,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE_READ))) {
-                       err = -EFAULT;
+                       err = -EFSCORRUPTED;
                        goto put_page;
                }
                goto got_it;
@@ -676,7 +678,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
        }
        if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
                                                DATA_GENERIC_ENHANCE))) {
-               err = -EFAULT;
+               err = -EFSCORRUPTED;
                goto put_page;
        }
 got_it:
@@ -794,6 +796,29 @@ static int move_data_block(struct inode *inode, block_t bidx,
        if (lfs_mode)
                down_write(&fio.sbi->io_order_lock);
 
+       mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
+                                       fio.old_blkaddr, false);
+       if (!mpage)
+               goto up_out;
+
+       fio.encrypted_page = mpage;
+
+       /* read source block in mpage */
+       if (!PageUptodate(mpage)) {
+               err = f2fs_submit_page_bio(&fio);
+               if (err) {
+                       f2fs_put_page(mpage, 1);
+                       goto up_out;
+               }
+               lock_page(mpage);
+               if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
+                                               !PageUptodate(mpage))) {
+                       err = -EIO;
+                       f2fs_put_page(mpage, 1);
+                       goto up_out;
+               }
+       }
+
        f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
                                        &sum, CURSEG_COLD_DATA, NULL, false);
 
@@ -801,44 +826,18 @@ static int move_data_block(struct inode *inode, block_t bidx,
                                newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
-               goto recover_block;
-       }
-
-       mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
-                                       fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
-       if (mpage) {
-               bool updated = false;
-
-               if (PageUptodate(mpage)) {
-                       memcpy(page_address(fio.encrypted_page),
-                                       page_address(mpage), PAGE_SIZE);
-                       updated = true;
-               }
                f2fs_put_page(mpage, 1);
-               invalidate_mapping_pages(META_MAPPING(fio.sbi),
-                                       fio.old_blkaddr, fio.old_blkaddr);
-               if (updated)
-                       goto write_page;
-       }
-
-       err = f2fs_submit_page_bio(&fio);
-       if (err)
-               goto put_page_out;
-
-       /* write page */
-       lock_page(fio.encrypted_page);
-
-       if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
-               err = -EIO;
-               goto put_page_out;
-       }
-       if (unlikely(!PageUptodate(fio.encrypted_page))) {
-               err = -EIO;
-               goto put_page_out;
+               goto recover_block;
        }
 
-write_page:
+       /* write target block */
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
+       memcpy(page_address(fio.encrypted_page),
+                               page_address(mpage), PAGE_SIZE);
+       f2fs_put_page(mpage, 1);
+       invalidate_mapping_pages(META_MAPPING(fio.sbi),
+                               fio.old_blkaddr, fio.old_blkaddr);
+
        set_page_dirty(fio.encrypted_page);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);
@@ -869,11 +868,12 @@ write_page:
 put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
 recover_block:
-       if (lfs_mode)
-               up_write(&fio.sbi->io_order_lock);
        if (err)
                f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
                                                                true, true);
+up_out:
+       if (lfs_mode)
+               up_write(&fio.sbi->io_order_lock);
 put_out:
        f2fs_put_dnode(&dn);
 out:
@@ -1180,9 +1180,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 
                sum = page_address(sum_page);
                if (type != GET_SUM_TYPE((&sum->footer))) {
-                       f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
-                               "type [%d, %d] in SSA and SIT",
-                               segno, type, GET_SUM_TYPE((&sum->footer)));
+                       f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
+                                segno, type, GET_SUM_TYPE((&sum->footer)));
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        f2fs_stop_checkpoint(sbi, false);
                        goto skip;
@@ -1360,3 +1359,176 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
                SIT_I(sbi)->last_victim[ALLOC_NEXT] =
                                GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
 }
+
+static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
+                                                       unsigned int end)
+{
+       int type;
+       unsigned int segno, next_inuse;
+       int err = 0;
+
+       /* Move out cursegs from the target range */
+       for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
+               allocate_segment_for_resize(sbi, type, start, end);
+
+       /* do GC to move out valid blocks in the range */
+       for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
+               struct gc_inode_list gc_list = {
+                       .ilist = LIST_HEAD_INIT(gc_list.ilist),
+                       .iroot = RADIX_TREE_INIT(GFP_NOFS),
+               };
+
+               mutex_lock(&sbi->gc_mutex);
+               do_garbage_collect(sbi, segno, &gc_list, FG_GC);
+               mutex_unlock(&sbi->gc_mutex);
+               put_gc_inode(&gc_list);
+
+               if (get_valid_blocks(sbi, segno, true))
+                       return -EAGAIN;
+       }
+
+       err = f2fs_sync_fs(sbi->sb, 1);
+       if (err)
+               return err;
+
+       next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
+       if (next_inuse <= end) {
+               f2fs_err(sbi, "segno %u should be free but still inuse!",
+                        next_inuse);
+               f2fs_bug_on(sbi, 1);
+       }
+       return err;
+}
+
+static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
+{
+       struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
+       int section_count = le32_to_cpu(raw_sb->section_count);
+       int segment_count = le32_to_cpu(raw_sb->segment_count);
+       int segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
+       long long block_count = le64_to_cpu(raw_sb->block_count);
+       int segs = secs * sbi->segs_per_sec;
+
+       raw_sb->section_count = cpu_to_le32(section_count + secs);
+       raw_sb->segment_count = cpu_to_le32(segment_count + segs);
+       raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
+       raw_sb->block_count = cpu_to_le64(block_count +
+                                       (long long)segs * sbi->blocks_per_seg);
+}
+
+static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
+{
+       int segs = secs * sbi->segs_per_sec;
+       long long user_block_count =
+                               le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
+
+       SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
+       MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
+       FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
+       FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
+       F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count +
+                                       (long long)segs * sbi->blocks_per_seg);
+}
+
+int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
+{
+       __u64 old_block_count, shrunk_blocks;
+       unsigned int secs;
+       int gc_mode, gc_type;
+       int err = 0;
+       __u32 rem;
+
+       old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
+       if (block_count > old_block_count)
+               return -EINVAL;
+
+       /* new fs size should align to section size */
+       div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
+       if (rem)
+               return -EINVAL;
+
+       if (block_count == old_block_count)
+               return 0;
+
+       if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+               f2fs_err(sbi, "Should run fsck to repair first.");
+               return -EFSCORRUPTED;
+       }
+
+       if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+               f2fs_err(sbi, "Checkpoint should be enabled.");
+               return -EINVAL;
+       }
+
+       freeze_bdev(sbi->sb->s_bdev);
+
+       shrunk_blocks = old_block_count - block_count;
+       secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
+       spin_lock(&sbi->stat_lock);
+       if (shrunk_blocks + valid_user_blocks(sbi) +
+               sbi->current_reserved_blocks + sbi->unusable_block_count +
+               F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
+               err = -ENOSPC;
+       else
+               sbi->user_block_count -= shrunk_blocks;
+       spin_unlock(&sbi->stat_lock);
+       if (err) {
+               thaw_bdev(sbi->sb->s_bdev, sbi->sb);
+               return err;
+       }
+
+       mutex_lock(&sbi->resize_mutex);
+       set_sbi_flag(sbi, SBI_IS_RESIZEFS);
+
+       mutex_lock(&DIRTY_I(sbi)->seglist_lock);
+
+       MAIN_SECS(sbi) -= secs;
+
+       for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
+               if (SIT_I(sbi)->last_victim[gc_mode] >=
+                                       MAIN_SECS(sbi) * sbi->segs_per_sec)
+                       SIT_I(sbi)->last_victim[gc_mode] = 0;
+
+       for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
+               if (sbi->next_victim_seg[gc_type] >=
+                                       MAIN_SECS(sbi) * sbi->segs_per_sec)
+                       sbi->next_victim_seg[gc_type] = NULL_SEGNO;
+
+       mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
+
+       err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec,
+                       MAIN_SEGS(sbi) - 1);
+       if (err)
+               goto out;
+
+       update_sb_metadata(sbi, -secs);
+
+       err = f2fs_commit_super(sbi, false);
+       if (err) {
+               update_sb_metadata(sbi, secs);
+               goto out;
+       }
+
+       update_fs_metadata(sbi, -secs);
+       clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+       err = f2fs_sync_fs(sbi->sb, 1);
+       if (err) {
+               update_fs_metadata(sbi, secs);
+               update_sb_metadata(sbi, secs);
+               f2fs_commit_super(sbi, false);
+       }
+out:
+       if (err) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
+
+               MAIN_SECS(sbi) += secs;
+               spin_lock(&sbi->stat_lock);
+               sbi->user_block_count += shrunk_blocks;
+               spin_unlock(&sbi->stat_lock);
+       }
+       clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+       mutex_unlock(&sbi->resize_mutex);
+       thaw_bdev(sbi->sb->s_bdev, sbi->sb);
+       return err;
+}
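
f2fs_resize_fs() above only shrinks the filesystem: it rejects a target larger than the current block_count, requires the new size to be section-aligned, and bails out with -ENOSPC if the blocks being given up would not all come out of currently free space, before any segments are moved. A stand-alone sketch of those pre-checks, assuming a simplified fs_geom struct in place of the real superblock counters (names here are illustrative, not the kernel's):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified view of the counters the real code reads under stat_lock. */
struct fs_geom {
        uint64_t block_count;        /* current total blocks        */
        uint64_t blks_per_sec;       /* blocks per section          */
        uint64_t user_block_count;   /* blocks available to users   */
        uint64_t valid_user_blocks;  /* blocks currently in use     */
        uint64_t reserved_blocks;    /* reserved + unusable blocks  */
};

/* Return 0 if the shrink to new_count may proceed, or a negative errno. */
static int check_resize(const struct fs_geom *g, uint64_t new_count)
{
        uint64_t shrunk;

        if (new_count > g->block_count)
                return -EINVAL;                 /* growing is not supported */
        if (new_count % g->blks_per_sec)
                return -EINVAL;                 /* must be section aligned  */
        if (new_count == g->block_count)
                return 0;                       /* nothing to do            */

        shrunk = g->block_count - new_count;
        /* The blocks being removed must all come out of currently free space. */
        if (shrunk + g->valid_user_blocks + g->reserved_blocks > g->user_block_count)
                return -ENOSPC;
        return 0;
}

int main(void)
{
        struct fs_geom g = {
                .block_count = 4096, .blks_per_sec = 512,
                .user_block_count = 4000, .valid_user_blocks = 1000,
                .reserved_blocks = 100,
        };

        printf("shrink to 3584: %d\n", check_resize(&g, 3584));  /* 0                   */
        printf("shrink to 1000: %d\n", check_resize(&g, 1000));  /* -EINVAL (unaligned) */
        printf("shrink to  512: %d\n", check_resize(&g, 512));   /* -ENOSPC             */
        return 0;
}
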
index 7ad88aa..aef822b 100644 (file)
@@ -158,11 +158,9 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
        if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
                f2fs_put_dnode(dn);
                set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
-               f2fs_msg(fio.sbi->sb, KERN_WARNING,
-                       "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
-                       "run fsck to fix.",
-                       __func__, dn->inode->i_ino, dn->data_blkaddr);
-               return -EINVAL;
+               f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
+                         __func__, dn->inode->i_ino, dn->data_blkaddr);
+               return -EFSCORRUPTED;
        }
 
        f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
@@ -401,11 +399,9 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
        if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
                f2fs_put_dnode(&dn);
                set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
-               f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
-                       "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
-                       "run fsck to fix.",
-                       __func__, dir->i_ino, dn.data_blkaddr);
-               err = -EINVAL;
+               f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
+                         __func__, dir->i_ino, dn.data_blkaddr);
+               err = -EFSCORRUPTED;
                goto out;
        }
 
index 63a0fcd..2f02199 100644 (file)
@@ -74,7 +74,7 @@ static int __written_first_block(struct f2fs_sb_info *sbi,
        if (!__is_valid_data_blkaddr(addr))
                return 1;
        if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
-               return -EFAULT;
+               return -EFSCORRUPTED;
        return 0;
 }
 
@@ -176,9 +176,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
        calculated = f2fs_inode_chksum(sbi, page);
 
        if (provided != calculated)
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
-                       page->index, ino_of_node(page), provided, calculated);
+               f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
+                         page->index, ino_of_node(page), provided, calculated);
 
        return provided == calculated;
 }
@@ -202,50 +201,41 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
        iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
        if (!iblocks) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
-                       "run fsck to fix.",
-                       __func__, inode->i_ino, iblocks);
+               f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
+                         __func__, inode->i_ino, iblocks);
                return false;
        }
 
        if (ino_of_node(node_page) != nid_of_node(node_page)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "%s: corrupted inode footer i_ino=%lx, ino,nid: "
-                       "[%u, %u] run fsck to fix.",
-                       __func__, inode->i_ino,
-                       ino_of_node(node_page), nid_of_node(node_page));
+               f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
+                         __func__, inode->i_ino,
+                         ino_of_node(node_page), nid_of_node(node_page));
                return false;
        }
 
        if (f2fs_sb_has_flexible_inline_xattr(sbi)
                        && !f2fs_has_extra_attr(inode)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "%s: corrupted inode ino=%lx, run fsck to fix.",
-                       __func__, inode->i_ino);
+               f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
+                         __func__, inode->i_ino);
                return false;
        }
 
        if (f2fs_has_extra_attr(inode) &&
                        !f2fs_sb_has_extra_attr(sbi)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "%s: inode (ino=%lx) is with extra_attr, "
-                       "but extra_attr feature is off",
-                       __func__, inode->i_ino);
+               f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
+                         __func__, inode->i_ino);
                return false;
        }
 
        if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
                        fi->i_extra_isize % sizeof(__le32)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
-                       "max: %zu",
-                       __func__, inode->i_ino, fi->i_extra_isize,
-                       F2FS_TOTAL_EXTRA_ATTR_SIZE);
+               f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
+                         __func__, inode->i_ino, fi->i_extra_isize,
+                         F2FS_TOTAL_EXTRA_ATTR_SIZE);
                return false;
        }
 
@@ -255,11 +245,9 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
                (!fi->i_inline_xattr_size ||
                fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "%s: inode (ino=%lx) has corrupted "
-                       "i_inline_xattr_size: %d, max: %zu",
-                       __func__, inode->i_ino, fi->i_inline_xattr_size,
-                       MAX_INLINE_XATTR_SIZE);
+               f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
+                         __func__, inode->i_ino, fi->i_inline_xattr_size,
+                         MAX_INLINE_XATTR_SIZE);
                return false;
        }
 
@@ -272,11 +260,9 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
                        !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
                                                DATA_GENERIC_ENHANCE))) {
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
-                       f2fs_msg(sbi->sb, KERN_WARNING,
-                               "%s: inode (ino=%lx) extent info [%u, %u, %u] "
-                               "is incorrect, run fsck to fix",
-                               __func__, inode->i_ino,
-                               ei->blk, ei->fofs, ei->len);
+                       f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
+                                 __func__, inode->i_ino,
+                                 ei->blk, ei->fofs, ei->len);
                        return false;
                }
        }
@@ -284,19 +270,15 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
        if (f2fs_has_inline_data(inode) &&
                        (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "%s: inode (ino=%lx, mode=%u) should not have "
-                       "inline_data, run fsck to fix",
-                       __func__, inode->i_ino, inode->i_mode);
+               f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
+                         __func__, inode->i_ino, inode->i_mode);
                return false;
        }
 
        if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "%s: inode (ino=%lx, mode=%u) should not have "
-                       "inline_dentry, run fsck to fix",
-                       __func__, inode->i_ino, inode->i_mode);
+               f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
+                         __func__, inode->i_ino, inode->i_mode);
                return false;
        }
 
@@ -343,6 +325,8 @@ static int do_read_inode(struct inode *inode)
                                        le16_to_cpu(ri->i_gc_failures);
        fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
        fi->i_flags = le32_to_cpu(ri->i_flags);
+       if (S_ISREG(inode->i_mode))
+               fi->i_flags &= ~F2FS_PROJINHERIT_FL;
        fi->flags = 0;
        fi->i_advise = ri->i_advise;
        fi->i_pino = le32_to_cpu(ri->i_pino);
@@ -374,7 +358,7 @@ static int do_read_inode(struct inode *inode)
 
        if (!sanity_check_inode(inode, node_page)) {
                f2fs_put_page(node_page, 1);
-               return -EINVAL;
+               return -EFSCORRUPTED;
        }
 
        /* check data exist */
@@ -783,8 +767,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
        err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
        if (err) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "May loss orphan inode, run fsck to fix.");
+               f2fs_warn(sbi, "May loss orphan inode, run fsck to fix.");
                goto out;
        }
 
@@ -792,8 +775,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
                err = f2fs_acquire_orphan_inode(sbi);
                if (err) {
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
-                       f2fs_msg(sbi->sb, KERN_WARNING,
-                               "Too many orphan inodes, run fsck to fix.");
+                       f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
                } else {
                        f2fs_add_orphan_inode(inode);
                }
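
Several of the changes above, together with the EFSBADCRC/EFSCORRUPTED defines added to f2fs.h, switch metadata-validation failures from -EINVAL and -EBADMSG to errno values that distinguish the two failure modes: EBADMSG for a checksum mismatch and EUCLEAN for structural corruption. A small user-space sketch of that convention, validating a made-up on-disk record (the record layout and its toy checksum are illustrative; on Linux, <errno.h> supplies EUCLEAN and EBADMSG):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define EFSBADCRC    EBADMSG   /* bad checksum detected    */
#define EFSCORRUPTED EUCLEAN   /* structural inconsistency */

/* Illustrative on-disk record: payload plus a trivial additive checksum. */
struct disk_record {
        uint32_t ino;
        uint32_t blocks;
        uint32_t checksum;   /* ino + blocks in this toy format */
};

static int validate_record(const struct disk_record *r, uint32_t max_ino)
{
        /* Checksum failure: the bytes themselves are untrustworthy. */
        if (r->checksum != r->ino + r->blocks)
                return -EFSBADCRC;
        /* Bytes are intact but the values make no sense: corruption. */
        if (r->ino == 0 || r->ino >= max_ino || r->blocks == 0)
                return -EFSCORRUPTED;
        return 0;
}

int main(void)
{
        struct disk_record good = { .ino = 7, .blocks = 8, .checksum = 15 };
        struct disk_record crc  = { .ino = 7, .blocks = 8, .checksum = 99 };
        struct disk_record bad  = { .ino = 0, .blocks = 8, .checksum = 8  };

        printf("good: %d\n", validate_record(&good, 1024));  /* 0        */
        printf("crc:  %d\n", validate_record(&crc, 1024));   /* -EBADMSG */
        printf("bad:  %d\n", validate_record(&bad, 1024));   /* -EUCLEAN */
        return 0;
}
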
index aa82a5d..94902b8 100644 (file)
@@ -386,9 +386,8 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
        int err = 0;
 
        if (f2fs_readonly(sbi->sb)) {
-               f2fs_msg(sbi->sb, KERN_INFO,
-                       "skip recovering inline_dots inode (ino:%lu, pino:%u) "
-                       "in readonly mountpoint", dir->i_ino, pino);
+               f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint",
+                         dir->i_ino, pino);
                return 0;
        }
 
@@ -481,9 +480,8 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
        if (f2fs_encrypted_inode(dir) &&
            (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
            !fscrypt_has_permitted_context(dir, inode)) {
-               f2fs_msg(inode->i_sb, KERN_WARNING,
-                        "Inconsistent encryption contexts: %lu/%lu",
-                        dir->i_ino, inode->i_ino);
+               f2fs_warn(F2FS_I_SB(inode), "Inconsistent encryption contexts: %lu/%lu",
+                         dir->i_ino, inode->i_ino);
                err = -EPERM;
                goto out_iput;
        }
index a39aa88..508e684 100644 (file)
@@ -34,10 +34,9 @@ int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 {
        if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                               "%s: out-of-range nid=%x, run fsck to fix.",
-                               __func__, nid);
-               return -EINVAL;
+               f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
+                         __func__, nid);
+               return -EFSCORRUPTED;
        }
        return 0;
 }
@@ -1187,10 +1186,8 @@ int f2fs_remove_inode_page(struct inode *inode)
        }
 
        if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
-               f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
-                       "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
-                       inode->i_ino,
-                       (unsigned long long)inode->i_blocks);
+               f2fs_warn(F2FS_I_SB(inode), "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
+                         inode->i_ino, (unsigned long long)inode->i_blocks);
                set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
        }
 
@@ -1289,7 +1286,7 @@ static int read_node_page(struct page *page, int op_flags)
        if (PageUptodate(page)) {
                if (!f2fs_inode_chksum_verify(sbi, page)) {
                        ClearPageUptodate(page);
-                       return -EBADMSG;
+                       return -EFSBADCRC;
                }
                return LOCKED_PAGE;
        }
@@ -1375,16 +1372,15 @@ repeat:
        }
 
        if (!f2fs_inode_chksum_verify(sbi, page)) {
-               err = -EBADMSG;
+               err = -EFSBADCRC;
                goto out_err;
        }
 page_hit:
        if(unlikely(nid != nid_of_node(page))) {
-               f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, "
-                       "nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
-                       nid, nid_of_node(page), ino_of_node(page),
-                       ofs_of_node(page), cpver_of_node(page),
-                       next_blkaddr_of_node(page));
+               f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
+                         nid, nid_of_node(page), ino_of_node(page),
+                         ofs_of_node(page), cpver_of_node(page),
+                         next_blkaddr_of_node(page));
                err = -EINVAL;
 out_err:
                ClearPageUptodate(page);
@@ -1752,9 +1748,8 @@ continue_unlock:
                        break;
        }
        if (!ret && atomic && !marked) {
-               f2fs_msg(sbi->sb, KERN_DEBUG,
-                       "Retry to write fsync mark: ino=%u, idx=%lx",
-                                       ino, last_page->index);
+               f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
+                          ino, last_page->index);
                lock_page(last_page);
                f2fs_wait_on_page_writeback(last_page, NODE, true, true);
                set_page_dirty(last_page);
@@ -2307,8 +2302,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
                        if (ret) {
                                up_read(&nm_i->nat_tree_lock);
                                f2fs_bug_on(sbi, !mount);
-                               f2fs_msg(sbi->sb, KERN_ERR,
-                                       "NAT is corrupt, run fsck to fix it");
+                               f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
                                return ret;
                        }
                }
@@ -2728,7 +2722,7 @@ static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
                i = 1;
        }
        for (; i < NAT_ENTRY_PER_BLOCK; i++) {
-               if (nat_blk->entries[i].block_addr != NULL_ADDR)
+               if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
                        valid++;
        }
        if (valid == 0) {
@@ -2918,7 +2912,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
        nm_i->full_nat_bits = nm_i->nat_bits + 8;
        nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
 
-       f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint");
+       f2fs_notice(sbi, "Found nat_bits in checkpoint");
        return 0;
 }
 
index 67206ef..5d0c770 100644
@@ -188,10 +188,9 @@ out:
                name = "<encrypted>";
        else
                name = raw_inode->i_name;
-       f2fs_msg(inode->i_sb, KERN_NOTICE,
-                       "%s: ino = %x, name = %s, dir = %lx, err = %d",
-                       __func__, ino_of_node(ipage), name,
-                       IS_ERR(dir) ? 0 : dir->i_ino, err);
+       f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
+                   __func__, ino_of_node(ipage), name,
+                   IS_ERR(dir) ? 0 : dir->i_ino, err);
        return err;
 }
 
@@ -284,9 +283,8 @@ static int recover_inode(struct inode *inode, struct page *page)
        else
                name = F2FS_INODE(page)->i_name;
 
-       f2fs_msg(inode->i_sb, KERN_NOTICE,
-               "recover_inode: ino = %x, name = %s, inline = %x",
-                       ino_of_node(page), name, raw->i_inline);
+       f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
+                   ino_of_node(page), name, raw->i_inline);
        return 0;
 }
 
@@ -363,10 +361,9 @@ next:
                /* sanity check in order to detect looped node chain */
                if (++loop_cnt >= free_blocks ||
                        blkaddr == next_blkaddr_of_node(page)) {
-                       f2fs_msg(sbi->sb, KERN_NOTICE,
-                               "%s: detect looped node chain, "
-                               "blkaddr:%u, next:%u",
-                               __func__, blkaddr, next_blkaddr_of_node(page));
+                       f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
+                                   __func__, blkaddr,
+                                   next_blkaddr_of_node(page));
                        f2fs_put_page(page, 1);
                        err = -EINVAL;
                        break;
@@ -545,11 +542,10 @@ retry_dn:
        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
 
        if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
-                       inode->i_ino, ofs_of_node(dn.node_page),
-                       ofs_of_node(page));
-               err = -EFAULT;
+               f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
+                         inode->i_ino, ofs_of_node(dn.node_page),
+                         ofs_of_node(page));
+               err = -EFSCORRUPTED;
                goto err;
        }
 
@@ -561,13 +557,13 @@ retry_dn:
 
                if (__is_valid_data_blkaddr(src) &&
                        !f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
-                       err = -EFAULT;
+                       err = -EFSCORRUPTED;
                        goto err;
                }
 
                if (__is_valid_data_blkaddr(dest) &&
                        !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
-                       err = -EFAULT;
+                       err = -EFSCORRUPTED;
                        goto err;
                }
 
@@ -634,11 +630,9 @@ retry_prev:
 err:
        f2fs_put_dnode(&dn);
 out:
-       f2fs_msg(sbi->sb, KERN_NOTICE,
-               "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
-               inode->i_ino,
-               file_keep_isize(inode) ? "keep" : "recover",
-               recovered, err);
+       f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
+                   inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
+                   recovered, err);
        return err;
 }
 
@@ -726,8 +720,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
 #endif
 
        if (s_flags & MS_RDONLY) {
-               f2fs_msg(sbi->sb, KERN_INFO,
-                               "recover fsync data on readonly fs");
+               f2fs_info(sbi, "recover fsync data on readonly fs");
                sbi->sb->s_flags &= ~MS_RDONLY;
        }
 
index af4770f..553a16e 100644
@@ -546,9 +546,13 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
                if (test_opt(sbi, DATA_FLUSH)) {
                        struct blk_plug plug;
 
+                       mutex_lock(&sbi->flush_lock);
+
                        blk_start_plug(&plug);
                        f2fs_sync_dirty_inodes(sbi, FILE_INODE);
                        blk_finish_plug(&plug);
+
+                       mutex_unlock(&sbi->flush_lock);
                }
                f2fs_sync_fs(sbi->sb, true);
                stat_inc_bg_cp_count(sbi->stat_info);
@@ -869,11 +873,14 @@ void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
        mutex_unlock(&dirty_i->seglist_lock);
 }
 
-int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
+block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
 {
+       int ovp_hole_segs =
+               (overprovision_segments(sbi) - reserved_segments(sbi));
+       block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-       block_t ovp = overprovision_segments(sbi) << sbi->log_blocks_per_seg;
        block_t holes[2] = {0, 0};      /* DATA and NODE */
+       block_t unusable;
        struct seg_entry *se;
        unsigned int segno;
 
@@ -887,10 +894,20 @@ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
        }
        mutex_unlock(&dirty_i->seglist_lock);
 
-       if (holes[DATA] > ovp || holes[NODE] > ovp)
+       unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
+       if (unusable > ovp_holes)
+               return unusable - ovp_holes;
+       return 0;
+}
+
+int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
+{
+       int ovp_hole_segs =
+               (overprovision_segments(sbi) - reserved_segments(sbi));
+       if (unusable > F2FS_OPTION(sbi).unusable_cap)
                return -EAGAIN;
        if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
-               dirty_segments(sbi) > overprovision_segments(sbi))
+               dirty_segments(sbi) > ovp_hole_segs)
                return -EAGAIN;
        return 0;
 }
@@ -1556,6 +1573,10 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
                list_for_each_entry_safe(dc, tmp, pend_list, list) {
                        f2fs_bug_on(sbi, dc->state != D_PREP);
 
+                       if (dpolicy->timeout != 0 &&
+                               f2fs_time_over(sbi, dpolicy->timeout))
+                               break;
+
                        if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
                                                !is_idle(sbi, DISCARD_TIME)) {
                                io_interrupted = true;
@@ -1816,8 +1837,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
                devi = f2fs_target_device_index(sbi, blkstart);
                if (blkstart < FDEV(devi).start_blk ||
                    blkstart > FDEV(devi).end_blk) {
-                       f2fs_msg(sbi->sb, KERN_ERR, "Invalid block %x",
-                                blkstart);
+                       f2fs_err(sbi, "Invalid block %x", blkstart);
                        return -EIO;
                }
                blkstart -= FDEV(devi).start_blk;
@@ -1830,10 +1850,9 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
 
                if (sector & (bdev_zone_sectors(bdev) - 1) ||
                                nr_sects != bdev_zone_sectors(bdev)) {
-                       f2fs_msg(sbi->sb, KERN_ERR,
-                               "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
-                               devi, sbi->s_ndevs ? FDEV(devi).path: "",
-                               blkstart, blklen);
+                       f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
+                                devi, sbi->s_ndevs ? FDEV(devi).path : "",
+                                blkstart, blklen);
                        return -EIO;
                }
                trace_f2fs_issue_reset_zone(bdev, blkstart);
@@ -2197,15 +2216,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
                mir_exist = f2fs_test_and_set_bit(offset,
                                                se->cur_valid_map_mir);
                if (unlikely(exist != mir_exist)) {
-                       f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
-                               "when setting bitmap, blk:%u, old bit:%d",
-                               blkaddr, exist);
+                       f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
+                                blkaddr, exist);
                        f2fs_bug_on(sbi, 1);
                }
 #endif
                if (unlikely(exist)) {
-                       f2fs_msg(sbi->sb, KERN_ERR,
-                               "Bitmap was wrongly set, blk:%u", blkaddr);
+                       f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
+                                blkaddr);
                        f2fs_bug_on(sbi, 1);
                        se->valid_blocks--;
                        del = 0;
@@ -2226,15 +2244,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
                mir_exist = f2fs_test_and_clear_bit(offset,
                                                se->cur_valid_map_mir);
                if (unlikely(exist != mir_exist)) {
-                       f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
-                               "when clearing bitmap, blk:%u, old bit:%d",
-                               blkaddr, exist);
+                       f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
+                                blkaddr, exist);
                        f2fs_bug_on(sbi, 1);
                }
 #endif
                if (unlikely(!exist)) {
-                       f2fs_msg(sbi->sb, KERN_ERR,
-                               "Bitmap was wrongly cleared, blk:%u", blkaddr);
+                       f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
+                                blkaddr);
                        f2fs_bug_on(sbi, 1);
                        se->valid_blocks++;
                        del = 0;
@@ -2716,6 +2733,39 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
        stat_inc_seg_type(sbi, curseg);
 }
 
+void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+                                       unsigned int start, unsigned int end)
+{
+       struct curseg_info *curseg = CURSEG_I(sbi, type);
+       unsigned int segno;
+
+       down_read(&SM_I(sbi)->curseg_lock);
+       mutex_lock(&curseg->curseg_mutex);
+       down_write(&SIT_I(sbi)->sentry_lock);
+
+       segno = CURSEG_I(sbi, type)->segno;
+       if (segno < start || segno > end)
+               goto unlock;
+
+       if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
+               change_curseg(sbi, type);
+       else
+               new_curseg(sbi, type, true);
+
+       stat_inc_seg_type(sbi, curseg);
+
+       locate_dirty_segment(sbi, segno);
+unlock:
+       up_write(&SIT_I(sbi)->sentry_lock);
+
+       if (segno != curseg->segno)
+               f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
+                           type, segno, curseg->segno);
+
+       mutex_unlock(&curseg->curseg_mutex);
+       up_read(&SM_I(sbi)->curseg_lock);
+}
+
 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
 {
        struct curseg_info *curseg;
@@ -2848,9 +2898,8 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
                goto out;
 
        if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "Found FS corruption, run fsck to fix.");
-               return -EIO;
+               f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
+               return -EFSCORRUPTED;
        }
 
        /* start/end segment number in main_area */
@@ -3274,12 +3323,17 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
 
        if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               return -EFAULT;
+               f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
+                         __func__, segno);
+               return -EFSCORRUPTED;
        }
 
        stat_inc_inplace_blocks(fio->sbi);
 
-       err = f2fs_submit_page_bio(fio);
+       if (fio->bio)
+               err = f2fs_merge_page_bio(fio);
+       else
+               err = f2fs_submit_page_bio(fio);
        if (!err) {
                update_device_state(fio);
                f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
@@ -3470,6 +3524,11 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
                seg_i = CURSEG_I(sbi, i);
                segno = le32_to_cpu(ckpt->cur_data_segno[i]);
                blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
+               if (blk_off > ENTRIES_IN_SUM) {
+                       f2fs_bug_on(sbi, 1);
+                       f2fs_put_page(page, 1);
+                       return -EFAULT;
+               }
                seg_i->next_segno = segno;
                reset_curseg(sbi, i, 0);
                seg_i->alloc_type = ckpt->alloc_type[i];
@@ -3607,8 +3666,11 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
 
        /* sanity check for summary blocks */
        if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
-                       sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
+                       sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
+               f2fs_err(sbi, "invalid journal entries nats %u sits %u\n",
+                        nats_in_cursum(nat_j), sits_in_cursum(sit_j));
                return -EINVAL;
+       }
 
        return 0;
 }
@@ -3839,7 +3901,7 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        struct f2fs_journal *journal = curseg->journal;
        struct sit_entry_set *ses, *tmp;
        struct list_head *head = &SM_I(sbi)->sit_entry_set;
-       bool to_journal = true;
+       bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
        struct seg_entry *se;
 
        down_write(&sit_i->sentry_lock);
@@ -3858,7 +3920,8 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
         * entries, remove all entries from journal and add and account
         * them in sit entry set.
         */
-       if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
+       if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
+                                                               !to_journal)
                remove_sits_in_journal(sbi);
 
        /*
@@ -4173,11 +4236,10 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
 
                start = le32_to_cpu(segno_in_journal(journal, i));
                if (start >= MAIN_SEGS(sbi)) {
-                       f2fs_msg(sbi->sb, KERN_ERR,
-                                       "Wrong journal entry on segno %u",
-                                       start);
+                       f2fs_err(sbi, "Wrong journal entry on segno %u",
+                                start);
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
-                       err = -EINVAL;
+                       err = -EFSCORRUPTED;
                        break;
                }
 
@@ -4214,11 +4276,10 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
        up_read(&curseg->journal_rwsem);
 
        if (!err && total_node_blocks != valid_node_count(sbi)) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                       "SIT is corrupted node# %u vs %u",
-                       total_node_blocks, valid_node_count(sbi));
+               f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
+                        total_node_blocks, valid_node_count(sbi));
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               err = -EINVAL;
+               err = -EFSCORRUPTED;
        }
 
        return err;
@@ -4309,6 +4370,39 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
        return init_victim_secmap(sbi);
 }
 
+static int sanity_check_curseg(struct f2fs_sb_info *sbi)
+{
+       int i;
+
+       /*
+        * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
+        * In LFS curseg, all blkaddr after .next_blkoff should be unused.
+        */
+       for (i = 0; i < NO_CHECK_TYPE; i++) {
+               struct curseg_info *curseg = CURSEG_I(sbi, i);
+               struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
+               unsigned int blkofs = curseg->next_blkoff;
+
+               if (f2fs_test_bit(blkofs, se->cur_valid_map))
+                       goto out;
+
+               if (curseg->alloc_type == SSR)
+                       continue;
+
+               for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
+                       if (!f2fs_test_bit(blkofs, se->cur_valid_map))
+                               continue;
+out:
+                       f2fs_err(sbi,
+                                "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
+                                i, curseg->segno, curseg->alloc_type,
+                                curseg->next_blkoff, blkofs);
+                       return -EFSCORRUPTED;
+               }
+       }
+       return 0;
+}
+
 /*
  * Update min, max modified time for cost-benefit GC algorithm
  */
@@ -4404,6 +4498,10 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
        if (err)
                return err;
 
+       err = sanity_check_curseg(sbi);
+       if (err)
+               return err;
+
        init_min_max_mtime(sbi);
        return 0;
 }
index 429007b..b746028 100644
 #define        START_SEGNO(segno)              \
        (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
 #define SIT_BLK_CNT(sbi)                       \
-       ((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
+       DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
 #define f2fs_bitmap_size(nr)                   \
        (BITS_TO_LONGS(nr) * sizeof(unsigned long))
 
@@ -693,21 +693,19 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
        } while (cur_pos < sbi->blocks_per_seg);
 
        if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                               "Mismatch valid blocks %d vs. %d",
-                                       GET_SIT_VBLOCKS(raw_sit), valid_blocks);
+               f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
+                        GET_SIT_VBLOCKS(raw_sit), valid_blocks);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               return -EINVAL;
+               return -EFSCORRUPTED;
        }
 
        /* check segment usage, and check boundary of a given segment number */
        if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
                                        || segno > TOTAL_SEGS(sbi) - 1)) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                               "Wrong valid blocks %d or segno %u",
-                                       GET_SIT_VBLOCKS(raw_sit), segno);
+               f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
+                        GET_SIT_VBLOCKS(raw_sit), segno);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               return -EINVAL;
+               return -EFSCORRUPTED;
        }
        return 0;
 }
index ff908f1..cda6651 100644
@@ -136,7 +136,10 @@ enum {
        Opt_alloc,
        Opt_fsync,
        Opt_test_dummy_encryption,
-       Opt_checkpoint,
+       Opt_checkpoint_disable,
+       Opt_checkpoint_disable_cap,
+       Opt_checkpoint_disable_cap_perc,
+       Opt_checkpoint_enable,
        Opt_err,
 };
 
@@ -195,45 +198,52 @@ static match_table_t f2fs_tokens = {
        {Opt_alloc, "alloc_mode=%s"},
        {Opt_fsync, "fsync_mode=%s"},
        {Opt_test_dummy_encryption, "test_dummy_encryption"},
-       {Opt_checkpoint, "checkpoint=%s"},
+       {Opt_checkpoint_disable, "checkpoint=disable"},
+       {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
+       {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
+       {Opt_checkpoint_enable, "checkpoint=enable"},
        {Opt_err, NULL},
 };
 
-void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
+void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
 {
        struct va_format vaf;
        va_list args;
+       int level;
 
        va_start(args, fmt);
-       vaf.fmt = fmt;
+
+       level = printk_get_level(fmt);
+       vaf.fmt = printk_skip_level(fmt);
        vaf.va = &args;
-       printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
+       printk("%c%cF2FS-fs (%s): %pV\n",
+              KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+
        va_end(args);
 }
 
 static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
 {
-       block_t limit = (sbi->user_block_count << 1) / 1000;
+       block_t limit = min((sbi->user_block_count << 1) / 1000,
+                       sbi->user_block_count - sbi->reserved_blocks);
 
        /* limit is 0.2% */
        if (test_opt(sbi, RESERVE_ROOT) &&
                        F2FS_OPTION(sbi).root_reserved_blocks > limit) {
                F2FS_OPTION(sbi).root_reserved_blocks = limit;
-               f2fs_msg(sbi->sb, KERN_INFO,
-                       "Reduce reserved blocks for root = %u",
-                       F2FS_OPTION(sbi).root_reserved_blocks);
+               f2fs_info(sbi, "Reduce reserved blocks for root = %u",
+                         F2FS_OPTION(sbi).root_reserved_blocks);
        }
        if (!test_opt(sbi, RESERVE_ROOT) &&
                (!uid_eq(F2FS_OPTION(sbi).s_resuid,
                                make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
                !gid_eq(F2FS_OPTION(sbi).s_resgid,
                                make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
-               f2fs_msg(sbi->sb, KERN_INFO,
-                       "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
-                               from_kuid_munged(&init_user_ns,
-                                       F2FS_OPTION(sbi).s_resuid),
-                               from_kgid_munged(&init_user_ns,
-                                       F2FS_OPTION(sbi).s_resgid));
+               f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
+                         from_kuid_munged(&init_user_ns,
+                                          F2FS_OPTION(sbi).s_resuid),
+                         from_kgid_munged(&init_user_ns,
+                                          F2FS_OPTION(sbi).s_resgid));
 }
 
 static void init_once(void *foo)
@@ -254,35 +264,29 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
        int ret = -EINVAL;
 
        if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
-               f2fs_msg(sb, KERN_ERR,
-                       "Cannot change journaled "
-                       "quota options when quota turned on");
+               f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
                return -EINVAL;
        }
        if (f2fs_sb_has_quota_ino(sbi)) {
-               f2fs_msg(sb, KERN_INFO,
-                       "QUOTA feature is enabled, so ignore qf_name");
+               f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
                return 0;
        }
 
        qname = match_strdup(args);
        if (!qname) {
-               f2fs_msg(sb, KERN_ERR,
-                       "Not enough memory for storing quotafile name");
+               f2fs_err(sbi, "Not enough memory for storing quotafile name");
                return -ENOMEM;
        }
        if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
                if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
                        ret = 0;
                else
-                       f2fs_msg(sb, KERN_ERR,
-                                "%s quota file already specified",
+                       f2fs_err(sbi, "%s quota file already specified",
                                 QTYPE2NAME(qtype));
                goto errout;
        }
        if (strchr(qname, '/')) {
-               f2fs_msg(sb, KERN_ERR,
-                       "quotafile must be on filesystem root");
+               f2fs_err(sbi, "quotafile must be on filesystem root");
                goto errout;
        }
        F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
@@ -298,8 +302,7 @@ static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
 
        if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
-               f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
-                       " when quota turned on");
+               f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
                return -EINVAL;
        }
        kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
@@ -315,8 +318,7 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
         * to support legacy quotas in quota files.
         */
        if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
-               f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
-                        "Cannot enable project quota enforcement.");
+               f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
                return -1;
        }
        if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
@@ -336,21 +338,18 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
 
                if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
                                test_opt(sbi, PRJQUOTA)) {
-                       f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
-                                       "format mixing");
+                       f2fs_err(sbi, "old and new quota format mixing");
                        return -1;
                }
 
                if (!F2FS_OPTION(sbi).s_jquota_fmt) {
-                       f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
-                                       "not specified");
+                       f2fs_err(sbi, "journaled quota format not specified");
                        return -1;
                }
        }
 
        if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
-               f2fs_msg(sbi->sb, KERN_INFO,
-                       "QUOTA feature is enabled, so ignore jquota_fmt");
+               f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
                F2FS_OPTION(sbi).s_jquota_fmt = 0;
        }
        return 0;
@@ -418,8 +417,7 @@ static int parse_options(struct super_block *sb, char *options)
                        break;
                case Opt_nodiscard:
                        if (f2fs_sb_has_blkzoned(sbi)) {
-                               f2fs_msg(sb, KERN_WARNING,
-                                       "discard is required for zoned block devices");
+                               f2fs_warn(sbi, "discard is required for zoned block devices");
                                return -EINVAL;
                        }
                        clear_opt(sbi, DISCARD);
@@ -451,20 +449,16 @@ static int parse_options(struct super_block *sb, char *options)
                        break;
 #else
                case Opt_user_xattr:
-                       f2fs_msg(sb, KERN_INFO,
-                               "user_xattr options not supported");
+                       f2fs_info(sbi, "user_xattr options not supported");
                        break;
                case Opt_nouser_xattr:
-                       f2fs_msg(sb, KERN_INFO,
-                               "nouser_xattr options not supported");
+                       f2fs_info(sbi, "nouser_xattr options not supported");
                        break;
                case Opt_inline_xattr:
-                       f2fs_msg(sb, KERN_INFO,
-                               "inline_xattr options not supported");
+                       f2fs_info(sbi, "inline_xattr options not supported");
                        break;
                case Opt_noinline_xattr:
-                       f2fs_msg(sb, KERN_INFO,
-                               "noinline_xattr options not supported");
+                       f2fs_info(sbi, "noinline_xattr options not supported");
                        break;
 #endif
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
@@ -476,10 +470,10 @@ static int parse_options(struct super_block *sb, char *options)
                        break;
 #else
                case Opt_acl:
-                       f2fs_msg(sb, KERN_INFO, "acl options not supported");
+                       f2fs_info(sbi, "acl options not supported");
                        break;
                case Opt_noacl:
-                       f2fs_msg(sb, KERN_INFO, "noacl options not supported");
+                       f2fs_info(sbi, "noacl options not supported");
                        break;
 #endif
                case Opt_active_logs:
@@ -529,9 +523,8 @@ static int parse_options(struct super_block *sb, char *options)
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        if (test_opt(sbi, RESERVE_ROOT)) {
-                               f2fs_msg(sb, KERN_INFO,
-                                       "Preserve previous reserve_root=%u",
-                                       F2FS_OPTION(sbi).root_reserved_blocks);
+                               f2fs_info(sbi, "Preserve previous reserve_root=%u",
+                                         F2FS_OPTION(sbi).root_reserved_blocks);
                        } else {
                                F2FS_OPTION(sbi).root_reserved_blocks = arg;
                                set_opt(sbi, RESERVE_ROOT);
@@ -542,8 +535,7 @@ static int parse_options(struct super_block *sb, char *options)
                                return -EINVAL;
                        uid = make_kuid(current_user_ns(), arg);
                        if (!uid_valid(uid)) {
-                               f2fs_msg(sb, KERN_ERR,
-                                       "Invalid uid value %d", arg);
+                               f2fs_err(sbi, "Invalid uid value %d", arg);
                                return -EINVAL;
                        }
                        F2FS_OPTION(sbi).s_resuid = uid;
@@ -553,8 +545,7 @@ static int parse_options(struct super_block *sb, char *options)
                                return -EINVAL;
                        gid = make_kgid(current_user_ns(), arg);
                        if (!gid_valid(gid)) {
-                               f2fs_msg(sb, KERN_ERR,
-                                       "Invalid gid value %d", arg);
+                               f2fs_err(sbi, "Invalid gid value %d", arg);
                                return -EINVAL;
                        }
                        F2FS_OPTION(sbi).s_resgid = gid;
@@ -567,9 +558,7 @@ static int parse_options(struct super_block *sb, char *options)
                        if (strlen(name) == 8 &&
                                        !strncmp(name, "adaptive", 8)) {
                                if (f2fs_sb_has_blkzoned(sbi)) {
-                                       f2fs_msg(sb, KERN_WARNING,
-                                                "adaptive mode is not allowed with "
-                                                "zoned block device feature");
+                                       f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
                                        kvfree(name);
                                        return -EINVAL;
                                }
@@ -587,9 +576,8 @@ static int parse_options(struct super_block *sb, char *options)
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
-                               f2fs_msg(sb, KERN_WARNING,
-                                       "Not support %d, larger than %d",
-                                       1 << arg, BIO_MAX_PAGES);
+                               f2fs_warn(sbi, "Not support %d, larger than %d",
+                                         1 << arg, BIO_MAX_PAGES);
                                return -EINVAL;
                        }
                        F2FS_OPTION(sbi).write_io_size_bits = arg;
@@ -610,13 +598,11 @@ static int parse_options(struct super_block *sb, char *options)
                        break;
 #else
                case Opt_fault_injection:
-                       f2fs_msg(sb, KERN_INFO,
-                               "fault_injection options not supported");
+                       f2fs_info(sbi, "fault_injection options not supported");
                        break;
 
                case Opt_fault_type:
-                       f2fs_msg(sb, KERN_INFO,
-                               "fault_type options not supported");
+                       f2fs_info(sbi, "fault_type options not supported");
                        break;
 #endif
                case Opt_lazytime:
@@ -696,8 +682,7 @@ static int parse_options(struct super_block *sb, char *options)
                case Opt_jqfmt_vfsv0:
                case Opt_jqfmt_vfsv1:
                case Opt_noquota:
-                       f2fs_msg(sb, KERN_INFO,
-                                       "quota operations not supported");
+                       f2fs_info(sbi, "quota operations not supported");
                        break;
 #endif
                case Opt_whint:
@@ -759,39 +744,44 @@ static int parse_options(struct super_block *sb, char *options)
                case Opt_test_dummy_encryption:
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
                        if (!f2fs_sb_has_encrypt(sbi)) {
-                               f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
+                               f2fs_err(sbi, "Encrypt feature is off");
                                return -EINVAL;
                        }
 
                        F2FS_OPTION(sbi).test_dummy_encryption = true;
-                       f2fs_msg(sb, KERN_INFO,
-                                       "Test dummy encryption mode enabled");
+                       f2fs_info(sbi, "Test dummy encryption mode enabled");
 #else
-                       f2fs_msg(sb, KERN_INFO,
-                                       "Test dummy encryption mount option ignored");
+                       f2fs_info(sbi, "Test dummy encryption mount option ignored");
 #endif
                        break;
-               case Opt_checkpoint:
-                       name = match_strdup(&args[0]);
-                       if (!name)
-                               return -ENOMEM;
-
-                       if (strlen(name) == 6 &&
-                                       !strncmp(name, "enable", 6)) {
-                               clear_opt(sbi, DISABLE_CHECKPOINT);
-                       } else if (strlen(name) == 7 &&
-                                       !strncmp(name, "disable", 7)) {
-                               set_opt(sbi, DISABLE_CHECKPOINT);
-                       } else {
-                               kvfree(name);
+               case Opt_checkpoint_disable_cap_perc:
+                       if (args->from && match_int(args, &arg))
                                return -EINVAL;
-                       }
-                       kvfree(name);
+                       if (arg < 0 || arg > 100)
+                               return -EINVAL;
+                       if (arg == 100)
+                               F2FS_OPTION(sbi).unusable_cap =
+                                       sbi->user_block_count;
+                       else
+                               F2FS_OPTION(sbi).unusable_cap =
+                                       (sbi->user_block_count / 100) * arg;
+                       set_opt(sbi, DISABLE_CHECKPOINT);
+                       break;
+               case Opt_checkpoint_disable_cap:
+                       if (args->from && match_int(args, &arg))
+                               return -EINVAL;
+                       F2FS_OPTION(sbi).unusable_cap = arg;
+                       set_opt(sbi, DISABLE_CHECKPOINT);
+                       break;
+               case Opt_checkpoint_disable:
+                       set_opt(sbi, DISABLE_CHECKPOINT);
+                       break;
+               case Opt_checkpoint_enable:
+                       clear_opt(sbi, DISABLE_CHECKPOINT);
                        break;
                default:
-                       f2fs_msg(sb, KERN_ERR,
-                               "Unrecognized mount option \"%s\" or missing value",
-                               p);
+                       f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
+                                p);
                        return -EINVAL;
                }
        }
@@ -800,23 +790,18 @@ static int parse_options(struct super_block *sb, char *options)
                return -EINVAL;
 #else
        if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
-               f2fs_msg(sbi->sb, KERN_INFO,
-                        "Filesystem with quota feature cannot be mounted RDWR "
-                        "without CONFIG_QUOTA");
+               f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
                return -EINVAL;
        }
        if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
-               f2fs_msg(sb, KERN_ERR,
-                       "Filesystem with project quota feature cannot be "
-                       "mounted RDWR without CONFIG_QUOTA");
+               f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
                return -EINVAL;
        }
 #endif
 
        if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
-               f2fs_msg(sb, KERN_ERR,
-                               "Should set mode=lfs with %uKB-sized IO",
-                               F2FS_IO_SIZE_KB(sbi));
+               f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
+                        F2FS_IO_SIZE_KB(sbi));
                return -EINVAL;
        }
 
@@ -825,15 +810,11 @@ static int parse_options(struct super_block *sb, char *options)
 
                if (!f2fs_sb_has_extra_attr(sbi) ||
                        !f2fs_sb_has_flexible_inline_xattr(sbi)) {
-                       f2fs_msg(sb, KERN_ERR,
-                                       "extra_attr or flexible_inline_xattr "
-                                       "feature is off");
+                       f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
                        return -EINVAL;
                }
                if (!test_opt(sbi, INLINE_XATTR)) {
-                       f2fs_msg(sb, KERN_ERR,
-                                       "inline_xattr_size option should be "
-                                       "set with inline_xattr option");
+                       f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
                        return -EINVAL;
                }
 
@@ -842,16 +823,14 @@ static int parse_options(struct super_block *sb, char *options)
 
                if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
                                F2FS_OPTION(sbi).inline_xattr_size > max_size) {
-                       f2fs_msg(sb, KERN_ERR,
-                               "inline xattr size is out of range: %d ~ %d",
-                               min_size, max_size);
+                       f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
+                                min_size, max_size);
                        return -EINVAL;
                }
        }
 
        if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
-               f2fs_msg(sb, KERN_ERR,
-                               "LFS not compatible with checkpoint=disable\n");
+               f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n");
                return -EINVAL;
        }
 
@@ -1319,6 +1298,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
                seq_puts(seq, ",disable_roll_forward");
        if (test_opt(sbi, DISCARD))
                seq_puts(seq, ",discard");
+       else
+               seq_puts(seq, ",nodiscard");
        if (test_opt(sbi, NOHEAP))
                seq_puts(seq, ",no_heap");
        else
@@ -1415,8 +1396,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
                seq_printf(seq, ",alloc_mode=%s", "reuse");
 
        if (test_opt(sbi, DISABLE_CHECKPOINT))
-               seq_puts(seq, ",checkpoint=disable");
-
+               seq_printf(seq, ",checkpoint=disable:%u",
+                               F2FS_OPTION(sbi).unusable_cap);
        if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
                seq_printf(seq, ",fsync_mode=%s", "posix");
        else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
@@ -1446,6 +1427,7 @@ static void default_options(struct f2fs_sb_info *sbi)
        set_opt(sbi, NOHEAP);
        sbi->sb->s_flags |= MS_LAZYTIME;
        clear_opt(sbi, DISABLE_CHECKPOINT);
+       F2FS_OPTION(sbi).unusable_cap = 0;
        set_opt(sbi, FLUSH_MERGE);
        set_opt(sbi, DISCARD);
        if (f2fs_sb_has_blkzoned(sbi))
@@ -1473,10 +1455,10 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
        struct cp_control cpc;
        int err = 0;
        int ret;
+       block_t unusable;
 
        if (s_flags & MS_RDONLY) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                               "checkpoint=disable on readonly fs");
+               f2fs_err(sbi, "checkpoint=disable on readonly fs");
                return -EINVAL;
        }
        sbi->sb->s_flags |= MS_ACTIVE;
@@ -1500,7 +1482,8 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
                goto restore_flag;
        }
 
-       if (f2fs_disable_cp_again(sbi)) {
+       unusable = f2fs_get_unusable_blocks(sbi);
+       if (f2fs_disable_cp_again(sbi, unusable)) {
                err = -EAGAIN;
                goto restore_flag;
        }
@@ -1513,7 +1496,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
                goto out_unlock;
 
        spin_lock(&sbi->stat_lock);
-       sbi->unusable_block_count = 0;
+       sbi->unusable_block_count = unusable;
        spin_unlock(&sbi->stat_lock);
 
 out_unlock:
@@ -1578,8 +1561,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
        /* recover superblocks we couldn't write due to previous RO mount */
        if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
                err = f2fs_commit_super(sbi, false);
-               f2fs_msg(sb, KERN_INFO,
-                       "Try to recover all the superblocks, ret: %d", err);
+               f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
+                         err);
                if (!err)
                        clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
        }
@@ -1620,15 +1603,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
        /* disallow enable/disable extent_cache dynamically */
        if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
                err = -EINVAL;
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                               "switch extent_cache option is not allowed");
+               f2fs_warn(sbi, "switch extent_cache option is not allowed");
                goto restore_opts;
        }
 
        if ((*flags & MS_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
                err = -EINVAL;
-               f2fs_msg(sbi->sb, KERN_WARNING,
-                       "disabling checkpoint not compatible with read-only");
+               f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
                goto restore_opts;
        }
 
@@ -1698,8 +1679,7 @@ skip:
 restore_gc:
        if (need_restart_gc) {
                if (f2fs_start_gc_thread(sbi))
-                       f2fs_msg(sbi->sb, KERN_WARNING,
-                               "background gc thread has stopped");
+                       f2fs_warn(sbi, "background gc thread has stopped");
        } else if (need_stop_gc) {
                f2fs_stop_gc_thread(sbi);
        }
@@ -1838,8 +1818,7 @@ static qsize_t *f2fs_get_reserved_space(struct inode *inode)
 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
 {
        if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                       "quota sysfile may be corrupted, skip loading it");
+               f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
                return 0;
        }
 
@@ -1855,8 +1834,7 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
        if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
                err = f2fs_enable_quotas(sbi->sb);
                if (err) {
-                       f2fs_msg(sbi->sb, KERN_ERR,
-                                       "Cannot turn on quota_ino: %d", err);
+                       f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
                        return 0;
                }
                return 1;
@@ -1869,8 +1847,8 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
                                enabled = 1;
                                continue;
                        }
-                       f2fs_msg(sbi->sb, KERN_ERR,
-                               "Cannot turn on quotas: %d on %d", err, i);
+                       f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
+                                err, i);
                }
        }
        return enabled;
@@ -1891,8 +1869,7 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
 
        qf_inode = f2fs_iget(sb, qf_inum);
        if (IS_ERR(qf_inode)) {
-               f2fs_msg(sb, KERN_ERR,
-                       "Bad quota inode %u:%lu", type, qf_inum);
+               f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
                return PTR_ERR(qf_inode);
        }
 
@@ -1905,17 +1882,17 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
 
 static int f2fs_enable_quotas(struct super_block *sb)
 {
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
        int type, err = 0;
        unsigned long qf_inum;
        bool quota_mopt[MAXQUOTAS] = {
-               test_opt(F2FS_SB(sb), USRQUOTA),
-               test_opt(F2FS_SB(sb), GRPQUOTA),
-               test_opt(F2FS_SB(sb), PRJQUOTA),
+               test_opt(sbi, USRQUOTA),
+               test_opt(sbi, GRPQUOTA),
+               test_opt(sbi, PRJQUOTA),
        };
 
        if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
-               f2fs_msg(sb, KERN_ERR,
-                       "quota file may be corrupted, skip loading it");
+               f2fs_err(sbi, "quota file may be corrupted, skip loading it");
                return 0;
        }
 
@@ -1928,10 +1905,8 @@ static int f2fs_enable_quotas(struct super_block *sb)
                                DQUOT_USAGE_ENABLED |
                                (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
                        if (err) {
-                               f2fs_msg(sb, KERN_ERR,
-                                       "Failed to enable quota tracking "
-                                       "(type=%d, err=%d). Please run "
-                                       "fsck to fix.", type, err);
+                               f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
+                                        type, err);
                                for (type--; type >= 0; type--)
                                        dquot_quota_off(sb, type);
                                set_sbi_flag(F2FS_SB(sb),
@@ -1950,6 +1925,18 @@ int f2fs_quota_sync(struct super_block *sb, int type)
        int cnt;
        int ret;
 
+       /*
+        * do_quotactl
+        *  f2fs_quota_sync
+        *  down_read(quota_sem)
+        *  dquot_writeback_dquots()
+        *  f2fs_dquot_commit
+        *                            block_operation
+        *                            down_read(quota_sem)
+        */
+       f2fs_lock_op(sbi);
+
+       down_read(&sbi->quota_sem);
        ret = dquot_writeback_dquots(sb, type);
        if (ret)
                goto out;
@@ -1987,6 +1974,8 @@ int f2fs_quota_sync(struct super_block *sb, int type)
 out:
        if (ret)
                set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+       up_read(&sbi->quota_sem);
+       f2fs_unlock_op(sbi);
        return ret;
 }
 
@@ -2051,10 +2040,8 @@ void f2fs_quota_off_umount(struct super_block *sb)
                if (err) {
                        int ret = dquot_quota_off(sb, type);
 
-                       f2fs_msg(sb, KERN_ERR,
-                               "Fail to turn off disk quota "
-                               "(type: %d, err: %d, ret:%d), Please "
-                               "run fsck to fix it.", type, err, ret);
+                       f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
+                                type, err, ret);
                        set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
                }
        }
@@ -2080,32 +2067,40 @@ static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
 
 static int f2fs_dquot_commit(struct dquot *dquot)
 {
+       struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
        int ret;
 
+       down_read(&sbi->quota_sem);
        ret = dquot_commit(dquot);
        if (ret < 0)
-               set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+               set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+       up_read(&sbi->quota_sem);
        return ret;
 }
 
 static int f2fs_dquot_acquire(struct dquot *dquot)
 {
+       struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
        int ret;
 
+       down_read(&sbi->quota_sem);
        ret = dquot_acquire(dquot);
        if (ret < 0)
-               set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
-
+               set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+       up_read(&sbi->quota_sem);
        return ret;
 }
 
 static int f2fs_dquot_release(struct dquot *dquot)
 {
+       struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
        int ret;
 
+       down_read(&sbi->quota_sem);
        ret = dquot_release(dquot);
        if (ret < 0)
-               set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+               set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+       up_read(&sbi->quota_sem);
        return ret;
 }
 
@@ -2115,22 +2110,27 @@ static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        int ret;
 
+       down_read(&sbi->quota_sem);
        ret = dquot_mark_dquot_dirty(dquot);
 
        /* if we are using journalled quota */
        if (is_journalled_quota(sbi))
                set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
 
+       up_read(&sbi->quota_sem);
        return ret;
 }
 
 static int f2fs_dquot_commit_info(struct super_block *sb, int type)
 {
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
        int ret;
 
+       down_read(&sbi->quota_sem);
        ret = dquot_commit_info(sb, type);
        if (ret < 0)
-               set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+               set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+       up_read(&sbi->quota_sem);
        return ret;
 }
 
@@ -2350,55 +2350,49 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
                                (segment_count << log_blocks_per_seg);
 
        if (segment0_blkaddr != cp_blkaddr) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
-                       segment0_blkaddr, cp_blkaddr);
+               f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
+                         segment0_blkaddr, cp_blkaddr);
                return true;
        }
 
        if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
                                                        sit_blkaddr) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
-                       cp_blkaddr, sit_blkaddr,
-                       segment_count_ckpt << log_blocks_per_seg);
+               f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
+                         cp_blkaddr, sit_blkaddr,
+                         segment_count_ckpt << log_blocks_per_seg);
                return true;
        }
 
        if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
                                                        nat_blkaddr) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
-                       sit_blkaddr, nat_blkaddr,
-                       segment_count_sit << log_blocks_per_seg);
+               f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
+                         sit_blkaddr, nat_blkaddr,
+                         segment_count_sit << log_blocks_per_seg);
                return true;
        }
 
        if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
                                                        ssa_blkaddr) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
-                       nat_blkaddr, ssa_blkaddr,
-                       segment_count_nat << log_blocks_per_seg);
+               f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
+                         nat_blkaddr, ssa_blkaddr,
+                         segment_count_nat << log_blocks_per_seg);
                return true;
        }
 
        if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
                                                        main_blkaddr) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
-                       ssa_blkaddr, main_blkaddr,
-                       segment_count_ssa << log_blocks_per_seg);
+               f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
+                         ssa_blkaddr, main_blkaddr,
+                         segment_count_ssa << log_blocks_per_seg);
                return true;
        }
 
        if (main_end_blkaddr > seg_end_blkaddr) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
-                       main_blkaddr,
-                       segment0_blkaddr +
-                               (segment_count << log_blocks_per_seg),
-                       segment_count_main << log_blocks_per_seg);
+               f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
+                         main_blkaddr,
+                         segment0_blkaddr +
+                         (segment_count << log_blocks_per_seg),
+                         segment_count_main << log_blocks_per_seg);
                return true;
        } else if (main_end_blkaddr < seg_end_blkaddr) {
                int err = 0;
@@ -2415,12 +2409,11 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
                        err = __f2fs_commit_super(bh, NULL);
                        res = err ? "failed" : "done";
                }
-               f2fs_msg(sb, KERN_INFO,
-                       "Fix alignment : %s, start(%u) end(%u) block(%u)",
-                       res, main_blkaddr,
-                       segment0_blkaddr +
-                               (segment_count << log_blocks_per_seg),
-                       segment_count_main << log_blocks_per_seg);
+               f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%u) block(%u)",
+                         res, main_blkaddr,
+                         segment0_blkaddr +
+                         (segment_count << log_blocks_per_seg),
+                         segment_count_main << log_blocks_per_seg);
                if (err)
                        return true;
        }
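The message conversions throughout this file replace f2fs_msg(sb, KERN_<LEVEL>, ...) with level-specific f2fs_err()/f2fs_warn()/f2fs_info()/f2fs_notice() helpers that take the sbi directly. Their definitions are not part of this diff; a plausible shape, shown here only for orientation, would be:

	/* Illustrative only -- the real wrappers live in f2fs.h. */
	#define f2fs_level_printk(sbi, level, fmt, ...)				\
		printk(level "F2FS-fs (%s): " fmt "\n",				\
		       (sbi)->sb->s_id, ##__VA_ARGS__)

	#define f2fs_info(sbi, fmt, ...)					\
		f2fs_level_printk(sbi, KERN_INFO, fmt, ##__VA_ARGS__)
	#define f2fs_err(sbi, fmt, ...)						\
		f2fs_level_printk(sbi, KERN_ERR, fmt, ##__VA_ARGS__)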
@@ -2434,59 +2427,52 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
        block_t total_sections, blocks_per_seg;
        struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
                                        (bh->b_data + F2FS_SUPER_OFFSET);
-       struct super_block *sb = sbi->sb;
        unsigned int blocksize;
        size_t crc_offset = 0;
        __u32 crc = 0;
 
+       if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
+               f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
+                         F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
+               return -EINVAL;
+       }
+
        /* Check checksum_offset and crc in superblock */
        if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
                crc_offset = le32_to_cpu(raw_super->checksum_offset);
                if (crc_offset !=
                        offsetof(struct f2fs_super_block, crc)) {
-                       f2fs_msg(sb, KERN_INFO,
-                               "Invalid SB checksum offset: %zu",
-                               crc_offset);
-                       return 1;
+                       f2fs_info(sbi, "Invalid SB checksum offset: %zu",
+                                 crc_offset);
+                       return -EFSCORRUPTED;
                }
                crc = le32_to_cpu(raw_super->crc);
                if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
-                       f2fs_msg(sb, KERN_INFO,
-                               "Invalid SB checksum value: %u", crc);
-                       return 1;
+                       f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
+                       return -EFSCORRUPTED;
                }
        }
 
-       if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Magic Mismatch, valid(0x%x) - read(0x%x)",
-                       F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
-               return 1;
-       }
-
        /* Currently, support only 4KB page cache size */
        if (F2FS_BLKSIZE != PAGE_SIZE) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Invalid page_cache_size (%lu), supports only 4KB",
-                       PAGE_SIZE);
-               return 1;
+               f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
+                         PAGE_SIZE);
+               return -EFSCORRUPTED;
        }
 
        /* Currently, support only 4KB block size */
        blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
        if (blocksize != F2FS_BLKSIZE) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Invalid blocksize (%u), supports only 4KB",
-                       blocksize);
-               return 1;
+               f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
+                         blocksize);
+               return -EFSCORRUPTED;
        }
 
        /* check log blocks per segment */
        if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Invalid log blocks per segment (%u)",
-                       le32_to_cpu(raw_super->log_blocks_per_seg));
-               return 1;
+               f2fs_info(sbi, "Invalid log blocks per segment (%u)",
+                         le32_to_cpu(raw_super->log_blocks_per_seg));
+               return -EFSCORRUPTED;
        }
 
        /* Currently, support 512/1024/2048/4096 bytes sector size */
@@ -2494,18 +2480,17 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                                F2FS_MAX_LOG_SECTOR_SIZE ||
                le32_to_cpu(raw_super->log_sectorsize) <
                                F2FS_MIN_LOG_SECTOR_SIZE) {
-               f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
-                       le32_to_cpu(raw_super->log_sectorsize));
-               return 1;
+               f2fs_info(sbi, "Invalid log sectorsize (%u)",
+                         le32_to_cpu(raw_super->log_sectorsize));
+               return -EFSCORRUPTED;
        }
        if (le32_to_cpu(raw_super->log_sectors_per_block) +
                le32_to_cpu(raw_super->log_sectorsize) !=
                        F2FS_MAX_LOG_SECTOR_SIZE) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Invalid log sectors per block(%u) log sectorsize(%u)",
-                       le32_to_cpu(raw_super->log_sectors_per_block),
-                       le32_to_cpu(raw_super->log_sectorsize));
-               return 1;
+               f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
+                         le32_to_cpu(raw_super->log_sectors_per_block),
+                         le32_to_cpu(raw_super->log_sectorsize));
+               return -EFSCORRUPTED;
        }
 
        segment_count = le32_to_cpu(raw_super->segment_count);
@@ -2518,77 +2503,68 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 
        if (segment_count > F2FS_MAX_SEGMENT ||
                                segment_count < F2FS_MIN_SEGMENTS) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Invalid segment count (%u)",
-                       segment_count);
-               return 1;
+               f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
+               return -EFSCORRUPTED;
        }
 
        if (total_sections > segment_count ||
                        total_sections < F2FS_MIN_SEGMENTS ||
                        segs_per_sec > segment_count || !segs_per_sec) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Invalid segment/section count (%u, %u x %u)",
-                       segment_count, total_sections, segs_per_sec);
-               return 1;
+               f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
+                         segment_count, total_sections, segs_per_sec);
+               return -EFSCORRUPTED;
        }
 
        if ((segment_count / segs_per_sec) < total_sections) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Small segment_count (%u < %u * %u)",
-                       segment_count, segs_per_sec, total_sections);
-               return 1;
+               f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
+                         segment_count, segs_per_sec, total_sections);
+               return -EFSCORRUPTED;
        }
 
        if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Wrong segment_count / block_count (%u > %llu)",
-                       segment_count, le64_to_cpu(raw_super->block_count));
-               return 1;
+               f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
+                         segment_count, le64_to_cpu(raw_super->block_count));
+               return -EFSCORRUPTED;
        }
 
        if (secs_per_zone > total_sections || !secs_per_zone) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Wrong secs_per_zone / total_sections (%u, %u)",
-                       secs_per_zone, total_sections);
-               return 1;
+               f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
+                         secs_per_zone, total_sections);
+               return -EFSCORRUPTED;
        }
        if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
                        raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
                        (le32_to_cpu(raw_super->extension_count) +
                        raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Corrupted extension count (%u + %u > %u)",
-                       le32_to_cpu(raw_super->extension_count),
-                       raw_super->hot_ext_count,
-                       F2FS_MAX_EXTENSION);
-               return 1;
+               f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
+                         le32_to_cpu(raw_super->extension_count),
+                         raw_super->hot_ext_count,
+                         F2FS_MAX_EXTENSION);
+               return -EFSCORRUPTED;
        }
 
        if (le32_to_cpu(raw_super->cp_payload) >
                                (blocks_per_seg - F2FS_CP_PACKS)) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Insane cp_payload (%u > %u)",
-                       le32_to_cpu(raw_super->cp_payload),
-                       blocks_per_seg - F2FS_CP_PACKS);
-               return 1;
+               f2fs_info(sbi, "Insane cp_payload (%u > %u)",
+                         le32_to_cpu(raw_super->cp_payload),
+                         blocks_per_seg - F2FS_CP_PACKS);
+               return -EFSCORRUPTED;
        }
 
        /* check reserved ino info */
        if (le32_to_cpu(raw_super->node_ino) != 1 ||
                le32_to_cpu(raw_super->meta_ino) != 2 ||
                le32_to_cpu(raw_super->root_ino) != 3) {
-               f2fs_msg(sb, KERN_INFO,
-                       "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
-                       le32_to_cpu(raw_super->node_ino),
-                       le32_to_cpu(raw_super->meta_ino),
-                       le32_to_cpu(raw_super->root_ino));
-               return 1;
+               f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
+                         le32_to_cpu(raw_super->node_ino),
+                         le32_to_cpu(raw_super->meta_ino),
+                         le32_to_cpu(raw_super->root_ino));
+               return -EFSCORRUPTED;
        }
 
        /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
        if (sanity_check_area_boundary(sbi, bh))
-               return 1;
+               return -EFSCORRUPTED;
 
        return 0;
 }
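sanity_check_raw_super() now returns negative errnos instead of 1: -EINVAL for a magic mismatch and -EFSCORRUPTED for every other inconsistency, and the read_raw_super_block() hunk further down propagates that value to its caller. EFSCORRUPTED is not one of the generic errno constants in this kernel generation, so the tree presumably carries a filesystem-local alias along these lines (an assumption based on common kernel practice, not shown in this diff):

	#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */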
@@ -2626,8 +2602,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 
        if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
                        ovp_segments == 0 || reserved_segments == 0)) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                       "Wrong layout: check mkfs.f2fs version");
+               f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
                return 1;
        }
 
@@ -2636,16 +2611,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
        log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
        if (!user_block_count || user_block_count >=
                        segment_count_main << log_blocks_per_seg) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                       "Wrong user_block_count: %u", user_block_count);
+               f2fs_err(sbi, "Wrong user_block_count: %u",
+                        user_block_count);
                return 1;
        }
 
        valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
        if (valid_user_blocks > user_block_count) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                       "Wrong valid_user_blocks: %u, user_block_count: %u",
-                       valid_user_blocks, user_block_count);
+               f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
+                        valid_user_blocks, user_block_count);
                return 1;
        }
 
@@ -2653,9 +2627,8 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
        avail_node_count = sbi->total_node_count - sbi->nquota_files -
                                                F2FS_RESERVED_NODE_NUM;
        if (valid_node_count > avail_node_count) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                       "Wrong valid_node_count: %u, avail_node_count: %u",
-                       valid_node_count, avail_node_count);
+               f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
+                        valid_node_count, avail_node_count);
                return 1;
        }
 
@@ -2669,10 +2642,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
                for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
                        if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
                                le32_to_cpu(ckpt->cur_node_segno[j])) {
-                               f2fs_msg(sbi->sb, KERN_ERR,
-                                       "Node segment (%u, %u) has the same "
-                                       "segno: %u", i, j,
-                                       le32_to_cpu(ckpt->cur_node_segno[i]));
+                               f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
+                                        i, j,
+                                        le32_to_cpu(ckpt->cur_node_segno[i]));
                                return 1;
                        }
                }
@@ -2684,10 +2656,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
                for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
                        if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
                                le32_to_cpu(ckpt->cur_data_segno[j])) {
-                               f2fs_msg(sbi->sb, KERN_ERR,
-                                       "Data segment (%u, %u) has the same "
-                                       "segno: %u", i, j,
-                                       le32_to_cpu(ckpt->cur_data_segno[i]));
+                               f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
+                                        i, j,
+                                        le32_to_cpu(ckpt->cur_data_segno[i]));
                                return 1;
                        }
                }
@@ -2696,10 +2667,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
                for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
                        if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
                                le32_to_cpu(ckpt->cur_data_segno[j])) {
-                               f2fs_msg(sbi->sb, KERN_ERR,
-                                       "Data segment (%u) and Data segment (%u)"
-                                       " has the same segno: %u", i, j,
-                                       le32_to_cpu(ckpt->cur_node_segno[i]));
+                               f2fs_err(sbi, "Data segment (%u) and Data segment (%u) has the same segno: %u",
+                                        i, j,
+                                        le32_to_cpu(ckpt->cur_node_segno[i]));
                                return 1;
                        }
                }
@@ -2710,9 +2680,8 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 
        if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
                nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                       "Wrong bitmap size: sit: %u, nat:%u",
-                       sit_bitmap_size, nat_bitmap_size);
+               f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
+                        sit_bitmap_size, nat_bitmap_size);
                return 1;
        }
 
@@ -2721,14 +2690,22 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
        if (cp_pack_start_sum < cp_payload + 1 ||
                cp_pack_start_sum > blocks_per_seg - 1 -
                        NR_CURSEG_TYPE) {
-               f2fs_msg(sbi->sb, KERN_ERR,
-                       "Wrong cp_pack_start_sum: %u",
-                       cp_pack_start_sum);
+               f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
+                        cp_pack_start_sum);
+               return 1;
+       }
+
+       if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
+               le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
+               f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
+                         "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
+                         "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
+                         le32_to_cpu(ckpt->checksum_offset));
                return 1;
        }
 
        if (unlikely(f2fs_cp_error(sbi))) {
-               f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
+               f2fs_err(sbi, "A bug case: need to run fsck");
                return 1;
        }
        return 0;
@@ -2897,18 +2874,17 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
        for (block = 0; block < 2; block++) {
                bh = sb_bread(sb, block);
                if (!bh) {
-                       f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
-                               block + 1);
+                       f2fs_err(sbi, "Unable to read %dth superblock",
+                                block + 1);
                        err = -EIO;
                        continue;
                }
 
                /* sanity checking of raw super */
-               if (sanity_check_raw_super(sbi, bh)) {
-                       f2fs_msg(sb, KERN_ERR,
-                               "Can't find valid F2FS filesystem in %dth superblock",
-                               block + 1);
-                       err = -EINVAL;
+               err = sanity_check_raw_super(sbi, bh);
+               if (err) {
+                       f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
+                                block + 1);
                        brelse(bh);
                        continue;
                }
@@ -3041,36 +3017,32 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
 #ifdef CONFIG_BLK_DEV_ZONED
                if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
                                !f2fs_sb_has_blkzoned(sbi)) {
-                       f2fs_msg(sbi->sb, KERN_ERR,
-                               "Zoned block device feature not enabled\n");
+                       f2fs_err(sbi, "Zoned block device feature not enabled\n");
                        return -EINVAL;
                }
                if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
                        if (init_blkz_info(sbi, i)) {
-                               f2fs_msg(sbi->sb, KERN_ERR,
-                                       "Failed to initialize F2FS blkzone information");
+                               f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
                                return -EINVAL;
                        }
                        if (max_devices == 1)
                                break;
-                       f2fs_msg(sbi->sb, KERN_INFO,
-                               "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
-                               i, FDEV(i).path,
-                               FDEV(i).total_segments,
-                               FDEV(i).start_blk, FDEV(i).end_blk,
-                               bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
-                               "Host-aware" : "Host-managed");
+                       f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+                                 i, FDEV(i).path,
+                                 FDEV(i).total_segments,
+                                 FDEV(i).start_blk, FDEV(i).end_blk,
+                                 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
+                                 "Host-aware" : "Host-managed");
                        continue;
                }
 #endif
-               f2fs_msg(sbi->sb, KERN_INFO,
-                       "Mount Device [%2d]: %20s, %8u, %8x - %8x",
-                               i, FDEV(i).path,
-                               FDEV(i).total_segments,
-                               FDEV(i).start_blk, FDEV(i).end_blk);
-       }
-       f2fs_msg(sbi->sb, KERN_INFO,
-                       "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
+               f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
+                         i, FDEV(i).path,
+                         FDEV(i).total_segments,
+                         FDEV(i).start_blk, FDEV(i).end_blk);
+       }
+       f2fs_info(sbi,
+                 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
        return 0;
 }
 
@@ -3116,7 +3088,7 @@ try_onemore:
        /* Load the checksum driver */
        sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
        if (IS_ERR(sbi->s_chksum_driver)) {
-               f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
+               f2fs_err(sbi, "Cannot load crc32 driver.");
                err = PTR_ERR(sbi->s_chksum_driver);
                sbi->s_chksum_driver = NULL;
                goto free_sbi;
@@ -3124,7 +3096,7 @@ try_onemore:
 
        /* set a block size */
        if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
-               f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
+               f2fs_err(sbi, "unable to set blocksize");
                goto free_sbi;
        }
 
@@ -3148,8 +3120,7 @@ try_onemore:
         */
 #ifndef CONFIG_BLK_DEV_ZONED
        if (f2fs_sb_has_blkzoned(sbi)) {
-               f2fs_msg(sb, KERN_ERR,
-                        "Zoned block device support is not enabled");
+               f2fs_err(sbi, "Zoned block device support is not enabled");
                err = -EOPNOTSUPP;
                goto free_sb_buf;
        }
@@ -3202,6 +3173,7 @@ try_onemore:
        mutex_init(&sbi->gc_mutex);
        mutex_init(&sbi->writepages);
        mutex_init(&sbi->cp_mutex);
+       mutex_init(&sbi->resize_mutex);
        init_rwsem(&sbi->node_write);
        init_rwsem(&sbi->node_change);
 
@@ -3237,6 +3209,7 @@ try_onemore:
        }
 
        init_rwsem(&sbi->cp_rwsem);
+       init_rwsem(&sbi->quota_sem);
        init_waitqueue_head(&sbi->cp_wait);
        init_sb_info(sbi);
 
@@ -3256,14 +3229,14 @@ try_onemore:
        /* get an inode for meta space */
        sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
        if (IS_ERR(sbi->meta_inode)) {
-               f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
+               f2fs_err(sbi, "Failed to read F2FS meta data inode");
                err = PTR_ERR(sbi->meta_inode);
                goto free_io_dummy;
        }
 
        err = f2fs_get_valid_checkpoint(sbi);
        if (err) {
-               f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
+               f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
                goto free_meta_inode;
        }
 
@@ -3274,10 +3247,13 @@ try_onemore:
                sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
        }
 
+       if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+
        /* Initialize device list */
        err = f2fs_scan_devices(sbi);
        if (err) {
-               f2fs_msg(sb, KERN_ERR, "Failed to find devices");
+               f2fs_err(sbi, "Failed to find devices");
                goto free_devices;
        }
 
@@ -3297,6 +3273,7 @@ try_onemore:
                INIT_LIST_HEAD(&sbi->inode_list[i]);
                spin_lock_init(&sbi->inode_lock[i]);
        }
+       mutex_init(&sbi->flush_lock);
 
        f2fs_init_extent_cache_info(sbi);
 
@@ -3307,14 +3284,14 @@ try_onemore:
        /* setup f2fs internal modules */
        err = f2fs_build_segment_manager(sbi);
        if (err) {
-               f2fs_msg(sb, KERN_ERR,
-                       "Failed to initialize F2FS segment manager");
+               f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
+                        err);
                goto free_sm;
        }
        err = f2fs_build_node_manager(sbi);
        if (err) {
-               f2fs_msg(sb, KERN_ERR,
-                       "Failed to initialize F2FS node manager");
+               f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
+                        err);
                goto free_nm;
        }
 
@@ -3338,7 +3315,7 @@ try_onemore:
        /* get an inode for node space */
        sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
        if (IS_ERR(sbi->node_inode)) {
-               f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
+               f2fs_err(sbi, "Failed to read node inode");
                err = PTR_ERR(sbi->node_inode);
                goto free_stats;
        }
@@ -3346,7 +3323,7 @@ try_onemore:
        /* read root inode and dentry */
        root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
        if (IS_ERR(root)) {
-               f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
+               f2fs_err(sbi, "Failed to read root inode");
                err = PTR_ERR(root);
                goto free_node_inode;
        }
@@ -3372,8 +3349,7 @@ try_onemore:
        if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
                err = f2fs_enable_quotas(sb);
                if (err)
-                       f2fs_msg(sb, KERN_ERR,
-                               "Cannot turn on quotas: error %d", err);
+                       f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
        }
 #endif
        /* if there are any orphan inodes, free them */
@@ -3393,13 +3369,10 @@ try_onemore:
                if (f2fs_hw_is_readonly(sbi)) {
                        if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
                                err = -EROFS;
-                               f2fs_msg(sb, KERN_ERR,
-                                       "Need to recover fsync data, but "
-                                       "write access unavailable");
+                               f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
                                goto free_meta;
                        }
-                       f2fs_msg(sbi->sb, KERN_INFO, "write access "
-                               "unavailable, skipping recovery");
+                       f2fs_info(sbi, "write access unavailable, skipping recovery");
                        goto reset_checkpoint;
                }
 
@@ -3414,8 +3387,8 @@ try_onemore:
                        if (err != -ENOMEM)
                                skip_recovery = true;
                        need_fsck = true;
-                       f2fs_msg(sb, KERN_ERR,
-                               "Cannot recover all fsync data errno=%d", err);
+                       f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
+                                err);
                        goto free_meta;
                }
        } else {
@@ -3423,8 +3396,7 @@ try_onemore:
 
                if (!f2fs_readonly(sb) && err > 0) {
                        err = -EINVAL;
-                       f2fs_msg(sb, KERN_ERR,
-                               "Need to recover fsync data");
+                       f2fs_err(sbi, "Need to recover fsync data");
                        goto free_meta;
                }
        }
@@ -3455,17 +3427,16 @@ reset_checkpoint:
        /* recover broken superblock */
        if (recovery) {
                err = f2fs_commit_super(sbi, true);
-               f2fs_msg(sb, KERN_INFO,
-                       "Try to recover %dth superblock, ret: %d",
-                       sbi->valid_super_block ? 1 : 2, err);
+               f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
+                         sbi->valid_super_block ? 1 : 2, err);
        }
 
        f2fs_join_shrinker(sbi);
 
        f2fs_tuning_parameters(sbi);
 
-       f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
-                               cur_cp_version(F2FS_CKPT(sbi)));
+       f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
+                   cur_cp_version(F2FS_CKPT(sbi)));
        f2fs_update_time(sbi, CP_TIME);
        f2fs_update_time(sbi, REQ_TIME);
        clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
index d7b4766..9e910b1 100644 (file)
@@ -68,6 +68,20 @@ static ssize_t dirty_segments_show(struct f2fs_attr *a,
                (unsigned long long)(dirty_segments(sbi)));
 }
 
+static ssize_t unusable_show(struct f2fs_attr *a,
+               struct f2fs_sb_info *sbi, char *buf)
+{
+       block_t unusable;
+
+       if (test_opt(sbi, DISABLE_CHECKPOINT))
+               unusable = sbi->unusable_block_count;
+       else
+               unusable = f2fs_get_unusable_blocks(sbi);
+       return snprintf(buf, PAGE_SIZE, "%llu\n",
+               (unsigned long long)unusable);
+}
+
 static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
                struct f2fs_sb_info *sbi, char *buf)
 {
@@ -440,6 +454,7 @@ F2FS_GENERAL_RO_ATTR(dirty_segments);
 F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
 F2FS_GENERAL_RO_ATTR(features);
 F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
+F2FS_GENERAL_RO_ATTR(unusable);
 
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
 F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
@@ -495,6 +510,7 @@ static struct attribute *f2fs_attrs[] = {
        ATTR_LIST(inject_type),
 #endif
        ATTR_LIST(dirty_segments),
+       ATTR_LIST(unusable),
        ATTR_LIST(lifetime_write_kbytes),
        ATTR_LIST(features),
        ATTR_LIST(reserved_blocks),
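The new read-only "unusable" attribute reports the block count that cannot be reclaimed while checkpointing is disabled (sbi->unusable_block_count) and otherwise recomputes it via f2fs_get_unusable_blocks(). By analogy with the other f2fs attributes it should surface as /sys/fs/f2fs/<device>/unusable; a small hypothetical user-space reader, assuming that path:

	#include <stdio.h>

	int main(void)
	{
		/* Path pattern assumed from the existing f2fs sysfs attributes. */
		FILE *f = fopen("/sys/fs/f2fs/sda1/unusable", "r");
		unsigned long long unusable;

		if (!f)
			return 1;
		if (fscanf(f, "%llu", &unusable) != 1) {
			fclose(f);
			return 1;
		}
		printf("unusable blocks: %llu\n", unusable);
		fclose(f);
		return 0;
	}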
@@ -566,8 +582,7 @@ static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
 
                if ((i % 10) == 0)
                        seq_printf(seq, "%-10d", i);
-               seq_printf(seq, "%d|%-3u", se->type,
-                                       get_valid_blocks(sbi, i, false));
+               seq_printf(seq, "%d|%-3u", se->type, se->valid_blocks);
                if ((i % 10) == 9 || i == (total_segs - 1))
                        seq_putc(seq, '\n');
                else
@@ -593,8 +608,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
                struct seg_entry *se = get_seg_entry(sbi, i);
 
                seq_printf(seq, "%-10d", i);
-               seq_printf(seq, "%d|%-3u|", se->type,
-                                       get_valid_blocks(sbi, i, false));
+               seq_printf(seq, "%d|%-3u|", se->type, se->valid_blocks);
                for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
                        seq_printf(seq, " %.2x", se->cur_valid_map[j]);
                seq_putc(seq, '\n');
index 376db95..d48af3b 100644 (file)
@@ -392,7 +392,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 
        *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
        if (!*xe) {
-               err = -EFAULT;
+               f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+                                                               inode->i_ino);
+               set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+               err = -EFSCORRUPTED;
                goto out;
        }
 check:
@@ -661,7 +664,10 @@ static int __f2fs_setxattr(struct inode *inode, int index,
        /* find entry with wanted name. */
        here = __find_xattr(base_addr, last_base_addr, index, len, name);
        if (!here) {
-               error = -EFAULT;
+               f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+                                                               inode->i_ino);
+               set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+               error = -EFSCORRUPTED;
                goto exit;
        }
 
index ef24894..9c159e6 100644 (file)
@@ -739,6 +739,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 
                gfs2_free_clones(rgd);
                kfree(rgd->rd_bits);
+               rgd->rd_bits = NULL;
                return_all_reservations(rgd);
                kmem_cache_free(gfs2_rgrpd_cachep, rgd);
        }
@@ -933,10 +934,6 @@ static int read_rindex_entry(struct gfs2_inode *ip)
        if (error)
                goto fail;
 
-       rgd->rd_gl->gl_object = rgd;
-       rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK;
-       rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr +
-                                                 rgd->rd_length) * bsize) - 1;
        rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
        rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
        if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -944,14 +941,20 @@ static int read_rindex_entry(struct gfs2_inode *ip)
        spin_lock(&sdp->sd_rindex_spin);
        error = rgd_insert(rgd);
        spin_unlock(&sdp->sd_rindex_spin);
-       if (!error)
+       if (!error) {
+               rgd->rd_gl->gl_object = rgd;
+               rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
+               rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
+                                                   rgd->rd_length) * bsize) - 1;
                return 0;
+       }
 
        error = 0; /* someone else read in the rgrp; free it and ignore it */
        gfs2_glock_put(rgd->rd_gl);
 
 fail:
        kfree(rgd->rd_bits);
+       rgd->rd_bits = NULL;
        kmem_cache_free(gfs2_rgrpd_cachep, rgd);
        return error;
 }
index 10015f1..8e811a0 100644 (file)
@@ -2052,3 +2052,27 @@ void inode_nohighmem(struct inode *inode)
        mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 }
 EXPORT_SYMBOL(inode_nohighmem);
+
+/*
+ * Generic function to check FS_IOC_SETFLAGS values and reject any invalid
+ * configurations.
+ *
+ * Note: the caller should be holding i_mutex, or else be sure that they have
+ * exclusive access to the inode structure.
+ */
+int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
+                            unsigned int flags)
+{
+       /*
+        * The IMMUTABLE and APPEND_ONLY flags can only be changed by
+        * the relevant capability.
+        *
+        * This test looks nicer. Thanks to Pauline Middelink
+        */
+       if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
+           !capable(CAP_LINUX_IMMUTABLE))
+               return -EPERM;
+
+       return 0;
+}
+EXPORT_SYMBOL(vfs_ioc_setflags_prepare);
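vfs_ioc_setflags_prepare() centralises the CAP_LINUX_IMMUTABLE check that filesystems previously open-coded in their FS_IOC_SETFLAGS handlers. A hedged sketch of how a filesystem might call it; the examplefs_* helpers are hypothetical, and only the vfs_ioc_setflags_prepare() call and the i_mutex expectation come from the code above:

	static int examplefs_ioc_setflags(struct inode *inode, unsigned int flags)
	{
		unsigned int oldflags = examplefs_get_flags(inode);	/* hypothetical */
		int err;

		mutex_lock(&inode->i_mutex);	/* helper expects i_mutex held */
		err = vfs_ioc_setflags_prepare(inode, oldflags, flags);
		if (!err)
			examplefs_apply_flags(inode, flags);		/* hypothetical */
		mutex_unlock(&inode->i_mutex);
		return err;
	}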
index 57f0306..8c4706e 100644 (file)
@@ -40,6 +40,9 @@
 #include "internal.h"
 #include "mount.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/namei.h>
+
 /* [Feb-1997 T. Schoebel-Theuer]
  * Fundamental changes in the pathname lookup mechanisms (namei)
  * were necessary because of omirr.  The reason is that omirr needs
@@ -784,6 +787,81 @@ static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
        return dentry->d_op->d_revalidate(dentry, flags);
 }
 
+#define INIT_PATH_SIZE 64
+
+static void success_walk_trace(struct nameidata *nd)
+{
+       struct path *pt = &nd->path;
+       struct inode *i = nd->inode;
+       char buf[INIT_PATH_SIZE], *try_buf;
+       int cur_path_size;
+       char *p;
+
+       /* When eBPF/tracepoint is disabled, keep overhead low. */
+       if (!trace_inodepath_enabled())
+               return;
+
+       /* First try stack allocated buffer. */
+       try_buf = buf;
+       cur_path_size = INIT_PATH_SIZE;
+
+       while (cur_path_size <= PATH_MAX) {
+               /* Free previous heap allocation if we are now trying
+                * a second or later heap allocation.
+                */
+               if (try_buf != buf)
+                       kfree(try_buf);
+
+               /* All but the first alloc are on the heap. */
+               if (cur_path_size != INIT_PATH_SIZE) {
+                       try_buf = kmalloc(cur_path_size, GFP_KERNEL);
+                       if (!try_buf) {
+                               try_buf = buf;
+                               sprintf(try_buf, "error:buf_alloc_failed");
+                               break;
+                       }
+               }
+
+               p = d_path(pt, try_buf, cur_path_size);
+
+               if (!IS_ERR(p)) {
+                       char *end = mangle_path(try_buf, p, "\n");
+
+                       if (end) {
+                               try_buf[end - try_buf] = 0;
+                               break;
+                       } else {
+                               /* On mangle errors, double path size
+                                * till PATH_MAX.
+                                */
+                               cur_path_size = cur_path_size << 1;
+                               continue;
+                       }
+               }
+
+               if (PTR_ERR(p) == -ENAMETOOLONG) {
+                       /* If d_path complains that name is too long,
+                        * then double path size till PATH_MAX.
+                        */
+                       cur_path_size = cur_path_size << 1;
+                       continue;
+               }
+
+               sprintf(try_buf, "error:d_path_failed_%lu",
+                       -1 * PTR_ERR(p));
+               break;
+       }
+
+       if (cur_path_size > PATH_MAX)
+               sprintf(try_buf, "error:d_path_name_too_long");
+
+       trace_inodepath(i, try_buf);
+
+       if (try_buf != buf)
+               kfree(try_buf);
+       return;
+}
+
 /**
  * complete_walk - successful completion of path walk
  * @nd:  pointer nameidata
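A sizing note on the retry loop in success_walk_trace() above: with INIT_PATH_SIZE at 64 bytes and PATH_MAX at its usual Linux value of 4096, the buffer grows 64, 128, 256, 512, 1024, 2048, 4096, so d_path() is attempted at most seven times; only the first attempt uses the on-stack buffer, and every later attempt allocates from the heap and frees the previous buffer at the top of the loop.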
@@ -806,15 +884,21 @@ static int complete_walk(struct nameidata *nd)
                        return -ECHILD;
        }
 
-       if (likely(!(nd->flags & LOOKUP_JUMPED)))
+       if (likely(!(nd->flags & LOOKUP_JUMPED))) {
+               success_walk_trace(nd);
                return 0;
+       }
 
-       if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
+       if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) {
+               success_walk_trace(nd);
                return 0;
+       }
 
        status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
-       if (status > 0)
+       if (status > 0) {
+               success_walk_trace(nd);
                return 0;
+       }
 
        if (!status)
                status = -ESTALE;
index 668ac19..d25b55c 100644 (file)
@@ -935,6 +935,7 @@ int nfs_open(struct inode *inode, struct file *filp)
        nfs_fscache_open_file(inode, filp);
        return 0;
 }
+EXPORT_SYMBOL_GPL(nfs_open);
 
 /*
  * This function is called whenever some part of NFS notices that
index 4afdee4..9f15696 100644 (file)
@@ -416,7 +416,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
 
 extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
 extern void nfs4_put_state_owner(struct nfs4_state_owner *);
-extern void nfs4_purge_state_owners(struct nfs_server *);
+extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
+extern void nfs4_free_state_owners(struct list_head *head);
 extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
 extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct nfs4_state *, fmode_t);
index ae91d1e..dac20f3 100644 (file)
@@ -685,9 +685,12 @@ found:
 
 static void nfs4_destroy_server(struct nfs_server *server)
 {
+       LIST_HEAD(freeme);
+
        nfs_server_return_all_delegations(server);
        unset_pnfs_layoutdriver(server);
-       nfs4_purge_state_owners(server);
+       nfs4_purge_state_owners(server, &freeme);
+       nfs4_free_state_owners(&freeme);
 }
 
 /*
index 679e003..c5e8845 100644 (file)
@@ -49,7 +49,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
                return err;
 
        if ((openflags & O_ACCMODE) == 3)
-               openflags--;
+               return nfs_open(inode, filp);
 
        /* We can't create new files here */
        openflags &= ~(O_CREAT|O_EXCL);
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                switch (err) {
-               case -EPERM:
-               case -EACCES:
-               case -EDQUOT:
-               case -ENOSPC:
-               case -EROFS:
-                       goto out_put_ctx;
                default:
+                       goto out_put_ctx;
+               case -ENOENT:
+               case -ESTALE:
+               case -EISDIR:
+               case -ENOTDIR:
+               case -ELOOP:
                        goto out_drop;
                }
        }
index 41c8ddb..d1816ee 100644 (file)
@@ -997,6 +997,12 @@ struct nfs4_opendata {
        int cancelled;
 };
 
+struct nfs4_open_createattrs {
+       struct nfs4_label *label;
+       struct iattr *sattr;
+       const __u32 verf[2];
+};
+
 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
                int err, struct nfs4_exception *exception)
 {
@@ -1066,8 +1072,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
 
 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
                struct nfs4_state_owner *sp, fmode_t fmode, int flags,
-               const struct iattr *attrs,
-               struct nfs4_label *label,
+               const struct nfs4_open_createattrs *c,
                enum open_claim_type4 claim,
                gfp_t gfp_mask)
 {
@@ -1075,6 +1080,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        struct inode *dir = d_inode(parent);
        struct nfs_server *server = NFS_SERVER(dir);
        struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
+       struct nfs4_label *label = (c != NULL) ? c->label : NULL;
        struct nfs4_opendata *p;
 
        p = kzalloc(sizeof(*p), gfp_mask);
@@ -1131,15 +1137,11 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
                p->o_arg.fh = NFS_FH(d_inode(dentry));
        }
-       if (attrs != NULL && attrs->ia_valid != 0) {
-               __u32 verf[2];
-
+       if (c != NULL && c->sattr != NULL && c->sattr->ia_valid != 0) {
                p->o_arg.u.attrs = &p->attrs;
-               memcpy(&p->attrs, attrs, sizeof(p->attrs));
+               memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
 
-               verf[0] = jiffies;
-               verf[1] = current->pid;
-               memcpy(p->o_arg.u.verifier.data, verf,
+               memcpy(p->o_arg.u.verifier.data, c->verf,
                                sizeof(p->o_arg.u.verifier.data));
        }
        p->c_arg.fh = &p->o_res.fh;
@@ -1653,7 +1655,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
        struct nfs4_opendata *opendata;
 
        opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
-                       NULL, NULL, claim, GFP_NOFS);
+                       NULL, claim, GFP_NOFS);
        if (opendata == NULL)
                return ERR_PTR(-ENOMEM);
        opendata->state = state;
@@ -2488,8 +2490,7 @@ out:
 static int _nfs4_do_open(struct inode *dir,
                        struct nfs_open_context *ctx,
                        int flags,
-                       struct iattr *sattr,
-                       struct nfs4_label *label,
+                       const struct nfs4_open_createattrs *c,
                        int *opened)
 {
        struct nfs4_state_owner  *sp;
@@ -2501,6 +2502,8 @@ static int _nfs4_do_open(struct inode *dir,
        struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
        fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
        enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
+       struct iattr *sattr = c->sattr;
+       struct nfs4_label *label = c->label;
        struct nfs4_label *olabel = NULL;
        int status;
 
@@ -2519,8 +2522,8 @@ static int _nfs4_do_open(struct inode *dir,
        status = -ENOMEM;
        if (d_really_is_positive(dentry))
                claim = NFS4_OPEN_CLAIM_FH;
-       opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
-                       label, claim, GFP_KERNEL);
+       opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
+                       c, claim, GFP_KERNEL);
        if (opendata == NULL)
                goto err_put_state_owner;
 
@@ -2596,10 +2599,18 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
        struct nfs_server *server = NFS_SERVER(dir);
        struct nfs4_exception exception = { };
        struct nfs4_state *res;
+       struct nfs4_open_createattrs c = {
+               .label = label,
+               .sattr = sattr,
+               .verf = {
+                       [0] = (__u32)jiffies,
+                       [1] = (__u32)current->pid,
+               },
+       };
        int status;
 
        do {
-               status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
+               status = _nfs4_do_open(dir, ctx, flags, &c, opened);
                res = ctx->state;
                trace_nfs4_open_file(ctx, flags, status);
                if (status == 0)
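The net effect of the nfs4_open_createattrs change is visible in the hunks above: the exclusive-create verifier used to be regenerated from jiffies and the pid inside nfs4_opendata_alloc(), i.e. on every allocation, whereas it is now filled in once in nfs4_do_open() before the retry loop, so every _nfs4_do_open() attempt for the same open presents the same verifier to the server.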
index 5be61af..ef3ed2b 100644 (file)
@@ -611,24 +611,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 /**
  * nfs4_purge_state_owners - Release all cached state owners
  * @server: nfs_server with cached state owners to release
+ * @head: resulting list of state owners
  *
  * Called at umount time.  Remaining state owners will be on
  * the LRU with ref count of zero.
+ * Note that the state owners are not freed, but are added
+ * to the list @head, which can later be used as an argument
+ * to nfs4_free_state_owners.
  */
-void nfs4_purge_state_owners(struct nfs_server *server)
+void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
 {
        struct nfs_client *clp = server->nfs_client;
        struct nfs4_state_owner *sp, *tmp;
-       LIST_HEAD(doomed);
 
        spin_lock(&clp->cl_lock);
        list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
-               list_move(&sp->so_lru, &doomed);
+               list_move(&sp->so_lru, head);
                nfs4_remove_state_owner_locked(sp);
        }
        spin_unlock(&clp->cl_lock);
+}
 
-       list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+/**
+ * nfs4_free_state_owners - Release all cached state owners
+ * @head: resulting list of state owners
+ *
+ * Frees a list of state owners that was generated by
+ * nfs4_purge_state_owners
+ */
+void nfs4_free_state_owners(struct list_head *head)
+{
+       struct nfs4_state_owner *sp, *tmp;
+
+       list_for_each_entry_safe(sp, tmp, head, so_lru) {
                list_del(&sp->so_lru);
                nfs4_free_state_owner(sp);
        }
@@ -1724,12 +1739,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
        struct nfs4_state_owner *sp;
        struct nfs_server *server;
        struct rb_node *pos;
+       LIST_HEAD(freeme);
        int status = 0;
 
 restart:
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
-               nfs4_purge_state_owners(server);
+               nfs4_purge_state_owners(server, &freeme);
                spin_lock(&clp->cl_lock);
                for (pos = rb_first(&server->state_owners);
                     pos != NULL;
@@ -1758,6 +1774,7 @@ restart:
                spin_unlock(&clp->cl_lock);
        }
        rcu_read_unlock();
+       nfs4_free_state_owners(&freeme);
        return 0;
 }
 
index 8a20774..af1bb73 100644 (file)
@@ -593,7 +593,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
        }
 
        hdr->res.fattr   = &hdr->fattr;
-       hdr->res.count   = count;
+       hdr->res.count   = 0;
        hdr->res.eof     = 0;
        hdr->res.verf    = &hdr->verf;
        nfs_fattr_init(&hdr->fattr);
index b417bbc..b83e14a 100644 (file)
@@ -588,7 +588,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
                /* Emulate the eof flag, which isn't normally needed in NFSv2
                 * as it is guaranteed to always return the file attributes
                 */
-               if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+               if ((hdr->res.count == 0 && hdr->args.count > 0) ||
+                   hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
                        hdr->res.eof = 1;
        }
        return 0;
@@ -609,8 +610,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
 
 static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
 {
-       if (task->tk_status >= 0)
+       if (task->tk_status >= 0) {
+               hdr->res.count = hdr->args.count;
                nfs_writeback_update_inode(hdr);
+       }
        return 0;
 }
 
index ba27a5f..ea5cb1b 100644 (file)
@@ -1391,11 +1391,16 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
 {
        u32 slotsize = slot_bytes(ca);
        u32 num = ca->maxreqs;
-       int avail;
+       unsigned long avail, total_avail;
 
        spin_lock(&nfsd_drc_lock);
-       avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
-                   nfsd_drc_max_mem - nfsd_drc_mem_used);
+       total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
+       avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
+       /*
+        * Never use more than a third of the remaining memory,
+        * unless it's the only way to give this client a slot:
+        */
+       avail = clamp_t(unsigned long, avail, slotsize, total_avail/3);
        num = min_t(int, num, avail / slotsize);
        nfsd_drc_mem_used += num * slotsize;
        spin_unlock(&nfsd_drc_lock);
index b6eb56d..0fa990f 100644 (file)
@@ -360,7 +360,7 @@ void nfsd_reset_versions(void)
  */
 static void set_max_drc(void)
 {
-       #define NFSD_DRC_SIZE_SHIFT     10
+       #define NFSD_DRC_SIZE_SHIFT     7
        nfsd_drc_max_mem = (nr_free_buffer_pages()
                                        >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
        nfsd_drc_mem_used = 0;
index 4f07882..06faa60 100644 (file)
@@ -3808,7 +3808,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
        u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
        int low_bucket = 0, bucket, high_bucket;
        struct ocfs2_xattr_bucket *search;
-       u32 last_hash;
        u64 blkno, lower_blkno = 0;
 
        search = ocfs2_xattr_bucket_new(inode);
@@ -3852,8 +3851,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
                if (xh->xh_count)
                        xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
 
-               last_hash = le32_to_cpu(xe->xe_name_hash);
-
                /* record lower_blkno which may be the insert place. */
                lower_blkno = blkno;
 
index e6a55dd..b7e2889 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -373,6 +373,25 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
                                override_cred->cap_permitted;
        }
 
+       /*
+        * The new set of credentials can *only* be used in
+        * task-synchronous circumstances, and does not need
+        * RCU freeing, unless somebody then takes a separate
+        * reference to it.
+        *
+        * NOTE! This is _only_ true because this credential
+        * is used purely for override_creds() that installs
+        * it as the subjective cred. Other threads will be
+        * accessing ->real_cred, not the subjective cred.
+        *
+        * If somebody _does_ make a copy of this (using the
+        * 'get_current_cred()' function), that will clear the
+        * non_rcu field, because now that other user may be
+        * expecting RCU freeing. But normal thread-synchronous
+        * cred accesses will keep things non-RCU.
+        */
+       override_cred->non_rcu = 1;
+
        old_cred = override_creds(override_cred);
 retry:
        res = user_path_at(dfd, filename, lookup_flags, &path);
index 4cc40de..953c88d 100644 (file)
@@ -407,7 +407,7 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
        if (!ovl_dentry_is_opaque(dentry)) {
                err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
        } else {
-               const struct cred *old_cred;
+               const struct cred *old_cred, *hold_cred = NULL;
                struct cred *override_cred;
 
                old_cred = ovl_override_creds(dentry->d_sb);
@@ -422,13 +422,15 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
                                our_cred = current_cred();
                        override_cred->fsuid = our_cred->fsuid;
                        override_cred->fsgid = our_cred->fsgid;
-                       put_cred(override_creds(override_cred));
+                       hold_cred = override_creds(override_cred);
                        put_cred(override_cred);
 
                        err = ovl_create_over_whiteout(dentry, inode, &stat,
                                                       link, hardlink);
                }
-               ovl_revert_creds(old_cred);
+               ovl_revert_creds(old_cred ?: hold_cred);
+               if (old_cred && hold_cred)
+                       put_cred(hold_cred);
        }
 
        if (!err)
index 9aff817..8550790 100644 (file)
@@ -155,7 +155,7 @@ int ovl_permission(struct inode *inode, int mask)
 
        old_cred = ovl_override_creds(inode->i_sb);
        err = __inode_permission(realinode, mask);
-       revert_creds(old_cred);
+       ovl_revert_creds(old_cred);
 
 out_dput:
        dput(alias);
index 08cc09b..85877c5 100644 (file)
@@ -431,6 +431,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
        /* len == 0 means wake all */
        struct userfaultfd_wake_range range = { .len = 0, };
        unsigned long new_flags;
+       bool still_valid;
 
        ACCESS_ONCE(ctx->released) = true;
 
@@ -446,8 +447,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
         * taking the mmap_sem for writing.
         */
        down_write(&mm->mmap_sem);
-       if (!mmget_still_valid(mm))
-               goto skip_mm;
+       still_valid = mmget_still_valid(mm);
        prev = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                cond_resched();
@@ -458,20 +458,21 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                        continue;
                }
                new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
-               prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
-                                new_flags, vma->anon_vma,
-                                vma->vm_file, vma->vm_pgoff,
-                                vma_policy(vma),
-                                NULL_VM_UFFD_CTX,
-                                vma_get_anon_name(vma));
-               if (prev)
-                       vma = prev;
-               else
-                       prev = vma;
+               if (still_valid) {
+                       prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+                                        new_flags, vma->anon_vma,
+                                        vma->vm_file, vma->vm_pgoff,
+                                        vma_policy(vma),
+                                        NULL_VM_UFFD_CTX,
+                                        vma_get_anon_name(vma));
+                       if (prev)
+                               vma = prev;
+                       else
+                               prev = vma;
+               }
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
-skip_mm:
        up_write(&mm->mmap_sem);
        mmput(mm);
 wakeup:
index 09aa521..af2c3d4 100644 (file)
@@ -143,7 +143,7 @@ extern void warn_slowpath_null(const char *file, const int line);
 #endif
 
 #ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (condition) ; } while (0)
+#define BUG_ON(condition) do { if (condition) BUG(); } while (0)
 #endif
 
 #ifndef HAVE_ARCH_WARN_ON
index 65e4468..52fbf23 100644 (file)
@@ -6,24 +6,6 @@
 #include <linux/compiler.h>
 #include <linux/log2.h>
 
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
-       int order;
-
-       size--;
-       size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
-       order = fls(size);
-#else
-       order = fls64(size);
-#endif
-       return order;
-}
-
 /**
  * get_order - Determine the allocation order of a memory size
  * @size: The size for which to get the order
@@ -42,19 +24,27 @@ int __get_order(unsigned long size)
  * to hold an object of the specified size.
  *
  * The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
  */
-#define get_order(n)                                           \
-(                                                              \
-       __builtin_constant_p(n) ? (                             \
-               ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :     \
-               (((n) < (1UL << PAGE_SHIFT)) ? 0 :              \
-                ilog2((n) - 1) - PAGE_SHIFT + 1)               \
-       ) :                                                     \
-       __get_order(n)                                          \
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+       if (__builtin_constant_p(size)) {
+               if (!size)
+                       return BITS_PER_LONG - PAGE_SHIFT;
+
+               if (size < (1UL << PAGE_SHIFT))
+                       return 0;
+
+               return ilog2((size) - 1) - PAGE_SHIFT + 1;
+       }
+
+       size--;
+       size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+       return fls(size);
+#else
+       return fls64(size);
+#endif
+}
 
 #endif /* __ASSEMBLY__ */
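 
 For reference, a quick sanity check of the rewritten helper (illustrative only, not part of this merge, assuming PAGE_SHIFT == 12, i.e. 4 KiB pages):
 
	/*
	 * get_order(1)       -> 0   (fits in one page)
	 * get_order(4096)    -> 0
	 * get_order(4097)    -> 1   (needs two pages)
	 * get_order(8192)    -> 1
	 * get_order(65536)   -> 4   (16 pages = 2^4)
	 * get_order(1 << 20) -> 8   (256 pages = 2^8)
	 *
	 * The constant-folded branch and the runtime fls()/fls64() branch
	 * of the new inline function produce the same values.
	 */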
 
index 3672893..6a30f1e 100644 (file)
@@ -226,7 +226,10 @@ void acpi_set_irq_model(enum acpi_irq_model_id model,
 #ifdef CONFIG_X86_IO_APIC
 extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
 #else
-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
+static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
+{
+       return -1;
+}
 #endif
 /*
  * This function undoes the effect of one call to acpi_register_gsi().
index 07ca15e..dada47a 100644 (file)
@@ -29,7 +29,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
 
 static inline void ceph_buffer_put(struct ceph_buffer *b)
 {
-       kref_put(&b->kref, ceph_buffer_release);
+       if (b)
+               kref_put(&b->kref, ceph_buffer_release);
 }
 
 extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
index d30209b..0ca0c83 100644 (file)
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
 #ifndef _CODA_HEADER_
 #define _CODA_HEADER_
 
-#if defined(__linux__)
 typedef unsigned long long u_quad_t;
-#endif
+
 #include <uapi/linux/coda.h>
 #endif 
index 5b8721e..fe1466d 100644 (file)
@@ -19,6 +19,17 @@ struct venus_comm {
        struct mutex        vc_mutex;
 };
 
+/* messages between coda filesystem in kernel and Venus */
+struct upc_req {
+       struct list_head        uc_chain;
+       caddr_t                 uc_data;
+       u_short                 uc_flags;
+       u_short                 uc_inSize;  /* Size is at most 5000 bytes */
+       u_short                 uc_outSize;
+       u_short                 uc_opcode;  /* copied from data to save lookup */
+       int                     uc_unique;
+       wait_queue_head_t       uc_sleep;   /* process' wait queue */
+};
 
 static inline struct venus_comm *coda_vcp(struct super_block *sb)
 {
index ed77231..5508011 100644 (file)
@@ -52,6 +52,22 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 
 #ifdef __KERNEL__
 
+/*
+ * Minimal backport of compiler_attributes.h to add support for __copy
+ * to v4.9.y so that we can use it in init/exit_module to avoid
+ * -Werror=missing-attributes errors on GCC 9.
+ */
+#ifndef __has_attribute
+# define __has_attribute(x) __GCC4_has_attribute_##x
+# define __GCC4_has_attribute___copy__                0
+#endif
+
+#if __has_attribute(__copy__)
+# define __copy(symbol)                 __attribute__((__copy__(symbol)))
+#else
+# define __copy(symbol)
+#endif
+
 #ifdef __GNUC__
 #include <linux/compiler-gcc.h>
 #endif
index 9e120c9..d2db1da 100644 (file)
@@ -153,7 +153,11 @@ struct cred {
        struct user_struct *user;       /* real user ID subscription */
        struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
        struct group_info *group_info;  /* supplementary groups for euid/fsgid */
-       struct rcu_head rcu;            /* RCU deletion hook */
+       /* RCU deletion */
+       union {
+               int non_rcu;                    /* Can we skip RCU deletion? */
+               struct rcu_head rcu;            /* RCU deletion hook */
+       };
 };
 
 extern void __put_cred(struct cred *);
@@ -251,6 +255,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
 {
        struct cred *nonconst_cred = (struct cred *) cred;
        validate_creds(cred);
+       nonconst_cred->non_rcu = 0;
        return get_new_cred(nonconst_cred);
 }
 
index 605f973..1046fb4 100644 (file)
@@ -145,7 +145,7 @@ the appropriate macros. */
 /* This needs to be modified manually now, when we add
  a new RANGE of SSIDs to the msg_mask_tbl */
 #define MSG_MASK_TBL_CNT               26
-#define APPS_EVENT_LAST_ID             0xCAA
+#define APPS_EVENT_LAST_ID             0xCB4
 
 #define MSG_SSID_0                     0
 #define MSG_SSID_0_LAST                        130
@@ -911,7 +911,7 @@ static const uint32_t msg_bld_masks_25[] = {
 /* LOG CODES */
 static const uint32_t log_code_last_tbl[] = {
        0x0,    /* EQUIP ID 0 */
-       0x1C9A, /* EQUIP ID 1 */
+       0x1CB2, /* EQUIP ID 1 */
        0x0,    /* EQUIP ID 2 */
        0x0,    /* EQUIP ID 3 */
        0x4910, /* EQUIP ID 4 */
index 638b324..92ad08a 100644 (file)
@@ -97,7 +97,7 @@ struct elevator_type
        struct module *elevator_owner;
 
        /* managed by elevator core */
-       char icq_cache_name[ELV_NAME_MAX + 5];  /* elvname + "_io_cq" */
+       char icq_cache_name[ELV_NAME_MAX + 6];  /* elvname + "_io_cq" */
        struct list_head list;
 };
 
index 7493e80..66360a2 100644 (file)
@@ -3141,4 +3141,7 @@ static inline bool dir_relax(struct inode *inode)
 extern bool path_noexec(const struct path *path);
 extern void inode_nohighmem(struct inode *inode);
 
+int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
+                            unsigned int flags);
+
 #endif /* _LINUX_FS_H */
index d12b5d5..11555bd 100644 (file)
@@ -229,30 +229,6 @@ static inline int irq_to_gpio(unsigned irq)
        return -EINVAL;
 }
 
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
-                      unsigned int gpio_offset, unsigned int pin_offset,
-                      unsigned int npins)
-{
-       WARN_ON(1);
-       return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
-                       struct pinctrl_dev *pctldev,
-                       unsigned int gpio_offset, const char *pin_group)
-{
-       WARN_ON(1);
-       return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
-       WARN_ON(1);
-}
-
 static inline int devm_gpio_request(struct device *dev, unsigned gpio,
                                    const char *label)
 {
index 5f1e901..d16de62 100644 (file)
@@ -378,7 +378,6 @@ struct hid_global {
 
 struct hid_local {
        unsigned usage[HID_MAX_USAGES]; /* usage array */
-       u8 usage_size[HID_MAX_USAGES]; /* usage size array */
        unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
        unsigned usage_index;
        unsigned usage_minimum;
index 63828a5..e578f38 100644 (file)
@@ -105,6 +105,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
 extern void unregister_pppox_proto(int proto_num);
 extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
 extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#define PPPOEIOCSFWD32    _IOW(0xB1 ,0, compat_size_t)
 
 /* PPPoX socket states */
 enum {
index 49648aa..2dff3db 100644 (file)
@@ -735,8 +735,8 @@ static inline int mmc_boot_partition_access(struct mmc_host *host)
 
 static inline bool mmc_card_and_host_support_async_int(struct mmc_host *host)
 {
-       return ((host->caps2 & MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE) &&
-                       (host->card->cccr.async_intr_sup));
+       return (host->card && (host->caps2 & MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE)
+                       && (host->card->cccr.async_intr_sup));
 }
 
 static inline int mmc_host_uhs(struct mmc_host *host)
index dfe5c2e..d237d05 100644 (file)
@@ -127,13 +127,13 @@ extern void cleanup_module(void);
 #define module_init(initfn)                                    \
        static inline initcall_t __maybe_unused __inittest(void)                \
        { return initfn; }                                      \
-       int init_module(void) __attribute__((alias(#initfn)));
+       int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
 
 /* This is only required if you want to be unloadable. */
 #define module_exit(exitfn)                                    \
        static inline exitcall_t __maybe_unused __exittest(void)                \
        { return exitfn; }                                      \
-       void cleanup_module(void) __attribute__((alias(#exitfn)));
+       void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
 
 #endif
 
diff --git a/include/linux/qcn_sdio_al.h b/include/linux/qcn_sdio_al.h
new file mode 100644 (file)
index 0000000..bee020b
--- /dev/null
@@ -0,0 +1,296 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCN_SDIO_AL_
+#define _QCN_SDIO_AL_
+
+
+/**
+ * ------------------------------------
+ * ------- SDIO AL Interface ----------
+ * ------------------------------------
+ *
+ * This file contains the proposed SDIO AL (Abstraction Layer) interface.
+ * Terminologies:
+ *     SDIO AL : SDIO host function-1 driver
+ *     SDIO AL client: Clients of SDIO host function-1 driver.
+ *                     WLAN, QMI, DIAG etc. are possible clients.
+ *     Remote SDIO client: SDIO client on device side which implements ADMA
+ *                         functionality as function-1
+ */
+
+enum sdio_al_dma_direction {
+       SDIO_AL_TX,
+       SDIO_AL_RX,
+};
+
+/**
+ * struct sdio_al_client_handle - unique handle to identify
+ *                               each SDIO AL (Abstraction Layer) client
+ *
+ * @id: unique id for each client
+ * @block_size: block size
+ * @func: pointer to sdio_func data structure, some clients may need this.
+ * @client_priv: client private data that can be used by the client driver.
+ */
+struct sdio_al_client_handle {
+       int id;
+       struct sdio_al_client_data *client_data;
+       unsigned int block_size;
+       struct sdio_func *func;
+       void *client_priv;
+};
+
+/**
+ * struct sdio_al_xfer_result - Completed buffer information
+ *
+ * @buf_addr: Address of data buffer
+ *
+ * @xfer_len: Transfer data length in bytes
+ *
+ * @xfer_status: status of transfer, 0 if successful,
+ *                     negative in case of error
+ */
+struct sdio_al_xfer_result {
+       void *buf_addr;
+       size_t xfer_len;
+       int xfer_status;
+};
+
+enum sdio_al_lpm_event {
+       LPM_ENTER, /* SDIO client will be put to LPM mode soon */
+       LPM_EXIT,  /* SDIO client has exited LPM mode */
+};
+
+/**
+ * sdio_al_client_data - client data of sdio_al
+ *
+ * @name: client name, could be one of the following:
+ *                  "SDIO_AL_CLIENT_WLAN",
+ *                  "SDIO_AL_CLIENT_QMI",
+ *                  "SDIO_AL_CLIENT_DIAG",
+ *                  "SDIO_AL_CLIENT_TTY"
+ *
+ * @probe: This probe function is called by SDIO AL driver when it is ready for
+ *        SDIO traffic. SDIO AL client must wait for this callback before
+ *        initiating any transfer over SDIO transport layer.
+ *
+ * @remove: This remove function is called by SDIO AL driver when it isn't ready
+ *         for SDIO traffic. SDIO AL client must stop issuing any transfers
+ *         after getting this callback; ongoing transfers will be errored out
+ *         by SDIO AL.
+ *
+ * @lpm_notify_cb: callback to notify SDIO AL clients about Low Power modes.
+ *
+ */
+struct sdio_al_client_data {
+       const char *name;
+
+       int id;
+
+       int mode;
+
+       int (*probe)(struct sdio_al_client_handle *);
+
+       int (*remove)(struct sdio_al_client_handle *);
+
+       void (*lpm_notify_cb)(struct sdio_al_client_handle *,
+                       enum sdio_al_lpm_event event);
+};
+
+/**
+ * sdio_al_channel_handle - channel handle of sdio_al
+ *
+ * @channel_id: Channel id, unique at the AL layer
+ *
+ * @client_data: Client to which this channel belongs
+ *
+ */
+struct sdio_al_channel_handle {
+       unsigned int channel_id;
+
+       struct sdio_al_channel_data *channel_data;
+       void *priv;
+};
+
+/**
+ * sdio_al_channel_data - channel data of sdio_al
+ *
+ * @name: channel name, could be one of the following:
+ *                  "SDIO_AL_WLAN_CH0",
+ *                  "SDIO_AL_WLAN_CH1",
+ *                  "SDIO_AL_QMI_CH0",
+ *                  "SDIO_AL_DIAG_CH0",
+ *                  "SDIO_AL_TTY_CH0"
+ *
+ * @client_data: The client driver by which this channel is being claimed
+ *
+ * @ul_xfer_cb: UL/TX data transfer callback.
+ *             SDIO AL client can queue request using sdio_al_queue_transfer()
+ *             asynchronous API, once request is transported over SDIO
+ *             transport, SDIO AL calls "ul_xfer_cb" to notify that the
+ *             transfer is complete.
+ *
+ * @dl_xfer_cb: DL/RX data transfer callback
+ *             Once SDIO AL receives requested data from remote SDIO client
+ *             then SDIO AL invokes "dl_xfer_cb" callback to notify the SDIO
+ *             AL client.
+ *
+ * @dl_data_avail_cb: callback to notify SDIO AL client that it can read
+ *             specified bytes of data from remote SDIO client, SDIO AL client
+ *             is then expected to call sdio_al_queue_transfer() to read the data.
+ *             This is optional and if client doesn't provide this callback
+ *             then SDIO AL would allocate the buffer and SDIO AL
+ *             client would have to memcpy the buffer in dl_xfer_cb().
+ *
+ */
+struct sdio_al_channel_data {
+       const char *name;
+
+       struct sdio_al_client_data *client_data;
+
+       void (*ul_xfer_cb)(struct sdio_al_channel_handle *,
+                       struct sdio_al_xfer_result *, void *ctxt);
+
+       void (*dl_xfer_cb)(struct sdio_al_channel_handle *,
+                       struct sdio_al_xfer_result *, void *ctxt);
+
+       void (*dl_data_avail_cb)(struct sdio_al_channel_handle *,
+                       unsigned int len);
+
+       void (*dl_meta_data_cb)(struct sdio_al_channel_handle *,
+                       unsigned int data);
+};
+
+/**
+ * sdio_al_is_ready - check whether the SDIO AL driver is ready
+ * This API can be used to defer the probe in case of early execution.
+ *
+ * @return zero on success and negative value on error.
+ *
+ */
+int sdio_al_is_ready(void);
+
+/**
+ * sdio_al_register_client - register as client of sdio AL (function-1 driver)
+ *  SDIO AL driver allocates a unique instance of
+ *  "struct sdio_al_client_handle" and returns it to the client.
+ *
+ * @client_data: pointer to SDIO AL client data (struct sdio_al_client_data)
+ *
+ * @return valid sdio_al_client_handle pointer on success, negative value on error.
+ *
+ */
+struct sdio_al_client_handle *sdio_al_register_client(
+               struct sdio_al_client_data *client_data);
+
+/**
+ * sdio_al_deregister_client - deregisters client from SDIO AL
+ * (function-1 driver)
+ *
+ * @handle: sdio_al client handler
+ *
+ */
+void sdio_al_deregister_client(struct sdio_al_client_handle *handle);
+
+/**
+ * sdio_al_register_channel - register a channel for a client of SDIO AL
+ * SDIO AL driver would allocate a unique instance of the "struct
+ * sdio_al_channel_handle" and returns it to the client.
+ *
+ * @client_handle: The client to which the channel shall belong
+ *
+ * @channel_data: The channel data which contains the details of the channel
+ *
+ * @return valid channel handle on success, error pointer on failure
+ */
+struct sdio_al_channel_handle *sdio_al_register_channel(
+               struct sdio_al_client_handle *client_handle,
+               struct sdio_al_channel_data *client_data);
+
+/**
+ * sdio_al_deregister_channel - deregister a channel for a client of SDIO AL
+ *
+ * @ch_handle: The channel handle which needs to be deregistered
+ *
+ * @return none
+ */
+void sdio_al_deregister_channel(struct sdio_al_channel_handle *ch_handle);
+
+
+/**
+ * sdio_al_queue_transfer_async - Queue asynchronous data transfer request
+ * All transfers are asynchronous; SDIO AL will call the ul_xfer_cb or
+ * dl_xfer_cb callback to notify the SDIO AL client of completion.
+ *
+ * @ch_handle: sdio_al channel handle
+ *
+ * @dir: Data direction (DMA_TO_DEVICE for TX, DMA_FROM_DEVICE for RX)
+ *
+ * @buf: Data buffer
+ *
+ * @len: Size in bytes
+ *
+ * @priority: set any non-zero value for higher priority, 0 for normal priority
+ *           All SDIO AL clients except the WLAN client are expected to use normal
+ *           priority.
+ *
+ * @return 0 on success, non-zero in case of error
+ */
+int sdio_al_queue_transfer_async(struct sdio_al_channel_handle *handle,
+               enum sdio_al_dma_direction dir,
+               void *buf, size_t len, int priority, void *ctxt);
+
+/**
+ * sdio_al_queue_transfer - Queue synchronous data transfer request
+ * In contrast to the asynchronous API sdio_al_queue_transfer_async(), this
+ * API completes the request synchronously. If there is no outstanding
+ * request at the SDIO AL layer, the request is initiated immediately on
+ * the SDIO bus.
+ *
+ * @ch_handle: sdio_al channel handle
+ *
+ * @dir: Data direction (DMA_TO_DEVICE for TX, DMA_FROM_DEVICE for RX)
+ *
+ * @buf: Data buffer
+ *
+ * @len: Size in bytes
+ *
+ * @priority: set any non-zero value for higher priority, 0 for normal priority
+ *           All SDIO AL clients except the WLAN client are expected to use normal
+ *           priority.
+ *
+ * @return 0 on success, non-zero in case of error
+ */
+int sdio_al_queue_transfer(struct sdio_al_channel_handle *ch_handle,
+               enum sdio_al_dma_direction dir,
+               void *buf, size_t len, int priority);
+
+
+/**
+ * sdio_al_meta_transfer - Queue synchronous meta data transfer request
+ * In contrast to the asynchronous API sdio_al_queue_transfer_async(), this
+ * API completes the request synchronously. If there is no outstanding
+ * request at the SDIO AL layer, the request is initiated immediately on
+ * the SDIO bus.
+ *
+ * @ch_handle: sdio_al channel handle
+ *
+ * @data: Meta data to be transferred
+ *
+ * @return 0 on success, non-zero in case of error
+ */
+int sdio_al_meta_transfer(struct sdio_al_channel_handle *ch_handle,
+               unsigned int data, unsigned int trans);
+
+extern void qcn_sdio_client_probe_complete(int id);
+int qcn_sdio_card_state(bool enable);
+#endif /* _QCN_SDIO_AL_ */
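 
 Below is a minimal usage sketch of the interface declared above (illustrative only, not part of this merge): a hypothetical TTY client registers with the AL, opens one channel and issues a synchronous read. rx_buf, the stub callbacks, my_open() and the abbreviated error handling are assumptions made for the example.
 
	#include <linux/err.h>
	#include <linux/qcn_sdio_al.h>

	static u8 rx_buf[512];

	static int my_probe(struct sdio_al_client_handle *handle)
	{
		/* AL is now ready for SDIO traffic */
		return 0;
	}

	static int my_remove(struct sdio_al_client_handle *handle)
	{
		/* stop issuing transfers from here on */
		return 0;
	}

	static void my_dl_xfer_cb(struct sdio_al_channel_handle *ch,
				  struct sdio_al_xfer_result *result, void *ctxt)
	{
		pr_info("rx done: %zu bytes, status %d\n",
			result->xfer_len, result->xfer_status);
	}

	static struct sdio_al_client_data my_client = {
		.name	= "SDIO_AL_CLIENT_TTY",
		.probe	= my_probe,
		.remove	= my_remove,
	};

	static struct sdio_al_channel_data my_channel = {
		.name		= "SDIO_AL_TTY_CH0",
		.client_data	= &my_client,
		.dl_xfer_cb	= my_dl_xfer_cb,
	};

	static int my_open(void)
	{
		struct sdio_al_client_handle *client;
		struct sdio_al_channel_handle *ch;

		if (sdio_al_is_ready())
			return -EPROBE_DEFER;		/* AL not up yet */

		client = sdio_al_register_client(&my_client);
		if (IS_ERR_OR_NULL(client))
			return -ENODEV;

		ch = sdio_al_register_channel(client, &my_channel);
		if (IS_ERR_OR_NULL(ch)) {
			sdio_al_deregister_client(client);
			return -ENODEV;
		}

		/* synchronous, normal-priority read into rx_buf */
		return sdio_al_queue_transfer(ch, SDIO_AL_RX, rx_buf,
					      sizeof(rx_buf), 0);
	}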
index e7e2a53..d80320e 100644 (file)
@@ -210,4 +210,6 @@ static inline int apr_end_rx_rt(void *handle)
 int apr_start_rx_rt(void *handle);
 int apr_end_rx_rt(void *handle);
 #endif
+int apr_dummy_init(void);
+void apr_dummy_exit(void);
 #endif
index eeea0eb..5bd0923 100644 (file)
@@ -22,7 +22,7 @@
 #define RTAC_CVS               1
 #define RTAC_VOICE_MODES       2
 
-#define RTAC_MAX_ACTIVE_DEVICES                4
+#define RTAC_MAX_ACTIVE_DEVICES                6
 #define RTAC_MAX_ACTIVE_POPP           8
 
 #define DEFAULT_APP_TYPE       0x00011130
index addd036..0a93e9d 100644 (file)
@@ -852,7 +852,7 @@ static inline void rcu_preempt_sleep_check(void)
  * read-side critical sections may be preempted and they may also block, but
  * only when acquiring spinlocks that are subject to priority inheritance.
  */
-static inline void rcu_read_lock(void)
+static __always_inline void rcu_read_lock(void)
 {
        __rcu_read_lock();
        __acquire(RCU);
index 2378cbf..a05e1e8 100644 (file)
@@ -2149,7 +2149,7 @@ extern int arch_task_struct_size __read_mostly;
 extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
+extern void task_numa_free(struct task_struct *p, bool final);
 extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
                                        int src_nid, int dst_cpu);
 #else
@@ -2164,7 +2164,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
 static inline void set_numabalancing_state(bool enabled)
 {
 }
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p, bool final)
 {
 }
 static inline bool should_numa_migrate_memory(struct task_struct *p,
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
new file mode 100644 (file)
index 0000000..bf21591
--- /dev/null
@@ -0,0 +1,145 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#ifndef _LINUX_SIPHASH_H
+#define _LINUX_SIPHASH_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define SIPHASH_ALIGNMENT __alignof__(u64)
+typedef struct {
+       u64 key[2];
+} siphash_key_t;
+
+static inline bool siphash_key_is_zero(const siphash_key_t *key)
+{
+       return !(key->key[0] | key->key[1]);
+}
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
+#endif
+
+u64 siphash_1u64(const u64 a, const siphash_key_t *key);
+u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
+u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
+                const siphash_key_t *key);
+u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
+                const siphash_key_t *key);
+u64 siphash_1u32(const u32 a, const siphash_key_t *key);
+u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
+                const siphash_key_t *key);
+
+static inline u64 siphash_2u32(const u32 a, const u32 b,
+                              const siphash_key_t *key)
+{
+       return siphash_1u64((u64)b << 32 | a, key);
+}
+static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
+                              const u32 d, const siphash_key_t *key)
+{
+       return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
+}
+
+
+static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
+                                    const siphash_key_t *key)
+{
+       if (__builtin_constant_p(len) && len == 4)
+               return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
+       if (__builtin_constant_p(len) && len == 8)
+               return siphash_1u64(le64_to_cpu(data[0]), key);
+       if (__builtin_constant_p(len) && len == 16)
+               return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+                                   key);
+       if (__builtin_constant_p(len) && len == 24)
+               return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+                                   le64_to_cpu(data[2]), key);
+       if (__builtin_constant_p(len) && len == 32)
+               return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+                                   le64_to_cpu(data[2]), le64_to_cpu(data[3]),
+                                   key);
+       return __siphash_aligned(data, len, key);
+}
+
+/**
+ * siphash - compute 64-bit siphash PRF value
+ * @data: buffer to hash
+ * @len: length of @data in bytes
+ * @key: the siphash key
+ */
+static inline u64 siphash(const void *data, size_t len,
+                         const siphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+               return __siphash_unaligned(data, len, key);
+#endif
+       return ___siphash_aligned(data, len, key);
+}
+
+#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
+typedef struct {
+       unsigned long key[2];
+} hsiphash_key_t;
+
+u32 __hsiphash_aligned(const void *data, size_t len,
+                      const hsiphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+                        const hsiphash_key_t *key);
+#endif
+
+u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
+u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
+u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
+                 const hsiphash_key_t *key);
+u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
+                 const hsiphash_key_t *key);
+
+static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
+                                     const hsiphash_key_t *key)
+{
+       if (__builtin_constant_p(len) && len == 4)
+               return hsiphash_1u32(le32_to_cpu(data[0]), key);
+       if (__builtin_constant_p(len) && len == 8)
+               return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+                                    key);
+       if (__builtin_constant_p(len) && len == 12)
+               return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+                                    le32_to_cpu(data[2]), key);
+       if (__builtin_constant_p(len) && len == 16)
+               return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+                                    le32_to_cpu(data[2]), le32_to_cpu(data[3]),
+                                    key);
+       return __hsiphash_aligned(data, len, key);
+}
+
+/**
+ * hsiphash - compute 32-bit hsiphash PRF value
+ * @data: buffer to hash
+ * @len: length of @data in bytes
+ * @key: the hsiphash key
+ */
+static inline u32 hsiphash(const void *data, size_t len,
+                          const hsiphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+               return __hsiphash_unaligned(data, len, key);
+#endif
+       return ___hsiphash_aligned(data, len, key);
+}
+
+#endif /* _LINUX_SIPHASH_H */
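 
 A short usage sketch of the new API (illustrative only, not part of this merge); the flow_key/flow_hash names are assumptions for the example:
 
	#include <linux/random.h>
	#include <linux/siphash.h>

	static siphash_key_t flow_key;

	static void flow_hash_init(void)
	{
		/* the key must stay secret and unpredictable */
		get_random_bytes(&flow_key, sizeof(flow_key));
	}

	static u64 flow_hash(u32 saddr, u32 daddr, u16 sport, u16 dport)
	{
		/* fixed-size inputs can use the fast fixed-width helpers */
		return siphash_3u32(saddr, daddr,
				    ((u32)sport << 16) | dport, &flow_key);
	}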
index e57b65f..5ed1b05 100644 (file)
@@ -221,6 +221,11 @@ extern int cnss_self_recovery(struct device *dev,
                              enum cnss_recovery_reason reason);
 extern int cnss_force_fw_assert(struct device *dev);
 extern int cnss_force_collect_rddm(struct device *dev);
+extern int cnss_qmi_send_get(struct device *dev);
+extern int cnss_qmi_send_put(struct device *dev);
+extern int cnss_qmi_send(struct device *dev, int type, void *cmd,
+                        int cmd_len, void *cb_ctx,
+                        int (*cb)(void *ctx, void *event, int event_len));
 extern void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size);
 extern int cnss_get_fw_files_for_target(struct device *dev,
                                        struct cnss_fw_files *pfw_files,
index 2d8edaa..da7ffc0 100644 (file)
@@ -35,6 +35,7 @@ enum {
        ND_OPT_ROUTE_INFO = 24,         /* RFC4191 */
        ND_OPT_RDNSS = 25,              /* RFC5006 */
        ND_OPT_DNSSL = 31,              /* RFC6106 */
+       ND_OPT_CAPTIVE_PORTAL = 37,     /* RFC7710 */
        __ND_OPT_MAX
 };
 
index fde4068..636e9e1 100644 (file)
@@ -297,6 +297,8 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 gfp_t flags);
 void nf_ct_tmpl_free(struct nf_conn *tmpl);
 
+u32 nf_ct_get_id(const struct nf_conn *ct);
+
 #define NF_CT_STAT_INC(net, count)       __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
 
index 61c38f8..e6f49f2 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/uidgid.h>
 #include <net/inet_frag.h>
 #include <linux/rcupdate.h>
+#include <linux/siphash.h>
 
 struct tcpm_hash_bucket;
 struct ctl_table_header;
@@ -109,5 +110,6 @@ struct netns_ipv4 {
 #endif
 #endif
        atomic_t        rt_genid;
+       siphash_key_t   ip_id_key;
 };
 #endif
index b9b5bc2..e06a10c 100644 (file)
@@ -1461,6 +1461,11 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 void tcp_fastopen_init_key_once(bool publish);
 #define TCP_FASTOPEN_KEY_LENGTH 16
 
+static inline void tcp_init_send_head(struct sock *sk)
+{
+       sk->sk_send_head = NULL;
+}
+
 /* Fastopen key context */
 struct tcp_fastopen_context {
        struct crypto_cipher    *tfm;
@@ -1477,6 +1482,7 @@ static inline void tcp_write_queue_purge(struct sock *sk)
                sk_wmem_free_skb(sk, skb);
        sk_mem_reclaim(sk);
        tcp_clear_all_retrans_hints(tcp_sk(sk));
+       tcp_init_send_head(sk);
        inet_csk(sk)->icsk_backoff = 0;
 }
 
@@ -1538,9 +1544,25 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
                tcp_sk(sk)->highest_sack = NULL;
 }
 
-static inline void tcp_init_send_head(struct sock *sk)
+static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
 {
-       sk->sk_send_head = NULL;
+       struct sk_buff *skb = tcp_write_queue_head(sk);
+
+       if (skb == tcp_send_head(sk))
+               skb = NULL;
+
+       return skb;
+}
+
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+       struct sk_buff *skb = tcp_send_head(sk);
+
+       /* empty retransmit queue, for example due to zero window */
+       if (skb == tcp_write_queue_head(sk))
+               return NULL;
+
+       return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
 }
 
 static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
index 9e1325e..c5c03fa 100644 (file)
@@ -1304,6 +1304,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
        return atomic_read(&x->tunnel_users);
 }
 
+static inline bool xfrm_id_proto_valid(u8 proto)
+{
+       switch (proto) {
+       case IPPROTO_AH:
+       case IPPROTO_ESP:
+       case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+       case IPPROTO_ROUTING:
+       case IPPROTO_DSTOPTS:
+#endif
+               return true;
+       default:
+               return false;
+       }
+}
+
+/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
 {
        return (!userproto || proto == userproto ||
index de7e3ee..e591802 100644 (file)
@@ -236,6 +236,7 @@ struct fcoe_fcf {
  * @vn_mac:    VN_Node assigned MAC address for data
  */
 struct fcoe_rport {
+       struct fc_rport_priv rdata;
        unsigned long time;
        u16 fcoe_len;
        u16 flags;
index f4241ca..5bb9261 100644 (file)
@@ -184,10 +184,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
        if (snd_BUG_ON(!stream))
                return;
 
-       if (stream->direction == SND_COMPRESS_PLAYBACK)
-               stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-       else
-               stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+       stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 
        wake_up(&stream->runtime->sleep);
 }
index e84cbb1..b6850fe 100644 (file)
@@ -1016,8 +1016,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
        ),
 
        TP_fast_assign(
-               __entry->dev            = page->mapping->host->i_sb->s_dev;
-               __entry->ino            = page->mapping->host->i_ino;
+               __entry->dev            = page_file_mapping(page)->host->i_sb->s_dev;
+               __entry->ino            = page_file_mapping(page)->host->i_ino;
                __entry->index          = page->index;
                __entry->old_blkaddr    = fio->old_blkaddr;
                __entry->new_blkaddr    = fio->new_blkaddr;
@@ -1204,10 +1204,11 @@ DECLARE_EVENT_CLASS(f2fs__page,
        ),
 
        TP_fast_assign(
-               __entry->dev    = page->mapping->host->i_sb->s_dev;
-               __entry->ino    = page->mapping->host->i_ino;
+               __entry->dev    = page_file_mapping(page)->host->i_sb->s_dev;
+               __entry->ino    = page_file_mapping(page)->host->i_ino;
                __entry->type   = type;
-               __entry->dir    = S_ISDIR(page->mapping->host->i_mode);
+               __entry->dir    =
+                       S_ISDIR(page_file_mapping(page)->host->i_mode);
                __entry->index  = page->index;
                __entry->dirty  = PageDirty(page);
                __entry->uptodate = PageUptodate(page);
diff --git a/include/trace/events/namei.h b/include/trace/events/namei.h
new file mode 100644 (file)
index 0000000..e8c3e21
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM namei
+
+#if !defined(_TRACE_INODEPATH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INODEPATH_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+
+TRACE_EVENT(inodepath,
+               TP_PROTO(struct inode *inode, char *path),
+
+               TP_ARGS(inode, path),
+
+               TP_STRUCT__entry(
+                       /* dev_t and ino_t have arch-dependent bit widths,
+                        * so just use 64-bit
+                        */
+                       __field(unsigned long, ino)
+                       __field(unsigned long, dev)
+                       __string(path, path)
+               ),
+
+               TP_fast_assign(
+                       __entry->ino = inode->i_ino;
+                       __entry->dev = inode->i_sb->s_dev;
+                       __assign_str(path, path);
+               ),
+
+               TP_printk("dev %d:%d ino=%lu path=%s",
+                       MAJOR(__entry->dev), MINOR(__entry->dev),
+                       __entry->ino, __get_str(path))
+);
+#endif /* _TRACE_INODEPATH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
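 
 A hedged usage sketch of the new tracepoint (illustrative only, not part of this merge); report_open_path(), the 256-byte buffer and the d_path() call are assumptions made for the example:
 
	#include <linux/dcache.h>
	#include <linux/fs.h>
	#include <linux/path.h>
	#include <trace/events/namei.h>

	static void report_open_path(struct inode *inode, const struct path *path)
	{
		char buf[256];
		char *p;

		/* skip the path lookup entirely when the event is disabled */
		if (!trace_inodepath_enabled())
			return;

		p = d_path(path, buf, sizeof(buf));
		if (!IS_ERR(p))
			trace_inodepath(inode, p);
	}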
index 79d0598..e2c44d2 100644 (file)
@@ -6,19 +6,6 @@
 #define CODA_PSDEV_MAJOR 67
 #define MAX_CODADEVS  5           /* how many do we allow */
 
-
-/* messages between coda filesystem in kernel and Venus */
-struct upc_req {
-       struct list_head    uc_chain;
-       caddr_t             uc_data;
-       u_short             uc_flags;
-       u_short             uc_inSize;  /* Size is at most 5000 bytes */
-       u_short             uc_outSize;
-       u_short             uc_opcode;  /* copied from data to save lookup */
-       int                 uc_unique;
-       wait_queue_head_t   uc_sleep;   /* process' wait queue */
-};
-
 #define CODA_REQ_ASYNC  0x1
 #define CODA_REQ_READ   0x2
 #define CODA_REQ_WRITE  0x4
index d122ea5..c66b523 100644 (file)
@@ -239,6 +239,7 @@ struct fscrypt_key {
 #define FS_NOCOMP_FL                   0x00000400 /* Don't compress */
 #define FS_ECOMPR_FL                   0x00000800 /* Compression error */
 /* End compression flags --- maybe not all used */
+#define FS_ENCRYPT_FL                  0x00000800 /* Encrypted file */
 #define FS_BTREE_FL                    0x00001000 /* btree format dir */
 #define FS_INDEX_FL                    0x00001000 /* hash-indexed directory */
 #define FS_IMAGIC_FL                   0x00002000 /* AFS directory */
@@ -249,6 +250,7 @@ struct fscrypt_key {
 #define FS_EXTENT_FL                   0x00080000 /* Extents */
 #define FS_DIRECTIO_FL                 0x00100000 /* Use direct i/o */
 #define FS_NOCOW_FL                    0x00800000 /* Do not cow file */
+#define FS_INLINE_DATA_FL              0x10000000 /* Reserved for ext4 */
 #define FS_PROJINHERIT_FL              0x20000000 /* Create with parents projid */
 #define FS_RESERVED_FL                 0x80000000 /* reserved for ext2 lib */
 
index b58635f..ae1e1fb 100644 (file)
@@ -15,6 +15,7 @@
 #define CAPI_MSG_BASELEN               8
 #define CAPI_DATA_B3_REQ_LEN           (CAPI_MSG_BASELEN+4+4+2+2+2)
 #define CAPI_DATA_B3_RESP_LEN          (CAPI_MSG_BASELEN+4+2)
+#define CAPI_DISCONNECT_B3_RESP_LEN    (CAPI_MSG_BASELEN+4)
 
 /*----- CAPI commands -----*/
 #define CAPI_ALERT                 0x01
index a73b645..2a0e455 100644 (file)
@@ -1159,6 +1159,7 @@ config FAIR_GROUP_SCHED
 config CFS_BANDWIDTH
        bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
        depends on FAIR_GROUP_SCHED
+       depends on !SCHED_WALT
        default n
        help
          This option allows users to define CPU bandwidth rates (limits) for
index 598b8bf..a94880d 100644 (file)
@@ -371,7 +371,6 @@ static void mqueue_evict_inode(struct inode *inode)
 {
        struct mqueue_inode_info *info;
        struct user_struct *user;
-       unsigned long mq_bytes, mq_treesize;
        struct ipc_namespace *ipc_ns;
        struct msg_msg *msg, *nmsg;
        LIST_HEAD(tmp_msg);
@@ -394,16 +393,18 @@ static void mqueue_evict_inode(struct inode *inode)
                free_msg(msg);
        }
 
-       /* Total amount of bytes accounted for the mqueue */
-       mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
-               min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
-               sizeof(struct posix_msg_tree_node);
-
-       mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
-                                 info->attr.mq_msgsize);
-
        user = info->user;
        if (user) {
+               unsigned long mq_bytes, mq_treesize;
+
+               /* Total amount of bytes accounted for the mqueue */
+               mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+                       min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+                       sizeof(struct posix_msg_tree_node);
+
+               mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+                                         info->attr.mq_msgsize);
+
                spin_lock(&mq_lock);
                user->mq_bytes -= mq_bytes;
                /*
index 1327258..677991f 100644 (file)
@@ -1,4 +1,5 @@
 obj-y := core.o
+CFLAGS_core.o += $(call cc-disable-warning, override-init)
 
 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o
 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o
index 098af0b..a2e06b0 100644 (file)
@@ -146,7 +146,10 @@ void __put_cred(struct cred *cred)
        BUG_ON(cred == current->cred);
        BUG_ON(cred == current->real_cred);
 
-       call_rcu(&cred->rcu, put_cred_rcu);
+       if (cred->non_rcu)
+               put_cred_rcu(&cred->rcu);
+       else
+               call_rcu(&cred->rcu, put_cred_rcu);
 }
 EXPORT_SYMBOL(__put_cred);
 
@@ -257,6 +260,7 @@ struct cred *prepare_creds(void)
        old = task->cred;
        memcpy(new, old, sizeof(struct cred));
 
+       new->non_rcu = 0;
        atomic_set(&new->usage, 1);
        set_cred_subscribers(new, 0);
        get_group_info(new->group_info);
@@ -536,7 +540,19 @@ const struct cred *override_creds(const struct cred *new)
 
        validate_creds(old);
        validate_creds(new);
-       get_cred(new);
+
+       /*
+        * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
+        *
+        * That means that we do not clear the 'non_rcu' flag, since
+        * we are only installing the cred into the thread-synchronous
+        * '->cred' pointer, not the '->real_cred' pointer that is
+        * visible to other threads under RCU.
+        *
+        * Also note that we did validate_creds() manually, not depending
+        * on the validation in 'get_cred()'.
+        */
+       get_new_cred((struct cred *)new);
        alter_cred_subscribers(new, 1);
        rcu_assign_pointer(current->cred, new);
        alter_cred_subscribers(old, -1);
@@ -619,6 +635,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
        validate_creds(old);
 
        *new = *old;
+       new->non_rcu = 0;
        atomic_set(&new->usage, 1);
        set_cred_subscribers(new, 0);
        get_uid(new->user);
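 
 Taken together, the cred changes above target the following pattern (illustrative sketch only, not part of this merge; do_check_with_override() is a hypothetical caller along the lines of the fs/open.c hunk earlier):
 
	#include <linux/cred.h>

	static int do_check_with_override(void)
	{
		const struct cred *old_cred;
		struct cred *override_cred;

		override_cred = prepare_creds();	/* non_rcu starts at 0 */
		if (!override_cred)
			return -ENOMEM;

		override_cred->non_rcu = 1;		/* subjective-cred use only */
		old_cred = override_creds(override_cred); /* does not clear non_rcu */

		/* ... thread-local permission checks go here ... */

		revert_creds(old_cred);
		put_cred(override_cred);	/* last ref: freed without an RCU grace period */
		return 0;
	}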
index bc33e20..1d065a8 100644 (file)
@@ -8986,7 +8986,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
                goto err_free;
        }
 
-       perf_install_in_context(ctx, event, cpu);
+       perf_install_in_context(ctx, event, event->cpu);
        perf_unpin_context(ctx);
        mutex_unlock(&ctx->mutex);
 
index e8bc7ed..b389960 100644 (file)
@@ -261,7 +261,7 @@ void __put_task_struct(struct task_struct *tsk)
        WARN_ON(tsk == current);
 
        cgroup_free(tsk);
-       task_numa_free(tsk);
+       task_numa_free(tsk, true);
        security_task_free(tsk);
        exit_creds(tsk);
        delayacct_tsk_free(tsk);
index b86886b..867fb0e 100644 (file)
@@ -37,6 +37,8 @@ static void resend_irqs(unsigned long arg)
                irq = find_first_bit(irqs_resend, nr_irqs);
                clear_bit(irq, irqs_resend);
                desc = irq_to_desc(irq);
+               if (!desc)
+                       continue;
                local_irq_disable();
                desc->handle_irq(desc);
                local_irq_enable();
index 774ab79..f2df5f8 100644 (file)
@@ -3128,17 +3128,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (depth) {
                hlock = curr->held_locks + depth - 1;
                if (hlock->class_idx == class_idx && nest_lock) {
-                       if (hlock->references) {
-                               /*
-                                * Check: unsigned int references:12, overflow.
-                                */
-                               if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
-                                       return 0;
+                       if (!references)
+                               references++;
 
+                       if (!hlock->references)
                                hlock->references++;
-                       } else {
-                               hlock->references = 2;
-                       }
+
+                       hlock->references += references;
+
+                       /* Overflow */
+                       if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
+                               return 0;
 
                        return 1;
                }
index dbb61a3..35b34ec 100644 (file)
@@ -217,7 +217,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
 
 static int lockdep_stats_show(struct seq_file *m, void *v)
 {
-       struct lock_class *class;
        unsigned long nr_unused = 0, nr_uncategorized = 0,
                      nr_irq_safe = 0, nr_irq_unsafe = 0,
                      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -227,6 +226,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
                      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
                      sum_forward_deps = 0;
 
+#ifdef CONFIG_PROVE_LOCKING
+       struct lock_class *class;
+
        list_for_each_entry(class, &all_lock_classes, lock_entry) {
 
                if (class->usage_mask == 0)
@@ -258,13 +260,13 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
                        nr_hardirq_read_unsafe++;
 
-#ifdef CONFIG_PROVE_LOCKING
                sum_forward_deps += lockdep_count_forward_deps(class);
-#endif
        }
 #ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
 #endif
+
+#endif
        seq_printf(m, " lock-classes:                  %11lu [max: %lu]\n",
                        nr_lock_classes, MAX_LOCKDEP_KEYS);
        seq_printf(m, " direct dependencies:           %11lu [max: %lu]\n",
index 57809d2..ad49282 100644 (file)
@@ -3231,8 +3231,7 @@ static bool finished_loading(const char *name)
        sched_annotate_sleep();
        mutex_lock(&module_mutex);
        mod = find_module_all(name, strlen(name), true);
-       ret = !mod || mod->state == MODULE_STATE_LIVE
-               || mod->state == MODULE_STATE_GOING;
+       ret = !mod || mod->state == MODULE_STATE_LIVE;
        mutex_unlock(&module_mutex);
 
        return ret;
@@ -3391,8 +3390,7 @@ again:
        mutex_lock(&module_mutex);
        old = find_module_all(mod->name, strlen(mod->name), true);
        if (old != NULL) {
-               if (old->state == MODULE_STATE_COMING
-                   || old->state == MODULE_STATE_UNFORMED) {
+               if (old->state != MODULE_STATE_LIVE) {
                        /* Wait in case it fails to load. */
                        mutex_unlock(&module_mutex);
                        err = wait_event_interruptible(module_wq,
index ecc7b3f..282b489 100644 (file)
@@ -273,7 +273,12 @@ static void padata_reorder(struct parallel_data *pd)
         * The next object that needs serialization might have arrived to
         * the reorder queues in the meantime, we will be called again
         * from the timer function if no one else cares for it.
+        *
+        * Ensure reorder_objects is read after pd->lock is dropped so we see
+        * an increment from another task in padata_do_serial.  Pairs with
+        * smp_mb__after_atomic in padata_do_serial.
         */
+       smp_mb();
        if (atomic_read(&pd->reorder_objects)
                        && !(pinst->flags & PADATA_RESET))
                mod_timer(&pd->timer, jiffies + HZ);
@@ -342,6 +347,13 @@ void padata_do_serial(struct padata_priv *padata)
        list_add_tail(&padata->list, &pqueue->reorder.list);
        spin_unlock(&pqueue->reorder.lock);
 
+       /*
+        * Ensure the atomic_inc of reorder_objects above is ordered correctly
+        * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
+        * in padata_reorder.
+        */
+       smp_mb__after_atomic();
+
        put_cpu();
 
        padata_reorder(pd);
index 567ecc8..6353372 100644 (file)
@@ -325,7 +325,7 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
        }
 
        read_lock(&tasklist_lock);
-       force_sig(SIGKILL, pid_ns->child_reaper);
+       send_sig(SIGKILL, pid_ns->child_reaper, 1);
        read_unlock(&tasklist_lock);
 
        do_exit(0);
index 1eac064..e497ef0 100644 (file)
@@ -2233,13 +2233,23 @@ no_join:
        return;
 }
 
-void task_numa_free(struct task_struct *p)
+/*
+ * Get rid of NUMA statistics associated with a task (either current or dead).
+ * If @final is set, the task is dead and has reached refcount zero, so we can
+ * safely free all relevant data structures. Otherwise, there might be
+ * concurrent reads from places like load balancing and procfs, and we should
+ * reset the data back to default state without freeing ->numa_faults.
+ */
+void task_numa_free(struct task_struct *p, bool final)
 {
        struct numa_group *grp = p->numa_group;
-       void *numa_faults = p->numa_faults;
+       unsigned long *numa_faults = p->numa_faults;
        unsigned long flags;
        int i;
 
+       if (!numa_faults)
+               return;
+
        if (grp) {
                spin_lock_irqsave(&grp->lock, flags);
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
@@ -2252,8 +2262,14 @@ void task_numa_free(struct task_struct *p)
                put_numa_group(grp);
        }
 
-       p->numa_faults = NULL;
-       kfree(numa_faults);
+       if (final) {
+               p->numa_faults = NULL;
+               kfree(numa_faults);
+       } else {
+               p->total_numa_faults = 0;
+               for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+                       numa_faults[i] = 0;
+       }
 }
 
 /*
index 0e0dc5d..bbe767b 100644 (file)
@@ -39,6 +39,7 @@ static u64                    tick_length_base;
 #define MAX_TICKADJ            500LL           /* usecs */
 #define MAX_TICKADJ_SCALED \
        (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+#define MAX_TAI_OFFSET         100000
 
 /*
  * phase-lock loop variables
@@ -633,7 +634,8 @@ static inline void process_adjtimex_modes(struct timex *txc,
                time_constant = max(time_constant, 0l);
        }
 
-       if (txc->modes & ADJ_TAI && txc->constant >= 0)
+       if (txc->modes & ADJ_TAI &&
+                       txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
                *time_tai = txc->constant;
 
        if (txc->modes & ADJ_OFFSET)
index d38f8dd..7b67a9f 100644 (file)
@@ -289,23 +289,6 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
        SEQ_printf(m, "\n");
 }
 
-static int timer_list_show(struct seq_file *m, void *v)
-{
-       struct timer_list_iter *iter = v;
-
-       if (iter->cpu == -1 && !iter->second_pass)
-               timer_list_header(m, iter->now);
-       else if (!iter->second_pass)
-               print_cpu(m, iter->cpu, iter->now);
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-       else if (iter->cpu == -1 && iter->second_pass)
-               timer_list_show_tickdevices_header(m);
-       else
-               print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
-#endif
-       return 0;
-}
-
 void sysrq_timer_list_show(void)
 {
        u64 now = ktime_to_ns(ktime_get());
@@ -324,6 +307,24 @@ void sysrq_timer_list_show(void)
        return;
 }
 
+#ifdef CONFIG_PROC_FS
+static int timer_list_show(struct seq_file *m, void *v)
+{
+       struct timer_list_iter *iter = v;
+
+       if (iter->cpu == -1 && !iter->second_pass)
+               timer_list_header(m, iter->now);
+       else if (!iter->second_pass)
+               print_cpu(m, iter->cpu, iter->now);
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+       else if (iter->cpu == -1 && iter->second_pass)
+               timer_list_show_tickdevices_header(m);
+       else
+               print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
+#endif
+       return 0;
+}
+
 static void *move_iter(struct timer_list_iter *iter, loff_t offset)
 {
        for (; offset; offset--) {
@@ -395,3 +396,4 @@ static int __init init_timer_list_procfs(void)
        return 0;
 }
 __initcall(init_timer_list_procfs);
+#endif
index 6368792..12f8bf9 100644 (file)
@@ -5567,11 +5567,15 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                        break;
                }
 #endif
-               if (!tr->allocated_snapshot) {
+               if (!tr->allocated_snapshot)
+                       ret = resize_buffer_duplicate_size(&tr->max_buffer,
+                               &tr->trace_buffer, iter->cpu_file);
+               else
                        ret = alloc_snapshot(tr);
-                       if (ret < 0)
-                               break;
-               }
+
+               if (ret < 0)
+                       break;
+
                local_irq_disable();
                /* Now, we're going to swap */
                if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
index 17fcf03..30f5182 100644 (file)
@@ -1845,6 +1845,16 @@ config TEST_RHASHTABLE
 
          If unsure, say N.
 
+config TEST_HASH
+       tristate "Perform selftest on hash functions"
+       default n
+       help
+         Enable this option to test the kernel's siphash (<linux/siphash.h>)
+         hash functions on boot (or module load).
+
+         This is intended to help people writing architecture-specific
+         optimized versions.  If unsure, say N.
+
 endmenu # runtime tests
 
 config PROVIDE_OHCI1394_DMA_INIT
index 2c34635..1c4947c 100644 (file)
@@ -25,7 +25,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         sha1.o md5.o irq_regs.o argv_split.o \
         proportions.o flex_proportions.o ratelimit.o show_mem.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-        earlycpio.o seq_buf.o nmi_backtrace.o
+        earlycpio.o seq_buf.o siphash.o nmi_backtrace.o
 
 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
@@ -49,6 +49,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test-hexdump.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_HASH) += test_siphash.o
 obj-$(CONFIG_TEST_KASAN) += test_kasan.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_LKM) += test_module.o
index 0ec3f25..a5d3133 100644 (file)
        BUG_ON(pad < 0 || pad >= nn);
 
        /* Does the caller provide the syndrome ? */
-       if (s != NULL)
-               goto decode;
+       if (s != NULL) {
+               for (i = 0; i < nroots; i++) {
+                       /* The syndrome is in index form,
+                        * so nn represents zero
+                        */
+                       if (s[i] != nn)
+                               goto decode;
+               }
+
+               /* syndrome is zero, no errors to correct  */
+               return 0;
+       }
 
        /* form the syndromes; i.e., evaluate data(x) at roots of
         * g(x) */
        if (no_eras > 0) {
                /* Init lambda to be the erasure locator polynomial */
                lambda[1] = alpha_to[rs_modnn(rs,
-                                             prim * (nn - 1 - eras_pos[0]))];
+                                       prim * (nn - 1 - (eras_pos[0] + pad)))];
                for (i = 1; i < no_eras; i++) {
-                       u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
+                       u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
                        for (j = i + 1; j > 0; j--) {
                                tmp = index_of[lambda[j - 1]];
                                if (tmp != nn) {
index bafa993..0b86b79 100644 (file)
@@ -496,17 +496,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
 {
        if (!miter->__remaining) {
                struct scatterlist *sg;
-               unsigned long pgoffset;
 
                if (!__sg_page_iter_next(&miter->piter))
                        return false;
 
                sg = miter->piter.sg;
-               pgoffset = miter->piter.sg_pgoffset;
 
-               miter->__offset = pgoffset ? 0 : sg->offset;
+               miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
+               miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
+               miter->__offset &= PAGE_SIZE - 1;
                miter->__remaining = sg->offset + sg->length -
-                               (pgoffset << PAGE_SHIFT) - miter->__offset;
+                                    (miter->piter.sg_pgoffset << PAGE_SHIFT) -
+                                    miter->__offset;
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }
diff --git a/lib/siphash.c b/lib/siphash.c
new file mode 100644 (file)
index 0000000..3ae58b4
--- /dev/null
@@ -0,0 +1,551 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#include <linux/siphash.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+#include <linux/dcache.h>
+#include <asm/word-at-a-time.h>
+#endif
+
+#define SIPROUND \
+       do { \
+       v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
+       v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
+       v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
+       v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
+       } while (0)
+
+#define PREAMBLE(len) \
+       u64 v0 = 0x736f6d6570736575ULL; \
+       u64 v1 = 0x646f72616e646f6dULL; \
+       u64 v2 = 0x6c7967656e657261ULL; \
+       u64 v3 = 0x7465646279746573ULL; \
+       u64 b = ((u64)(len)) << 56; \
+       v3 ^= key->key[1]; \
+       v2 ^= key->key[0]; \
+       v1 ^= key->key[1]; \
+       v0 ^= key->key[0];
+
+#define POSTAMBLE \
+       v3 ^= b; \
+       SIPROUND; \
+       SIPROUND; \
+       v0 ^= b; \
+       v2 ^= 0xff; \
+       SIPROUND; \
+       SIPROUND; \
+       SIPROUND; \
+       SIPROUND; \
+       return (v0 ^ v1) ^ (v2 ^ v3);
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       PREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = le64_to_cpup(data);
+               v3 ^= m;
+               SIPROUND;
+               SIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48;
+       case 6: b |= ((u64)end[5]) << 40;
+       case 5: b |= ((u64)end[4]) << 32;
+       case 4: b |= le32_to_cpup(data); break;
+       case 3: b |= ((u64)end[2]) << 16;
+       case 2: b |= le16_to_cpup(data); break;
+       case 1: b |= end[0];
+       }
+#endif
+       POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       PREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = get_unaligned_le64(data);
+               v3 ^= m;
+               SIPROUND;
+               SIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48;
+       case 6: b |= ((u64)end[5]) << 40;
+       case 5: b |= ((u64)end[4]) << 32;
+       case 4: b |= get_unaligned_le32(end); break;
+       case 3: b |= ((u64)end[2]) << 16;
+       case 2: b |= get_unaligned_le16(end); break;
+       case 1: b |= end[0];
+       }
+#endif
+       POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_unaligned);
+#endif
+
+/**
+ * siphash_1u64 - compute 64-bit siphash PRF value of a u64
+ * @first: first u64
+ * @key: the siphash key
+ */
+u64 siphash_1u64(const u64 first, const siphash_key_t *key)
+{
+       PREAMBLE(8)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u64);
+
+/**
+ * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
+ * @first: first u64
+ * @second: second u64
+ * @key: the siphash key
+ */
+u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+       PREAMBLE(16)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= second;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_2u64);
+
+/**
+ * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @key: the siphash key
+ */
+u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
+                const siphash_key_t *key)
+{
+       PREAMBLE(24)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= third;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u64);
+
+/**
+ * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @forth: forth u64
+ * @key: the siphash key
+ */
+u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
+                const u64 forth, const siphash_key_t *key)
+{
+       PREAMBLE(32)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= third;
+       v3 ^= forth;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= forth;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_4u64);
+
+u64 siphash_1u32(const u32 first, const siphash_key_t *key)
+{
+       PREAMBLE(4)
+       b |= first;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u32);
+
+u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
+                const siphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       PREAMBLE(12)
+       v3 ^= combined;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= combined;
+       b |= third;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u32);
+
+#if BITS_PER_LONG == 64
+/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
+ * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
+ */
+
+#define HSIPROUND SIPROUND
+#define HPREAMBLE(len) PREAMBLE(len)
+#define HPOSTAMBLE \
+       v3 ^= b; \
+       HSIPROUND; \
+       v0 ^= b; \
+       v2 ^= 0xff; \
+       HSIPROUND; \
+       HSIPROUND; \
+       HSIPROUND; \
+       return (v0 ^ v1) ^ (v2 ^ v3);
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = le64_to_cpup(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48;
+       case 6: b |= ((u64)end[5]) << 40;
+       case 5: b |= ((u64)end[4]) << 32;
+       case 4: b |= le32_to_cpup(data); break;
+       case 3: b |= ((u64)end[2]) << 16;
+       case 2: b |= le16_to_cpup(data); break;
+       case 1: b |= end[0];
+       }
+#endif
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+                        const hsiphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = get_unaligned_le64(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48;
+       case 6: b |= ((u64)end[5]) << 40;
+       case 5: b |= ((u64)end[4]) << 32;
+       case 4: b |= get_unaligned_le32(end); break;
+       case 3: b |= ((u64)end[2]) << 16;
+       case 2: b |= get_unaligned_le16(end); break;
+       case 1: b |= end[0];
+       }
+#endif
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+       HPREAMBLE(4)
+       b |= first;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       HPREAMBLE(8)
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+                 const hsiphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       HPREAMBLE(12)
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       b |= third;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: forth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+                 const u32 forth, const hsiphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       HPREAMBLE(16)
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       combined = (u64)forth << 32 | third;
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#else
+#define HSIPROUND \
+       do { \
+       v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
+       v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
+       v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
+       v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
+       } while (0)
+
+#define HPREAMBLE(len) \
+       u32 v0 = 0; \
+       u32 v1 = 0; \
+       u32 v2 = 0x6c796765U; \
+       u32 v3 = 0x74656462U; \
+       u32 b = ((u32)(len)) << 24; \
+       v3 ^= key->key[1]; \
+       v2 ^= key->key[0]; \
+       v1 ^= key->key[1]; \
+       v0 ^= key->key[0];
+
+#define HPOSTAMBLE \
+       v3 ^= b; \
+       HSIPROUND; \
+       v0 ^= b; \
+       v2 ^= 0xff; \
+       HSIPROUND; \
+       HSIPROUND; \
+       HSIPROUND; \
+       return v1 ^ v3;
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u32));
+       const u8 left = len & (sizeof(u32) - 1);
+       u32 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u32)) {
+               m = le32_to_cpup(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+       switch (left) {
+       case 3: b |= ((u32)end[2]) << 16;
+       case 2: b |= le16_to_cpup(data); break;
+       case 1: b |= end[0];
+       }
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+                        const hsiphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u32));
+       const u8 left = len & (sizeof(u32) - 1);
+       u32 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u32)) {
+               m = get_unaligned_le32(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+       switch (left) {
+       case 3: b |= ((u32)end[2]) << 16;
+       case 2: b |= get_unaligned_le16(end); break;
+       case 1: b |= end[0];
+       }
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+       HPREAMBLE(4)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+       HPREAMBLE(8)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       HSIPROUND;
+       v0 ^= second;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+                 const hsiphash_key_t *key)
+{
+       HPREAMBLE(12)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       HSIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       HSIPROUND;
+       v0 ^= third;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: forth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+                 const u32 forth, const hsiphash_key_t *key)
+{
+       HPREAMBLE(16)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       HSIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       HSIPROUND;
+       v0 ^= third;
+       v3 ^= forth;
+       HSIPROUND;
+       v0 ^= forth;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#endif
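
For orientation, below is a minimal consumer-side sketch of the interface implemented above. The siphash_key_t type, SIPHASH_ALIGNMENT and the siphash() wrapper come from the <linux/siphash.h> header this file includes; the flow_tuple structure, flow_hash_key, flow_hash_init() and flow_hash() are hypothetical names used purely for illustration and are not part of this change:

    #include <linux/siphash.h>
    #include <linux/random.h>

    /* Hypothetical composite key, kept SIPHASH_ALIGNMENT-aligned so the
     * aligned implementation above can be used on its raw bytes.
     */
    struct flow_tuple {
            u32 saddr;
            u32 daddr;
            u16 sport;
            u16 dport;
            u32 ifindex;
    } __aligned(SIPHASH_ALIGNMENT);

    static siphash_key_t flow_hash_key;

    static void flow_hash_init(void)
    {
            /* One random 128-bit key; the IPv4/IPv6 ID generators converted
             * later in this change follow the same idea with a lazily seeded
             * per-namespace key (ip_id_key).
             */
            get_random_bytes(&flow_hash_key, sizeof(flow_hash_key));
    }

    static u64 flow_hash(const struct flow_tuple *t)
    {
            /* Keyed 64-bit PRF over the raw bytes of the tuple. */
            return siphash(t, sizeof(*t), &flow_hash_key);
    }

The pattern is always the same: keep the key secret and random, then treat the output as a PRF value rather than a plain hash.
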
diff --git a/lib/test_siphash.c b/lib/test_siphash.c
new file mode 100644 (file)
index 0000000..a6d854d
--- /dev/null
@@ -0,0 +1,223 @@
+/* Test cases for siphash.c
+ *
+ * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/siphash.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+
+/* Test vectors taken from reference source available at:
+ *     https://github.com/veorq/SipHash
+ */
+
+static const siphash_key_t test_key_siphash =
+       {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u64 test_vectors_siphash[64] = {
+       0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL,
+       0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL,
+       0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL,
+       0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL,
+       0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL,
+       0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL,
+       0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL,
+       0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL,
+       0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL,
+       0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL,
+       0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL,
+       0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL,
+       0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL,
+       0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL,
+       0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL,
+       0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL,
+       0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL,
+       0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL,
+       0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL,
+       0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL,
+       0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL,
+       0x958a324ceb064572ULL
+};
+
+#if BITS_PER_LONG == 64
+static const hsiphash_key_t test_key_hsiphash =
+       {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u32 test_vectors_hsiphash[64] = {
+       0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU,
+       0xe7ddf7fbU, 0x88d38328U, 0x49533b67U,
+       0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU,
+       0x6c063de4U, 0x92ff097fU, 0xf94dc352U,
+       0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U,
+       0x2a519956U, 0x7d908b66U, 0x63dbd80cU,
+       0xb473e63eU, 0x8d297d1cU, 0xa6cce040U,
+       0x2b45f844U, 0xa320872eU, 0xdae6c123U,
+       0x67349c8cU, 0x705b0979U, 0xca9913a5U,
+       0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U,
+       0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU,
+       0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U,
+       0xada26206U, 0xa3c33057U, 0xae3a36a1U,
+       0x7b108392U, 0x99e41531U, 0x3f1ad944U,
+       0xc8138825U, 0xc28949a6U, 0xfaf8876bU,
+       0x9f042196U, 0x68b1d623U, 0x8b5114fdU,
+       0xdf074c46U, 0x12cc86b3U, 0x0a52098fU,
+       0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U,
+       0x73f0bce6U, 0x70a7e980U, 0x243c6d75U,
+       0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U,
+       0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U,
+       0xb7bbb3a8U
+};
+#else
+static const hsiphash_key_t test_key_hsiphash =
+       {{ 0x03020100U, 0x07060504U }};
+
+static const u32 test_vectors_hsiphash[64] = {
+       0x5814c896U, 0xe7e864caU, 0xbc4b0e30U,
+       0x01539939U, 0x7e059ea6U, 0x88e3d89bU,
+       0xa0080b65U, 0x9d38d9d6U, 0x577999b1U,
+       0xc839caedU, 0xe4fa32cfU, 0x959246eeU,
+       0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU,
+       0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU,
+       0x06712339U, 0x522aca67U, 0x911bb605U,
+       0x90a65f0eU, 0xf826ef7bU, 0x62512debU,
+       0x57150ad7U, 0x5d473507U, 0x1ec47442U,
+       0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U,
+       0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU,
+       0xe0f6c934U, 0xb0652033U, 0x9b9851ccU,
+       0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU,
+       0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU,
+       0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U,
+       0x65671619U, 0x9f5fff91U, 0xd89c5267U,
+       0x007783ebU, 0x95766243U, 0xab639262U,
+       0x9c7e1390U, 0xc368dda6U, 0x38ddc455U,
+       0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU,
+       0x2ee80657U, 0x33dbb66aU, 0xae3f0577U,
+       0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U,
+       0x87178304U
+};
+#endif
+
+static int __init siphash_test_init(void)
+{
+       u8 in[64] __aligned(SIPHASH_ALIGNMENT);
+       u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
+       u8 i;
+       int ret = 0;
+
+       for (i = 0; i < 64; ++i) {
+               in[i] = i;
+               in_unaligned[i + 1] = i;
+               if (siphash(in, i, &test_key_siphash) !=
+                                               test_vectors_siphash[i]) {
+                       pr_info("siphash self-test aligned %u: FAIL\n", i + 1);
+                       ret = -EINVAL;
+               }
+               if (siphash(in_unaligned + 1, i, &test_key_siphash) !=
+                                               test_vectors_siphash[i]) {
+                       pr_info("siphash self-test unaligned %u: FAIL\n", i + 1);
+                       ret = -EINVAL;
+               }
+               if (hsiphash(in, i, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[i]) {
+                       pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1);
+                       ret = -EINVAL;
+               }
+               if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[i]) {
+                       pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1);
+                       ret = -EINVAL;
+               }
+       }
+       if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) !=
+                                               test_vectors_siphash[8]) {
+               pr_info("siphash self-test 1u64: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+                        &test_key_siphash) != test_vectors_siphash[16]) {
+               pr_info("siphash self-test 2u64: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+                        0x1716151413121110ULL, &test_key_siphash) !=
+                                               test_vectors_siphash[24]) {
+               pr_info("siphash self-test 3u64: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+                        0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
+                        &test_key_siphash) != test_vectors_siphash[32]) {
+               pr_info("siphash self-test 4u64: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_1u32(0x03020100U, &test_key_siphash) !=
+                                               test_vectors_siphash[4]) {
+               pr_info("siphash self-test 1u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) !=
+                                               test_vectors_siphash[8]) {
+               pr_info("siphash self-test 2u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_3u32(0x03020100U, 0x07060504U,
+                        0x0b0a0908U, &test_key_siphash) !=
+                                               test_vectors_siphash[12]) {
+               pr_info("siphash self-test 3u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_4u32(0x03020100U, 0x07060504U,
+                        0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) !=
+                                               test_vectors_siphash[16]) {
+               pr_info("siphash self-test 4u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[4]) {
+               pr_info("hsiphash self-test 1u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[8]) {
+               pr_info("hsiphash self-test 2u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (hsiphash_3u32(0x03020100U, 0x07060504U,
+                         0x0b0a0908U, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[12]) {
+               pr_info("hsiphash self-test 3u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (hsiphash_4u32(0x03020100U, 0x07060504U,
+                         0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[16]) {
+               pr_info("hsiphash self-test 4u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (!ret)
+               pr_info("self-tests: pass\n");
+       return ret;
+}
+
+static void __exit siphash_test_exit(void)
+{
+}
+
+module_init(siphash_test_init);
+module_exit(siphash_test_exit);
+
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_LICENSE("Dual BSD/GPL");
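
The tests above also cover the hsiphash_*() helpers, which the implementation describes as an insecure PRF intended only for hashtable bucket selection. A hypothetical sketch of that use, relying only on hsiphash_key_t and hsiphash_1u32() as defined in lib/siphash.c above; the table size, variable and function names are illustrative and not part of this change:

    #include <linux/types.h>
    #include <linux/siphash.h>

    #define DEMO_HASH_BUCKETS 256        /* illustrative power-of-two table size */

    static hsiphash_key_t demo_hash_key; /* seeded once, e.g. with get_random_bytes() */

    /* Map a peer address to a bucket. Because the hash is keyed, remote
     * peers cannot predict bucket placement and flood a single chain, while
     * HalfSipHash (SipHash1-3 on 64-bit, as noted above) stays cheap enough
     * for per-packet lookups.
     */
    static u32 demo_hash_bucket(__be32 daddr)
    {
            return hsiphash_1u32((__force u32)daddr, &demo_hash_key) &
                   (DEMO_HASH_BUCKETS - 1);
    }
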
index 664364b..34e0995 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -273,6 +273,12 @@ int __init cma_declare_contiguous(phys_addr_t base,
         */
        alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
                          max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
+       if (fixed && base & (alignment - 1)) {
+               ret = -EINVAL;
+               pr_err("Region at %pa must be aligned to %pa bytes\n",
+                       &base, &alignment);
+               goto err;
+       }
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);
@@ -303,6 +309,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
        if (limit == 0 || limit > memblock_end)
                limit = memblock_end;
 
+       if (base + size > limit) {
+               ret = -EINVAL;
+               pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
+                       &size, &base, &limit);
+               goto err;
+       }
+
        /* Reserve memory */
        if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
index de479ce..ff03908 100644 (file)
@@ -576,7 +576,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        if (in_irq()) {
                object->pid = 0;
                strncpy(object->comm, "hardirq", sizeof(object->comm));
-       } else if (in_softirq()) {
+       } else if (in_serving_softirq()) {
                object->pid = 0;
                strncpy(object->comm, "softirq", sizeof(object->comm));
        } else {
index 9a8e688..a84eb0f 100644 (file)
@@ -988,28 +988,47 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
                css_put(&prev->css);
 }
 
-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+                                       struct mem_cgroup *dead_memcg)
 {
-       struct mem_cgroup *memcg = dead_memcg;
        struct mem_cgroup_reclaim_iter *iter;
        struct mem_cgroup_per_zone *mz;
        int nid, zid;
        int i;
 
-       for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-               for_each_node(nid) {
-                       for (zid = 0; zid < MAX_NR_ZONES; zid++) {
-                               mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
-                               for (i = 0; i <= DEF_PRIORITY; i++) {
-                                       iter = &mz->iter[i];
-                                       cmpxchg(&iter->position,
-                                               dead_memcg, NULL);
-                               }
+       for_each_node(nid) {
+               for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+                       mz = &from->nodeinfo[nid]->zoneinfo[zid];
+                       for (i = 0; i <= DEF_PRIORITY; i++) {
+                               iter = &mz->iter[i];
+                               cmpxchg(&iter->position,
+                                       dead_memcg, NULL);
                        }
                }
        }
 }
 
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+       struct mem_cgroup *memcg = dead_memcg;
+       struct mem_cgroup *last;
+
+       do {
+               __invalidate_reclaim_iterators(memcg, dead_memcg);
+               last = memcg;
+       } while ((memcg = parent_mem_cgroup(memcg)));
+
+       /*
+        * When cgroup1 non-hierarchy mode is used,
+        * parent_mem_cgroup() does not walk all the way up to the
+        * cgroup root (root_mem_cgroup). So we have to handle
+        * dead_memcg from cgroup root separately.
+        */
+       if (last != root_mem_cgroup)
+               __invalidate_reclaim_iterators(root_mem_cgroup,
+                                               dead_memcg);
+}
+
 /*
  * Iteration constructs for visiting all cgroups (under a tree).  If
  * loops are exited prematurely (break), mem_cgroup_iter_break() must
index 5fbdd36..ad90b8f 100644 (file)
@@ -286,7 +286,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
         * thanks to mm_take_all_locks().
         */
        spin_lock(&mm->mmu_notifier_mm->lock);
-       hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
+       hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
        spin_unlock(&mm->mmu_notifier_mm->lock);
 
        mm_drop_all_locks(mm);
index 20fcc89..87b8c25 100644 (file)
@@ -1784,6 +1784,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                return NULL;
 
        /*
+        * First make sure the mappings are removed from all page-tables
+        * before they are freed.
+        */
+       vmalloc_sync_all();
+
+       /*
         * In this function, newly allocated vm_struct has VM_UNINITIALIZED
         * flag. It means that vm_struct is not fully initialized.
         * Now, it is fully initialized, so remove this flag here.
@@ -2320,6 +2326,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 /*
  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
  * have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
  */
 void __weak vmalloc_sync_all(void)
 {
index ce1f13a..e297ed4 100644 (file)
@@ -1393,10 +1393,15 @@ static void vmstat_update(struct work_struct *w)
                 * Counters were updated so we expect more updates
                 * to occur in the future. Keep on running the
                 * update worker thread.
+                * If we were marked on cpu_stat_off, clear the flag
+                * so that vmstat_shepherd doesn't schedule us again.
                 */
-               queue_delayed_work_on(smp_processor_id(), vmstat_wq,
-                       this_cpu_ptr(&vmstat_work),
-                       round_jiffies_relative(sysctl_stat_interval));
+               if (!cpumask_test_and_clear_cpu(smp_processor_id(),
+                                               cpu_stat_off)) {
+                       queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+                               this_cpu_ptr(&vmstat_work),
+                               round_jiffies_relative(sysctl_stat_interval));
+               }
        } else {
                /*
                 * We did not update any counters so the app may be in
@@ -1415,18 +1420,6 @@ static void vmstat_update(struct work_struct *w)
  * until the diffs stay at zero. The function is used by NOHZ and can only be
  * invoked when tick processing is not active.
  */
-void quiet_vmstat(void)
-{
-       if (system_state != SYSTEM_RUNNING)
-               return;
-
-       do {
-               if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
-                       cancel_delayed_work(this_cpu_ptr(&vmstat_work));
-
-       } while (refresh_cpu_vm_stats(false));
-}
-
 /*
  * Check if the diffs for a certain cpu indicate that
  * an update is needed.
@@ -1450,6 +1443,30 @@ static bool need_update(int cpu)
        return false;
 }
 
+void quiet_vmstat(void)
+{
+       if (system_state != SYSTEM_RUNNING)
+               return;
+
+       /*
+        * If we are already in the hands of the shepherd then there
+        * is nothing for us to do here.
+        */
+       if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
+               return;
+
+       if (!need_update(smp_processor_id()))
+               return;
+
+       /*
+        * Just refresh counters and do not care about the pending delayed
+        * vmstat_update. It doesn't fire often enough to matter, and canceling
+        * it would be too expensive from this path.
+        * vmstat_shepherd will take care about that for us.
+        */
+       refresh_cpu_vm_stats(false);
+}
+
 
 /*
  * Shepherd worker thread that checks the
@@ -1467,18 +1484,25 @@ static void vmstat_shepherd(struct work_struct *w)
 
        get_online_cpus();
        /* Check processors whose vmstat worker threads have been disabled */
-       for_each_cpu(cpu, cpu_stat_off)
-               if (!cpu_isolated(cpu) && need_update(cpu) &&
-                       cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
-
-                       queue_delayed_work_on(cpu, vmstat_wq,
-                               &per_cpu(vmstat_work, cpu), 0);
+       for_each_cpu(cpu, cpu_stat_off) {
+               struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
+               if (!cpu_isolated(cpu) && need_update(cpu)) {
+                       if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
+                               queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+               } else {
+                       /*
+                        * Cancel the work if quiet_vmstat has put this
+                        * cpu on cpu_stat_off because the work item might
+                        * still be scheduled
+                        */
+                       cancel_delayed_work(dw);
+               }
+       }
        put_online_cpus();
 
        schedule_delayed_work(&shepherd,
                round_jiffies_relative(sysctl_stat_interval));
-
 }
 
 static void __init start_shepherd_timer(void)
index 2a15b6a..5892bd1 100644 (file)
@@ -767,10 +767,16 @@ static struct p9_trans_module p9_virtio_trans = {
 /* The standard init function */
 static int __init p9_virtio_init(void)
 {
+       int rc;
+
        INIT_LIST_HEAD(&virtio_chan_list);
 
        v9fs_register_trans(&p9_virtio_trans);
-       return register_virtio_driver(&p9_virtio_drv);
+       rc = register_virtio_driver(&p9_virtio_drv);
+       if (rc)
+               v9fs_unregister_trans(&p9_virtio_trans);
+
+       return rc;
 }
 
 static void __exit p9_virtio_cleanup(void)
index f2079ac..ffd49b4 100644 (file)
@@ -3158,6 +3158,8 @@ static void batadv_tt_purge(struct work_struct *work)
 
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
+
        batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
        batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
 
index 795ddd8..4cd6b8d 100644 (file)
@@ -184,10 +184,16 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
        }
 
        if (!rt) {
-               nexthop = &lowpan_cb(skb)->gw;
-
-               if (ipv6_addr_any(nexthop))
-                       return NULL;
+               if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
+                       /* There is neither route nor gateway,
+                        * so the destination is probably a direct peer.
+                        */
+                       nexthop = daddr;
+               } else {
+                       /* There is a known gateway
+                        */
+                       nexthop = &lowpan_cb(skb)->gw;
+               }
        } else {
                nexthop = rt6_nexthop(rt, daddr);
 
index cc1b748..8d65bea 100644 (file)
@@ -5062,6 +5062,11 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
                return send_conn_param_neg_reply(hdev, handle,
                                                 HCI_ERROR_UNKNOWN_CONN_ID);
 
+       if (min < hcon->le_conn_min_interval ||
+           max > hcon->le_conn_max_interval)
+               return send_conn_param_neg_reply(hdev, handle,
+                                                HCI_ERROR_INVALID_LL_PARAMS);
+
        if (hci_check_conn_params(min, max, latency, timeout))
                return send_conn_param_neg_reply(hdev, handle,
                                                 HCI_ERROR_INVALID_LL_PARAMS);
index ad00753..6233951 100644 (file)
@@ -4363,6 +4363,12 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
 
        l2cap_chan_lock(chan);
 
+       if (chan->state != BT_DISCONN) {
+               l2cap_chan_unlock(chan);
+               mutex_unlock(&conn->chan_lock);
+               return 0;
+       }
+
        l2cap_chan_hold(chan);
        l2cap_chan_del(chan, 0);
 
@@ -5261,7 +5267,14 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
 
        memset(&rsp, 0, sizeof(rsp));
 
-       err = hci_check_conn_params(min, max, latency, to_multiplier);
+       if (min < hcon->le_conn_min_interval ||
+           max > hcon->le_conn_max_interval) {
+               BT_DBG("requested connection interval exceeds current bounds.");
+               err = -EINVAL;
+       } else {
+               err = hci_check_conn_params(min, max, latency, to_multiplier);
+       }
+
        if (err)
                rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
        else
index 68ffdf0..d3114df 100644 (file)
@@ -2532,6 +2532,19 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
                goto distribute;
        }
 
+       /* Drop IRK if peer is using identity address during pairing but is
+        * providing a different address as identity information.
+        *
+        * Microsoft Surface Precision Mouse is known to have this bug.
+        */
+       if (hci_is_identity_address(&hcon->dst, hcon->dst_type) &&
+           (bacmp(&info->bdaddr, &hcon->dst) ||
+            info->addr_type != hcon->dst_type)) {
+               bt_dev_err(hcon->hdev,
+                          "ignoring IRK with invalid identity address");
+               goto distribute;
+       }
+
        bacpy(&smp->id_addr, &info->bdaddr);
        smp->id_addr_type = info->addr_type;
 
index cd8deea..db6b65a 100644 (file)
@@ -256,7 +256,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
        struct nlmsghdr *nlh;
        struct nlattr *nest;
 
-       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
        if (!nlh)
                return -EMSGSIZE;
 
index a52b4ff..0298bfd 100644 (file)
@@ -1011,6 +1011,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
        int type;
        int err = 0;
        __be32 group;
+       u16 nsrcs;
 
        ih = igmpv3_report_hdr(skb);
        num = ntohs(ih->ngrec);
@@ -1024,8 +1025,9 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
                grec = (void *)(skb->data + len - sizeof(*grec));
                group = grec->grec_mca;
                type = grec->grec_type;
+               nsrcs = ntohs(grec->grec_nsrcs);
 
-               len += ntohs(grec->grec_nsrcs) * 4;
+               len += nsrcs * 4;
                if (!pskb_may_pull(skb, len))
                        return -EINVAL;
 
@@ -1045,7 +1047,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
 
                if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
                     type == IGMPV3_MODE_IS_INCLUDE) &&
-                   ntohs(grec->grec_nsrcs) == 0) {
+                   nsrcs == 0) {
                        br_ip4_multicast_leave_group(br, port, group, vid);
                } else {
                        err = br_ip4_multicast_add_group(br, port, group, vid);
@@ -1078,23 +1080,26 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
        len = skb_transport_offset(skb) + sizeof(*icmp6h);
 
        for (i = 0; i < num; i++) {
-               __be16 *nsrcs, _nsrcs;
-
-               nsrcs = skb_header_pointer(skb,
-                                          len + offsetof(struct mld2_grec,
-                                                         grec_nsrcs),
-                                          sizeof(_nsrcs), &_nsrcs);
-               if (!nsrcs)
+               __be16 *_nsrcs, __nsrcs;
+               u16 nsrcs;
+
+               _nsrcs = skb_header_pointer(skb,
+                                           len + offsetof(struct mld2_grec,
+                                                          grec_nsrcs),
+                                           sizeof(__nsrcs), &__nsrcs);
+               if (!_nsrcs)
                        return -EINVAL;
 
+               nsrcs = ntohs(*_nsrcs);
+
                if (!pskb_may_pull(skb,
                                   len + sizeof(*grec) +
-                                  sizeof(struct in6_addr) * ntohs(*nsrcs)))
+                                  sizeof(struct in6_addr) * nsrcs))
                        return -EINVAL;
 
                grec = (struct mld2_grec *)(skb->data + len);
                len += sizeof(*grec) +
-                      sizeof(struct in6_addr) * ntohs(*nsrcs);
+                      sizeof(struct in6_addr) * nsrcs;
 
                /* We treat these as MLDv1 reports for now. */
                switch (grec->grec_type) {
@@ -1112,7 +1117,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
                if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
                     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
-                   ntohs(*nsrcs) == 0) {
+                   nsrcs == 0) {
                        br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
                                                     vid);
                } else {
@@ -1348,7 +1353,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                                  struct sk_buff *skb,
                                  u16 vid)
 {
-       const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        struct mld_msg *mld;
        struct net_bridge_mdb_entry *mp;
        struct mld2_query *mld2q;
@@ -1392,7 +1396,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 
        if (is_general_query) {
                saddr.proto = htons(ETH_P_IPV6);
-               saddr.u.ip6 = ip6h->saddr;
+               saddr.u.ip6 = ipv6_hdr(skb)->saddr;
 
                br_multicast_query_received(br, port, &br->ip6_other_query,
                                            &saddr, max_delay);
index 5881fbc..36282eb 100644 (file)
@@ -147,7 +147,6 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
 void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
                struct net_device *dev)
 {
-       const unsigned char *dest = eth_hdr(skb)->h_dest;
        struct net_bridge_port *p;
        struct net_bridge *br;
        const unsigned char *buf;
@@ -176,7 +175,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
        if (p->state == BR_STATE_DISABLED)
                goto out;
 
-       if (!ether_addr_equal(dest, br->group_addr))
+       if (!ether_addr_equal(eth_hdr(skb)->h_dest, br->group_addr))
                goto out;
 
        if (p->flags & BR_BPDU_GUARD) {
index 1394da6..a795396 100644 (file)
@@ -580,6 +580,11 @@ void br_vlan_flush(struct net_bridge *br)
 
        ASSERT_RTNL();
 
+       /* delete auto-added default pvid local fdb before flushing vlans
+        * otherwise it will be leaked on bridge device init failure
+        */
+       br_fdb_delete_by_port(br, NULL, 0, 1);
+
        vg = br_vlan_group(br);
        __vlan_flush(vg);
        RCU_INIT_POINTER(br->vlgrp, NULL);
index 1a87cf7..d9471e3 100644 (file)
@@ -2280,8 +2280,10 @@ static int compat_do_replace(struct net *net, void __user *user,
        state.buf_kern_len = size64;
 
        ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-       if (WARN_ON(ret < 0))
+       if (WARN_ON(ret < 0)) {
+               vfree(entries_tmp);
                goto out_unlock;
+       }
 
        vfree(entries_tmp);
        tmp.entries_size = size64;
index 1dbfbb4..e19be04 100644 (file)
@@ -6873,6 +6873,8 @@ int register_netdevice(struct net_device *dev)
        ret = notifier_to_errno(ret);
        if (ret) {
                rollback_registered(dev);
+               rcu_barrier();
+
                dev->reg_state = NETREG_UNREGISTERED;
        }
        /*
@@ -7811,6 +7813,8 @@ static void __net_exit default_device_exit(struct net *net)
 
                /* Push remaining network devices to init_net */
                snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+               if (__dev_get_by_name(&init_net, fb_name))
+                       snprintf(fb_name, IFNAMSIZ, "dev%%d");
                err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
                        pr_emerg("%s: failed to move %s to init_net: %d\n",
index 2572b51..bfd16b5 100644 (file)
@@ -982,6 +982,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 
                        atomic_set(&neigh->probes,
                                   NEIGH_VAR(neigh->parms, UCAST_PROBES));
+                       neigh_del_timer(neigh);
                        neigh->nud_state     = NUD_INCOMPLETE;
                        neigh->updated = now;
                        next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
@@ -998,6 +999,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
                }
        } else if (neigh->nud_state & NUD_STALE) {
                neigh_dbg(2, "neigh %p is delayed\n", neigh);
+               neigh_del_timer(neigh);
                neigh->nud_state = NUD_DELAY;
                neigh->updated = jiffies;
                neigh_add_timer(neigh, jiffies +
index b55f340..8f445f6 100644 (file)
@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
                txq = netdev_get_tx_queue(dev, q_index);
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
-                   netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+                   !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
                        skb_queue_head(&npinfo->txq, skb);
                        HARD_TX_UNLOCK(dev, txq);
                        local_irq_restore(flags);
@@ -357,7 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
                                HARD_TX_UNLOCK(dev, txq);
 
-                               if (status == NETDEV_TX_OK)
+                               if (dev_xmit_complete(status))
                                        break;
 
                        }
@@ -374,7 +374,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
        }
 
-       if (status != NETDEV_TX_OK) {
+       if (!dev_xmit_complete(status)) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work,0);
        }
index b96f7a7..3089b01 100644 (file)
@@ -119,7 +119,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
        int err = 0;
        long vm_wait = 0;
        long current_timeo = *timeo_p;
-       bool noblock = (*timeo_p ? false : true);
        DEFINE_WAIT(wait);
 
        if (sk_stream_memory_free(sk))
@@ -132,11 +131,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
                if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                        goto do_error;
-               if (!*timeo_p) {
-                       if (noblock)
-                               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-                       goto do_nonblock;
-               }
+               if (!*timeo_p)
+                       goto do_eagain;
                if (signal_pending(current))
                        goto do_interrupted;
                sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -168,7 +164,13 @@ out:
 do_error:
        err = -EPIPE;
        goto out;
-do_nonblock:
+do_eagain:
+       /* Make sure that whenever EAGAIN is returned, an EPOLLOUT event can
+        * be generated later.
+        * When TCP receives ACK packets that make room, tcp_check_space()
+        * only calls tcp_new_space() if SOCK_NOSPACE is set.
+        */
+       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        err = -EAGAIN;
        goto out;
 do_interrupted:
index 1e3c479..e9df88f 100644 (file)
 
 #include "fib_lookup.h"
 
+#define IPV6ONLY_FLAGS \
+               (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
+                IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
+                IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
+
 static struct ipv4_devconf ipv4_devconf = {
        .data = {
                [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
@@ -453,6 +458,9 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
        ifa->ifa_flags &= ~IFA_F_SECONDARY;
        last_primary = &in_dev->ifa_list;
 
+       /* Don't set IPv6 only flags to IPv4 addresses */
+       ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
+
        for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
             ifap = &ifa1->ifa_next) {
                if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
index 2c66193..397b72f 100644 (file)
@@ -490,15 +490,17 @@ EXPORT_SYMBOL(ip_idents_reserve);
 
 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
 {
-       static u32 ip_idents_hashrnd __read_mostly;
        u32 hash, id;
 
-       net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
+       /* Note the lazy key initialization below is racy, but this is okay. */
+       if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
+               get_random_bytes(&net->ipv4.ip_id_key,
+                                sizeof(net->ipv4.ip_id_key));
 
-       hash = jhash_3words((__force u32)iph->daddr,
+       hash = siphash_3u32((__force u32)iph->daddr,
                            (__force u32)iph->saddr,
-                           iph->protocol ^ net_hash_mix(net),
-                           ip_idents_hashrnd);
+                           iph->protocol,
+                           &net->ipv4.ip_id_key);
        id = ip_idents_reserve(hash, segs);
        iph->id = htons(id);
 }
index 0fe1eaf..abae27d 100644 (file)
@@ -2281,6 +2281,8 @@ int tcp_disconnect(struct sock *sk, int flags)
        dst_release(sk->sk_rx_dst);
        sk->sk_rx_dst = NULL;
        tcp_saved_syn_free(tp);
+       tp->bytes_acked = 0;
+       tp->bytes_received = 0;
 
        WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
 
index 2fb43a1..2f48963 100644 (file)
@@ -226,7 +226,7 @@ static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
 
 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
 {
-       tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+       tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 }
 
 static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
index c4f7902..e65c211 100644 (file)
@@ -1151,6 +1151,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
        int nsize, old_factor;
+       long limit;
        int nlen;
        u8 flags;
 
@@ -1161,7 +1162,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
        if (nsize < 0)
                nsize = 0;
 
-       if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf + 0x20000)) {
+       /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
+        * We need some allowance to not penalize applications setting small
+        * SO_SNDBUF values.
+        * Also allow first and last skb in retransmit queue to be split.
+        */
+       limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
+       if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
+                    skb != tcp_rtx_queue_head(sk) &&
+                    skb != tcp_rtx_queue_tail(sk))) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
                return -ENOMEM;
        }
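
For a sense of scale, a rough, hypothetical back-of-the-envelope for the allowance introduced above; the exact per-skb overhead depends on the kernel config and architecture, and GSO_MAX_SIZE is assumed to be its usual 64 KiB:

    /*
     *	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE)
     *	      ~ sk->sk_sndbuf + 2 * (65536 + a few hundred bytes of
     *	                             struct sk_buff / skb_shared_info overhead)
     *	      ~ sk->sk_sndbuf + ~130 KB
     *
     * so even sockets with a very small SO_SNDBUF keep enough headroom to
     * split the skb at the head or tail of the retransmit queue.
     */
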
index e348a14..91f16e6 100644 (file)
@@ -1666,6 +1666,10 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
        struct net *net = sock_net(sk);
        struct mr6_table *mrt;
 
+       if (sk->sk_type != SOCK_RAW ||
+           inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
+               return -EOPNOTSUPP;
+
        mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
        if (!mrt)
                return -ENOENT;
@@ -1677,9 +1681,6 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 
        switch (optname) {
        case MRT6_INIT:
-               if (sk->sk_type != SOCK_RAW ||
-                   inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
-                       return -EOPNOTSUPP;
                if (optlen < sizeof(int))
                        return -EINVAL;
 
@@ -1816,6 +1817,10 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
        struct net *net = sock_net(sk);
        struct mr6_table *mrt;
 
+       if (sk->sk_type != SOCK_RAW ||
+           inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
+               return -EOPNOTSUPP;
+
        mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
        if (!mrt)
                return -ENOENT;
index e03bb5a..e16a05c 100644 (file)
@@ -188,7 +188,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
 static inline int ndisc_is_useropt(struct nd_opt_hdr *opt)
 {
        return opt->nd_opt_type == ND_OPT_RDNSS ||
-               opt->nd_opt_type == ND_OPT_DNSSL;
+               opt->nd_opt_type == ND_OPT_DNSSL ||
+               opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL;
 }
 
 static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
index f99a046..6b896cc 100644 (file)
 #include <net/secure_seq.h>
 #include <linux/netfilter.h>
 
-static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
+static u32 __ipv6_select_ident(struct net *net,
                               const struct in6_addr *dst,
                               const struct in6_addr *src)
 {
+       const struct {
+               struct in6_addr dst;
+               struct in6_addr src;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .dst = *dst,
+               .src = *src,
+       };
        u32 hash, id;
 
-       hash = __ipv6_addr_jhash(dst, hashrnd);
-       hash = __ipv6_addr_jhash(src, hash);
-       hash ^= net_hash_mix(net);
+       /* Note the lazy key initialization below is racy, but this is okay. */
+       if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
+               get_random_bytes(&net->ipv4.ip_id_key,
+                                sizeof(net->ipv4.ip_id_key));
+
+       hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);
 
        /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
         * set the high order instead thus minimizing possible future
@@ -41,7 +51,6 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
  */
 void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
 {
-       static u32 ip6_proxy_idents_hashrnd __read_mostly;
        struct in6_addr buf[2];
        struct in6_addr *addrs;
        u32 id;
@@ -53,11 +62,7 @@ void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
        if (!addrs)
                return;
 
-       net_get_random_once(&ip6_proxy_idents_hashrnd,
-                           sizeof(ip6_proxy_idents_hashrnd));
-
-       id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
-                                &addrs[1], &addrs[0]);
+       id = __ipv6_select_ident(net, &addrs[1], &addrs[0]);
        skb_shinfo(skb)->ip6_frag_id = htonl(id);
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
@@ -66,12 +71,9 @@ __be32 ipv6_select_ident(struct net *net,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr)
 {
-       static u32 ip6_idents_hashrnd __read_mostly;
        u32 id;
 
-       net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
-
-       id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
+       id = __ipv6_select_ident(net, daddr, saddr);
        return htonl(id);
 }
 EXPORT_SYMBOL(ipv6_select_ident);
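
The hunks above replace the jhash-based fragment IDs with a keyed hash (SipHash in the kernel) over a combined {dst, src} struct, seeded lazily per namespace. A rough userspace sketch of that shape follows — illustrative only; the stand-in keyed_hash() is FNV-1a over key||data, chosen just so the example compiles, and is NOT SipHash nor a substitute for it.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <time.h>

    /* Stand-in keyed hash: 64-bit FNV-1a over key || data. No security
     * properties; it only illustrates the "keyed hash over a combined
     * {dst, src} struct" shape of the patched __ipv6_select_ident(). */
    static uint64_t keyed_hash(const void *key, size_t klen,
                               const void *data, size_t dlen)
    {
        const uint8_t *p;
        uint64_t h = 0xcbf29ce484222325ULL;
        size_t i;

        for (p = key, i = 0; i < klen; i++)
            h = (h ^ p[i]) * 0x100000001b3ULL;
        for (p = data, i = 0; i < dlen; i++)
            h = (h ^ p[i]) * 0x100000001b3ULL;
        return h;
    }

    struct addr6 { uint8_t b[16]; };

    static uint8_t ident_key[16];
    static int ident_key_set;

    /* Mirror of the patched flow: seed the per-"namespace" key lazily,
     * then hash destination and source together so the ID stream is
     * per-path rather than derived from a global hashrnd. */
    static uint32_t select_ident_sketch(const struct addr6 *dst,
                                        const struct addr6 *src)
    {
        struct { struct addr6 dst, src; } combined;

        if (!ident_key_set) {            /* kernel uses get_random_bytes() */
            for (size_t i = 0; i < sizeof(ident_key); i++)
                ident_key[i] = (uint8_t)rand();
            ident_key_set = 1;
        }
        combined.dst = *dst;
        combined.src = *src;
        return (uint32_t)keyed_hash(ident_key, sizeof(ident_key),
                                    &combined, sizeof(combined));
    }

    int main(void)
    {
        struct addr6 a = { .b = { 0x20, 0x01, 0x0d, 0xb8, 1 } };
        struct addr6 b = { .b = { 0x20, 0x01, 0x0d, 0xb8, 2 } };

        srand((unsigned)time(NULL));
        printf("id(a,b) = 0x%08x\n", (unsigned)select_ident_sketch(&a, &b));
        printf("id(b,a) = 0x%08x\n", (unsigned)select_ident_sketch(&b, &a));
        return 0;
    }
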
index 4c753f4..40b8357 100644 (file)
@@ -231,7 +231,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
        return ping_proc_register(net, &ping_v6_seq_afinfo);
 }
 
-static void __net_init ping_v6_proc_exit_net(struct net *net)
+static void __net_exit ping_v6_proc_exit_net(struct net *net)
 {
        return ping_proc_unregister(net, &ping_v6_seq_afinfo);
 }
index 58c045d..b400a4c 100644 (file)
@@ -1959,8 +1959,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 
        if (rq->sadb_x_ipsecrequest_mode == 0)
                return -EINVAL;
+       if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
+               return -EINVAL;
 
-       t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
+       t->id.proto = rq->sadb_x_ipsecrequest_proto;
        if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
                return -EINVAL;
        t->mode = mode;
@@ -2453,8 +2455,10 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
                goto out;
        }
        err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(out_skb);
                goto out;
+       }
 
        out_hdr = (struct sadb_msg *) out_skb->data;
        out_hdr->sadb_msg_version = hdr->sadb_msg_version;
@@ -2707,8 +2711,10 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
                return PTR_ERR(out_skb);
 
        err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(out_skb);
                return err;
+       }
 
        out_hdr = (struct sadb_msg *) out_skb->data;
        out_hdr->sadb_msg_version = pfk->dump.msg_version;
index 2764c4b..d3f1222 100644 (file)
@@ -1805,6 +1805,9 @@ static const struct proto_ops pppol2tp_ops = {
        .recvmsg        = pppol2tp_recvmsg,
        .mmap           = sock_no_mmap,
        .ioctl          = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = pppox_compat_ioctl,
+#endif
 };
 
 static const struct pppox_proto pppol2tp_proto = {
index 7349bf2..1999a7e 100644 (file)
@@ -1211,6 +1211,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (is_multicast_ether_addr(mac))
                return -EINVAL;
 
+       if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
+           sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !sdata->u.mgd.associated)
+               return -EINVAL;
+
        sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
        if (!sta)
                return -ENOMEM;
@@ -1228,10 +1233,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
                sta->sta.tdls = true;
 
-       if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
-           !sdata->u.mgd.associated)
-               return -EINVAL;
-
        err = sta_apply_parameters(local, sta, params);
        if (err) {
                sta_info_free(local, sta);
index c258f10..df2e4e3 100644 (file)
@@ -169,11 +169,16 @@ int drv_conf_tx(struct ieee80211_local *local,
        if (!check_sdata_in_driver(sdata))
                return -EIO;
 
-       if (WARN_ONCE(params->cw_min == 0 ||
-                     params->cw_min > params->cw_max,
-                     "%s: invalid CW_min/CW_max: %d/%d\n",
-                     sdata->name, params->cw_min, params->cw_max))
+       if (params->cw_min == 0 || params->cw_min > params->cw_max) {
+               /*
+                * If we can't configure hardware anyway, don't warn. We may
+                * never have initialized the CW parameters.
+                */
+               WARN_ONCE(local->ops->conf_tx,
+                         "%s: invalid CW_min/CW_max: %d/%d\n",
+                         sdata->name, params->cw_min, params->cw_max);
                return -EINVAL;
+       }
 
        trace_drv_conf_tx(local, sdata, ac, params);
        if (local->ops->conf_tx)
index 9e86e8f..1f2b1c8 100644 (file)
@@ -1892,6 +1892,16 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
                }
        }
 
+       /* WMM specification requires all 4 ACIs. */
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               if (params[ac].cw_min == 0) {
+                       sdata_info(sdata,
+                                  "AP has invalid WMM params (missing AC %d), using defaults\n",
+                                  ac);
+                       return false;
+               }
+       }
+
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                mlme_dbg(sdata,
                         "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
index 0610e8c..d637d15 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <linux/err.h>
 #include <linux/percpu.h>
 #include <linux/moduleparam.h>
@@ -234,6 +235,40 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 }
 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
 
+/* Generate an almost-unique pseudo-id for a given conntrack.
+ *
+ * intentionally doesn't re-use any of the seeds used for hash
+ * table location, we assume id gets exposed to userspace.
+ *
+ * Following nf_conn items do not change throughout lifetime
+ * of the nf_conn:
+ *
+ * 1. nf_conn address
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
+ */
+u32 nf_ct_get_id(const struct nf_conn *ct)
+{
+       static __read_mostly siphash_key_t ct_id_seed;
+       unsigned long a, b, c, d;
+
+       net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+       a = (unsigned long)ct;
+       b = (unsigned long)ct->master;
+       c = (unsigned long)nf_ct_net(ct);
+       d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                                  sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
+                                  &ct_id_seed);
+#ifdef CONFIG_64BIT
+       return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+#else
+       return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_id);
+
 static void
 clean_from_lists(struct nf_conn *ct)
 {
index b666959..b7c1317 100644 (file)
@@ -334,7 +334,7 @@ static int find_pattern(const char *data, size_t dlen,
                i++;
        }
 
-       pr_debug("Skipped up to `%c'!\n", skip);
+       pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
 
        *numoff = i;
        *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
index c68e020..3a24c01 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/siphash.h>
 
 #include <linux/netfilter.h>
 #include <net/netlink.h>
@@ -451,7 +452,9 @@ ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
 static inline int
 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+       __be32 id = (__force __be32)nf_ct_get_id(ct);
+
+       if (nla_put_be32(skb, CTA_ID, id))
                goto nla_put_failure;
        return 0;
 
@@ -1159,8 +1162,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
        ct = nf_ct_tuplehash_to_ctrack(h);
 
        if (cda[CTA_ID]) {
-               u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
-               if (id != (u32)(unsigned long)ct) {
+               __be32 id = nla_get_be32(cda[CTA_ID]);
+
+               if (id != (__force __be32)nf_ct_get_id(ct)) {
                        nf_ct_put(ct);
                        return -ENOENT;
                }
@@ -2480,6 +2484,25 @@ nla_put_failure:
 
 static const union nf_inet_addr any_addr;
 
+static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
+{
+       static __read_mostly siphash_key_t exp_id_seed;
+       unsigned long a, b, c, d;
+
+       net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
+
+       a = (unsigned long)exp;
+       b = (unsigned long)exp->helper;
+       c = (unsigned long)exp->master;
+       d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
+
+#ifdef CONFIG_64BIT
+       return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
+#else
+       return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
+#endif
+}
+
 static int
 ctnetlink_exp_dump_expect(struct sk_buff *skb,
                          const struct nf_conntrack_expect *exp)
@@ -2527,7 +2550,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
        }
 #endif
        if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
-           nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+           nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
            nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
            nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
                goto nla_put_failure;
@@ -2824,7 +2847,8 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
 
        if (cda[CTA_EXPECT_ID]) {
                __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
-               if (ntohl(id) != (u32)(unsigned long)exp) {
+
+               if (id != nf_expect_get_id(exp)) {
                        nf_ct_expect_put(exp);
                        return -ENOENT;
                }
index 9adedba..044559c 100644 (file)
@@ -495,7 +495,7 @@ static int nfnetlink_bind(struct net *net, int group)
        ss = nfnetlink_get_subsys(type << 8);
        rcu_read_unlock();
        if (!ss)
-               request_module("nfnetlink-subsys-%d", type);
+               request_module_nowait("nfnetlink-subsys-%d", type);
        return 0;
 }
 #endif
index 088e8da..0f3cb41 100644 (file)
@@ -97,6 +97,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
                        return -EINVAL;
                if (flags & NFACCT_F_OVERQUOTA)
                        return -EINVAL;
+               if ((flags & NFACCT_F_QUOTA) && !tb[NFACCT_QUOTA])
+                       return -EINVAL;
 
                size += sizeof(u64);
        }
index 046ae1c..e588898 100644 (file)
@@ -870,7 +870,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
        unsigned short frametype, flags, window, timeout;
        int ret;
 
-       skb->sk = NULL;         /* Initially we don't know who it's for */
+       skb_orphan(skb);
 
        /*
         *      skb->data points to the netrom frame start
@@ -968,7 +968,9 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
 
        window = skb->data[20];
 
+       sock_hold(make);
        skb->sk             = make;
+       skb->destructor     = sock_efree;
        make->sk_state      = TCP_ESTABLISHED;
 
        /* Fill in his circuit details */
index dbd2425..d203837 100644 (file)
@@ -119,7 +119,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
        conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
        if (!conn_info) {
                rc = -EPROTO;
-               goto free_exit;
+               goto exit;
        }
 
        __skb_queue_head_init(&frags_q);
index c78bcc1..0dd9fc3 100644 (file)
@@ -2498,6 +2498,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 
        mutex_lock(&po->pg_vec_lock);
 
+       /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
+        * we need to confirm it under protection of pg_vec_lock.
+        */
+       if (unlikely(!po->tx_ring.pg_vec)) {
+               err = -EBUSY;
+               goto out;
+       }
        if (likely(saddr == NULL)) {
                dev     = packet_cached_dev_get(po);
                proto   = po->num;
@@ -4169,7 +4176,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 
        /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
        if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
-               WARN(1, "Tx-ring is not supported.\n");
+               net_warn_ratelimited("Tx-ring is not supported.\n");
                goto out;
        }
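
The tpacket_snd() hunk above re-validates tx_ring.pg_vec under pg_vec_lock because the earlier check in packet_sendmsg() ran lockless and the ring can be torn down in between. A small pthread sketch of that recheck-under-lock pattern, with hypothetical names:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the socket's ring state. */
    struct ring_state {
        pthread_mutex_t lock;
        void *pg_vec;            /* NULL means "no TX ring mapped" */
    };

    /* The caller may have observed pg_vec != NULL without the lock held;
     * another thread can free the ring in between, so the observation must
     * be confirmed again once the lock is taken. */
    static int send_via_ring(struct ring_state *rs)
    {
        int err = 0;

        pthread_mutex_lock(&rs->lock);
        if (rs->pg_vec == NULL) {      /* re-check under pg_vec_lock */
            err = -EBUSY;
            goto out;
        }
        /* ...safe to use rs->pg_vec while the lock is held... */
    out:
        pthread_mutex_unlock(&rs->lock);
        return err;
    }

    int main(void)
    {
        struct ring_state rs = { PTHREAD_MUTEX_INITIALIZER, NULL };

        printf("no ring: %d\n", send_via_ring(&rs));   /* -EBUSY */
        rs.pg_vec = malloc(16);
        printf("ring:    %d\n", send_via_ring(&rs));   /* 0 */
        free(rs.pg_vec);
        return 0;
    }
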
 
index 9b7e298..3bc5dec 100644 (file)
@@ -68,7 +68,8 @@ static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
 {
        struct sk_buff *skb = __skb_dequeue(&sch->q);
 
-       prefetch(&skb->end); /* we'll need skb_shinfo() */
+       if (skb)
+               prefetch(&skb->end); /* we'll need skb_shinfo() */
        return skb;
 }
 
index aa47250..eec6dc2 100644 (file)
@@ -671,7 +671,11 @@ static void qdisc_rcu_free(struct rcu_head *head)
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
-       const struct Qdisc_ops  *ops = qdisc->ops;
+       const struct Qdisc_ops *ops;
+
+       if (!qdisc)
+               return;
+       ops = qdisc->ops;
 
        if (qdisc->flags & TCQ_F_BUILTIN ||
            !atomic_dec_and_test(&qdisc->refcnt))
index aff2a1b..dc68dcc 100644 (file)
@@ -552,7 +552,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
                new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
 
        non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
-       if (non_hh_quantum > INT_MAX)
+       if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
                return -EINVAL;
 
        sch_tree_lock(sch);
index 247d188..07c54b2 100644 (file)
@@ -1331,7 +1331,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
        return status;
 }
 
-static void __net_init sctp_ctrlsock_exit(struct net *net)
+static void __net_exit sctp_ctrlsock_exit(struct net *net)
 {
        /* Free the control endpoint.  */
        inet_ctl_sock_destroy(net->sctp.ctl_sock);
index 6098d4c..7c220e9 100644 (file)
@@ -504,8 +504,8 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
         * see SCTP Quick Failover Draft, section 5.1
         */
        if ((transport->state == SCTP_ACTIVE) &&
-          (asoc->pf_retrans < transport->pathmaxrxt) &&
-          (transport->error_count > asoc->pf_retrans)) {
+          (transport->error_count < transport->pathmaxrxt) &&
+          (transport->error_count > transport->pf_retrans)) {
 
                sctp_assoc_control_transport(asoc, transport,
                                             SCTP_TRANSPORT_PF,
index c4c151b..b57675f 100644 (file)
@@ -284,7 +284,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
                       publ->key);
        }
 
-       kfree_rcu(p, rcu);
+       if (p)
+               kfree_rcu(p, rcu);
 }
 
 void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
index 9a65664..d2bf92e 100644 (file)
@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
        int rep_type;
        int rep_size;
        int req_type;
+       int req_size;
        struct net *net;
        struct sk_buff *rep;
        struct tlv_desc *req;
@@ -252,7 +253,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
        int err;
        struct sk_buff *arg;
 
-       if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+       if (msg->req_type && (!msg->req_size ||
+                             !TLV_CHECK_TYPE(msg->req, msg->req_type)))
                return -EINVAL;
 
        msg->rep = tipc_tlv_alloc(msg->rep_size);
@@ -345,7 +347,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
 {
        int err;
 
-       if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+       if (msg->req_type && (!msg->req_size ||
+                             !TLV_CHECK_TYPE(msg->req, msg->req_type)))
                return -EINVAL;
 
        err = __tipc_nl_compat_doit(cmd, msg);
@@ -1192,8 +1195,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
                goto send;
        }
 
-       len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
-       if (!len || !TLV_OK(msg.req, len)) {
+       msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+       if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
                msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
                err = -EOPNOTSUPP;
                goto send;
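
These hunks carry the raw attribute length in req_size and refuse to type-check a request TLV when that length is zero or too small. A simplified, self-contained sketch of the same bounds check; the TLV layout here is an assumption for illustration, not TIPC's exact wire format.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified TLV header: total length (including the header) and type. */
    struct tlv_hdr {
        uint16_t len;
        uint16_t type;
    };

    /* A request TLV is usable only if the caller-reported size covers at
     * least a header and the header's own length fits inside that size. */
    static int tlv_ok(const void *req, size_t req_size, uint16_t want_type)
    {
        struct tlv_hdr h;

        if (req_size < sizeof(h))
            return 0;                     /* empty or truncated request */
        memcpy(&h, req, sizeof(h));
        if (h.len < sizeof(h) || h.len > req_size)
            return 0;                     /* length field lies */
        return h.type == want_type;
    }

    int main(void)
    {
        unsigned char buf[8] = { 0 };
        struct tlv_hdr h = { .len = 8, .type = 42 };

        memcpy(buf, &h, sizeof(h));
        printf("valid:     %d\n", tlv_ok(buf, sizeof(buf), 42));  /* 1 */
        printf("zero size: %d\n", tlv_ok(buf, 0, 42));            /* 0 */
        printf("short:     %d\n", tlv_ok(buf, 2, 42));            /* 0 */
        return 0;
    }
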
index 53925b3..a3d08bb 100644 (file)
@@ -2277,7 +2277,7 @@ static void reg_process_pending_hints(void)
 
        /* When last_request->processed becomes true this will be rescheduled */
        if (lr && !lr->processed) {
-               reg_process_hint(lr);
+               pr_debug("Pending regulatory request, waiting for it to be processed...\n");
                return;
        }
 
index 2e48d0b..7d50a37 100644 (file)
@@ -1857,11 +1857,6 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
        struct xfrm_mgr *km;
        struct xfrm_policy *pol = NULL;
 
-#ifdef CONFIG_COMPAT
-       if (is_compat_task())
-               return -EOPNOTSUPP;
-#endif
-
        if (!optval && !optlen) {
                xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
                xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
@@ -2142,7 +2137,7 @@ void xfrm_state_fini(struct net *net)
        unsigned int sz;
 
        flush_work(&net->xfrm.state_hash_work);
-       xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
+       xfrm_state_flush(net, 0, false);
        flush_work(&net->xfrm.state_gc_work);
 
        WARN_ON(!list_empty(&net->xfrm.state_all));
index 4804f29..69126af 100644 (file)
@@ -151,6 +151,25 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
        err = -EINVAL;
        switch (p->family) {
        case AF_INET:
+               break;
+
+       case AF_INET6:
+#if IS_ENABLED(CONFIG_IPV6)
+               break;
+#else
+               err = -EAFNOSUPPORT;
+               goto out;
+#endif
+
+       default:
+               goto out;
+       }
+
+       switch (p->sel.family) {
+       case AF_UNSPEC:
+               break;
+
+       case AF_INET:
                if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
                        goto out;
 
@@ -1437,20 +1456,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                        return -EINVAL;
                }
 
-               switch (ut[i].id.proto) {
-               case IPPROTO_AH:
-               case IPPROTO_ESP:
-               case IPPROTO_COMP:
-#if IS_ENABLED(CONFIG_IPV6)
-               case IPPROTO_ROUTING:
-               case IPPROTO_DSTOPTS:
-#endif
-               case IPSEC_PROTO_ANY:
-                       break;
-               default:
+               if (!xfrm_id_proto_valid(ut[i].id.proto))
                        return -EINVAL;
-               }
-
        }
 
        return 0;
@@ -2524,11 +2531,6 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        const struct xfrm_link *link;
        int type, err;
 
-#ifdef CONFIG_COMPAT
-       if (is_compat_task())
-               return -EOPNOTSUPP;
-#endif
-
        type = nlh->nlmsg_type;
        if (type > XFRM_MSG_MAX)
                return -EINVAL;
index 1366a94..7718a64 100644 (file)
@@ -74,7 +74,7 @@ modpost = scripts/mod/modpost                    \
  $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,)       \
  $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile)   \
  $(if $(KBUILD_EXTMOD),-I $(modulesymfile))      \
- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
  $(if $(KBUILD_EXTMOD),-o $(modulesymfile))      \
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
  $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E)  \
index ffc46c7..4f5e76f 100755 (executable)
@@ -64,7 +64,7 @@ parse_symbol() {
        fi
 
        # Strip out the base of the path
-       code=${code//^$basepath/""}
+       code=${code#$basepath/}
 
        # In the case of inlines, move everything to same line
        code=${code//$'\n'/' '}
index 8fa81e8..d117c68 100644 (file)
@@ -158,6 +158,9 @@ static int read_symbol(FILE *in, struct sym_entry *s)
        /* exclude debugging symbols */
        else if (stype == 'N')
                return -1;
+       /* exclude s390 kasan local symbols */
+       else if (!strncmp(sym, ".LASANPC", 8))
+               return -1;
 
        /* include the type field in the symbol name, so that it gets
         * compressed together */
index b9897e2..04151ed 100644 (file)
@@ -326,7 +326,8 @@ static uint_t *sift_rel_mcount(uint_t *mlocp,
                if (!mcountsym)
                        mcountsym = get_mcountsym(sym0, relp, str0);
 
-               if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
+               if (mcountsym && mcountsym == Elf_r_sym(relp) &&
+                               !is_fake_mcount(relp)) {
                        uint_t const addend =
                                _w(_w(relp->r_offset) - recval + mcount_adjust);
                        mrelp->r_offset = _w(offbase
index 8882b72..976deea 100644 (file)
@@ -71,6 +71,9 @@ static void request_key_auth_describe(const struct key *key,
 {
        struct request_key_auth *rka = key->payload.data[0];
 
+       if (!rka)
+               return;
+
        seq_puts(m, "key:");
        seq_puts(m, key->description);
        if (key_is_positive(key))
@@ -88,6 +91,9 @@ static long request_key_auth_read(const struct key *key,
        size_t datalen;
        long ret;
 
+       if (!rka)
+               return -EKEYREVOKED;
+
        datalen = rka->callout_len;
        ret = datalen;
 
index 965a55e..01fbbbf 100644 (file)
@@ -266,6 +266,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
        return v;
 }
 
+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
+
 /*
  * Initialize a policy database structure.
  */
@@ -313,8 +315,10 @@ static int policydb_init(struct policydb *p)
 out:
        hashtab_destroy(p->filename_trans);
        hashtab_destroy(p->range_tr);
-       for (i = 0; i < SYM_NUM; i++)
+       for (i = 0; i < SYM_NUM; i++) {
+               hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
                hashtab_destroy(p->symtab[i].table);
+       }
        return rc;
 }
 
index 15f58ea..1fbd29c 100644 (file)
@@ -554,10 +554,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
                stream->metadata_set = false;
                stream->next_track = false;
 
-               if (stream->direction == SND_COMPRESS_PLAYBACK)
-                       stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-               else
-                       stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+               stream->runtime->state = SNDRV_PCM_STATE_SETUP;
        } else {
                return -EPERM;
        }
@@ -674,8 +671,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
 {
        int retval;
 
-       if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+       switch (stream->runtime->state) {
+       case SNDRV_PCM_STATE_SETUP:
+               if (stream->direction != SND_COMPRESS_CAPTURE)
+                       return -EPERM;
+               break;
+       case SNDRV_PCM_STATE_PREPARED:
+               break;
+       default:
                return -EPERM;
+       }
+
        retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
        if (!retval)
                stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
@@ -686,9 +692,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
 {
        int retval;
 
-       if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-                       stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+       switch (stream->runtime->state) {
+       case SNDRV_PCM_STATE_OPEN:
+       case SNDRV_PCM_STATE_SETUP:
+       case SNDRV_PCM_STATE_PREPARED:
                return -EPERM;
+       default:
+               break;
+       }
+
        retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
        if (!retval) {
                stream->runtime->state = SNDRV_PCM_STATE_SETUP;
@@ -708,12 +720,21 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
        int retval;
 
        mutex_lock(&stream->device->lock);
-       if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-                       stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
+       switch (stream->runtime->state) {
+       case SNDRV_PCM_STATE_OPEN:
+       case SNDRV_PCM_STATE_SETUP:
+       case SNDRV_PCM_STATE_PREPARED:
+       case SNDRV_PCM_STATE_PAUSED:
                retval = -EPERM;
                goto ret;
+       case SNDRV_PCM_STATE_XRUN:
+               retval = -EPIPE;
+               goto ret;
+       default:
+               break;
        }
        mutex_unlock(&stream->device->lock);
+
        retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
        mutex_lock(&stream->device->lock);
        if (!retval) {
@@ -753,12 +774,21 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
        int retval;
 
        mutex_lock(&stream->device->lock);
-       if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-                       stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
+       switch (stream->runtime->state) {
+       case SNDRV_PCM_STATE_OPEN:
+       case SNDRV_PCM_STATE_SETUP:
+       case SNDRV_PCM_STATE_PREPARED:
+       case SNDRV_PCM_STATE_PAUSED:
                mutex_unlock(&stream->device->lock);
                return -EPERM;
+       case SNDRV_PCM_STATE_XRUN:
+               mutex_unlock(&stream->device->lock);
+               return -EPIPE;
+       default:
+               break;
        }
        mutex_unlock(&stream->device->lock);
+
        /* stream can be drained only when next track has been signalled */
        if (stream->next_track == false)
                return -EPERM;
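
The compress-offload hunks replace open-coded state comparisons with switch-based gating per operation: START is allowed from SETUP only for capture, while DRAIN rejects idle states with -EPERM and reports XRUN as -EPIPE. A compact sketch of the same gating, with hypothetical state names standing in for SNDRV_PCM_STATE_*:

    #include <errno.h>
    #include <stdio.h>

    enum stream_state { ST_OPEN, ST_SETUP, ST_PREPARED, ST_RUNNING, ST_PAUSED, ST_XRUN };
    enum stream_dir   { DIR_PLAYBACK, DIR_CAPTURE };

    /* START: PREPARED always works; SETUP only for capture streams. */
    static int start_allowed(enum stream_state st, enum stream_dir dir)
    {
        switch (st) {
        case ST_SETUP:
            return dir == DIR_CAPTURE ? 0 : -EPERM;
        case ST_PREPARED:
            return 0;
        default:
            return -EPERM;
        }
    }

    /* DRAIN: idle-ish states are -EPERM, an overrun is surfaced as -EPIPE. */
    static int drain_allowed(enum stream_state st)
    {
        switch (st) {
        case ST_OPEN:
        case ST_SETUP:
        case ST_PREPARED:
        case ST_PAUSED:
            return -EPERM;
        case ST_XRUN:
            return -EPIPE;
        default:
            return 0;
        }
    }

    int main(void)
    {
        printf("start from SETUP, playback: %d\n", start_allowed(ST_SETUP, DIR_PLAYBACK));
        printf("start from SETUP, capture:  %d\n", start_allowed(ST_SETUP, DIR_CAPTURE));
        printf("drain from RUNNING:         %d\n", drain_allowed(ST_RUNNING));
        printf("drain from XRUN:            %d\n", drain_allowed(ST_XRUN));
        return 0;
    }
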
index 0d0e0c2..331a2b0 100644 (file)
@@ -1014,7 +1014,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 {
        struct snd_seq_client *client = file->private_data;
        int written = 0, len;
-       int err;
+       int err, handled;
        struct snd_seq_event event;
 
        if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1027,6 +1027,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
        if (!client->accept_output || client->pool == NULL)
                return -ENXIO;
 
+ repeat:
+       handled = 0;
        /* allocate the pool now if the pool is not allocated yet */ 
        mutex_lock(&client->ioctl_mutex);
        if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
@@ -1086,12 +1088,19 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
                                                   0, 0, &client->ioctl_mutex);
                if (err < 0)
                        break;
+               handled++;
 
        __skip_event:
                /* Update pointers and counts */
                count -= len;
                buf += len;
                written += len;
+
+               /* let's have a coffee break if too many events are queued */
+               if (++handled >= 200) {
+                       mutex_unlock(&client->ioctl_mutex);
+                       goto repeat;
+               }
        }
 
  out:
@@ -1897,8 +1906,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
        if (cptr->type == USER_CLIENT) {
                info.input_pool = cptr->data.user.fifo_pool_size;
                info.input_free = info.input_pool;
-               if (cptr->data.user.fifo)
-                       info.input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
+               info.input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
        } else {
                info.input_pool = 0;
                info.input_free = 0;
index 9acbed1..d9f5428 100644 (file)
@@ -278,3 +278,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
 
        return 0;
 }
+
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
+{
+       unsigned long flags;
+       int cells;
+
+       if (!f)
+               return 0;
+
+       snd_use_lock_use(&f->use_lock);
+       spin_lock_irqsave(&f->lock, flags);
+       cells = snd_seq_unused_cells(f->pool);
+       spin_unlock_irqrestore(&f->lock, flags);
+       snd_use_lock_free(&f->use_lock);
+       return cells;
+}
index 062c446..5d38a0d 100644 (file)
@@ -68,5 +68,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
 /* resize pool in fifo */
 int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
 
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
 
 #endif
index ea15066..3b09b8e 100644 (file)
@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
        packets_per_page = PAGE_SIZE / packet_size;
        if (WARN_ON(!packets_per_page)) {
                err = -EINVAL;
-               goto error;
+               goto err_packets;
        }
        pages = DIV_ROUND_UP(count, packets_per_page);
 
index a03cf68..12d8720 100644 (file)
@@ -827,6 +827,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
        while (id >= 0) {
                const struct hda_fixup *fix = codec->fixup_list + id;
 
+               if (++depth > 10)
+                       break;
                if (fix->chained_before)
                        apply_fixup(codec, fix->chain_id, action, depth + 1);
 
@@ -866,8 +868,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
                }
                if (!fix->chained || fix->chained_before)
                        break;
-               if (++depth > 10)
-                       break;
                id = fix->chain_id;
        }
 }
index 689df78..869c322 100644 (file)
@@ -5826,7 +5826,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
        if (spec->init_hook)
                spec->init_hook(codec);
 
-       snd_hda_apply_verbs(codec);
+       if (!spec->skip_verbs)
+               snd_hda_apply_verbs(codec);
 
        init_multi_out(codec);
        init_extra_out(codec);
@@ -5917,7 +5918,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
 
        err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
        if (err < 0)
-               return err;
+               goto error;
 
        err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
        if (err < 0)
index 56e4139..25f2397 100644 (file)
@@ -236,6 +236,7 @@ struct hda_gen_spec {
        unsigned int indep_hp_enabled:1; /* independent HP enabled */
        unsigned int have_aamix_ctl:1;
        unsigned int hp_mic_jack_modes:1;
+       unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
 
        /* additional mute flags (only effective with auto_mute_via_amp=1) */
        u64 mute_bits;
index 40dd465..05e745e 100644 (file)
@@ -1008,6 +1008,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
  */
 
 static const struct hda_device_id snd_hda_id_conexant[] = {
+       HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
index 68d96c2..d5ca160 100644 (file)
@@ -772,9 +772,11 @@ static int alc_init(struct hda_codec *codec)
        if (spec->init_hook)
                spec->init_hook(codec);
 
+       spec->gen.skip_verbs = 1; /* applied below */
        snd_hda_gen_init(codec);
        alc_fix_pll(codec);
        alc_auto_init_amp(codec, spec->init_amp);
+       snd_hda_apply_verbs(codec); /* apply verbs here after own init */
 
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
 
index 1c79e63..fa157b4 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1871,7 +1871,8 @@ static int msm_sdw_probe(struct platform_device *pdev)
        int adsp_state;
 
        adsp_state = apr_get_subsys_state();
-       if (adsp_state != APR_SUBSYS_LOADED) {
+       if (adsp_state != APR_SUBSYS_LOADED ||
+               !q6core_is_adsp_ready()) {
                dev_err(&pdev->dev, "Adsp is not loaded yet %d\n",
                                adsp_state);
                return -EPROBE_DEFER;
index 94122de..66245ba 100644 (file)
@@ -4575,7 +4575,8 @@ static int msm_anlg_cdc_probe(struct platform_device *pdev)
        int adsp_state;
 
        adsp_state = apr_get_subsys_state();
-       if (adsp_state != APR_SUBSYS_LOADED) {
+       if (adsp_state != APR_SUBSYS_LOADED ||
+               !q6core_is_adsp_ready()) {
                dev_err(&pdev->dev, "Adsp is not loaded yet %d\n",
                        adsp_state);
                return -EPROBE_DEFER;
index 512ec25..2f7be6c 100644 (file)
@@ -1128,6 +1128,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
        return ret;
 }
 
+static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
+                                           struct snd_pcm_hw_rule *rule)
+{
+       struct davinci_mcasp_ruledata *rd = rule->private;
+       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+       struct snd_mask nfmt;
+       int i, slot_width;
+
+       snd_mask_none(&nfmt);
+       slot_width = rd->mcasp->slot_width;
+
+       for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+               if (snd_mask_test(fmt, i)) {
+                       if (snd_pcm_format_width(i) <= slot_width) {
+                               snd_mask_set(&nfmt, i);
+                       }
+               }
+       }
+
+       return snd_mask_refine(fmt, &nfmt);
+}
+
 static const unsigned int davinci_mcasp_dai_rates[] = {
        8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
        88200, 96000, 176400, 192000,
@@ -1219,7 +1241,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
        struct davinci_mcasp_ruledata *ruledata =
                                        &mcasp->ruledata[substream->stream];
        u32 max_channels = 0;
-       int i, dir;
+       int i, dir, ret;
        int tdm_slots = mcasp->tdm_slots;
 
        if (mcasp->tdm_mask[substream->stream])
@@ -1244,6 +1266,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
                        max_channels++;
        }
        ruledata->serializers = max_channels;
+       ruledata->mcasp = mcasp;
        max_channels *= tdm_slots;
        /*
         * If the already active stream has less channels than the calculated
@@ -1269,20 +1292,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
                                   0, SNDRV_PCM_HW_PARAM_CHANNELS,
                                   &mcasp->chconstr[substream->stream]);
 
-       if (mcasp->slot_width)
-               snd_pcm_hw_constraint_minmax(substream->runtime,
-                                            SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
-                                            8, mcasp->slot_width);
+       if (mcasp->slot_width) {
+               /* Only allow formats that require <= slot_width bits on the bus */
+               ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+                                         SNDRV_PCM_HW_PARAM_FORMAT,
+                                         davinci_mcasp_hw_rule_slot_width,
+                                         ruledata,
+                                         SNDRV_PCM_HW_PARAM_FORMAT, -1);
+               if (ret)
+                       return ret;
+       }
 
        /*
         * If we rely on implicit BCLK divider setting we should
         * set constraints based on what we can provide.
         */
        if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
-               int ret;
-
-               ruledata->mcasp = mcasp;
-
                ret = snd_pcm_hw_rule_add(substream->runtime, 0,
                                          SNDRV_PCM_HW_PARAM_RATE,
                                          davinci_mcasp_hw_rule_rate,
index 0a33171..5572a3d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -962,7 +962,8 @@ void rtac_set_asm_handle(u32 session_id, void *handle)
        pr_debug("%s\n", __func__);
 
        mutex_lock(&rtac_asm_apr_mutex);
-       rtac_asm_apr_data[session_id].apr_handle = handle;
+       if (rtac_asm_apr_data[session_id].apr_handle)
+               rtac_asm_apr_data[session_id].apr_handle = handle;
        mutex_unlock(&rtac_asm_apr_mutex);
 }
 
index 99b73c6..20d4e2e 100644 (file)
@@ -287,7 +287,8 @@ retry:
                                goto retry;
                        }
                        spin_unlock(&sound_loader_lock);
-                       return -EBUSY;
+                       r = -EBUSY;
+                       goto fail;
                }
        }
 
index 63dcaef..7fa37ba 100644 (file)
@@ -155,7 +155,7 @@ static const struct line6_properties podhd_properties_table[] = {
                .capabilities   = LINE6_CAP_CONTROL
                                | LINE6_CAP_PCM
                                | LINE6_CAP_HWMON,
-               .altsetting = 1,
+               .altsetting = 0,
                .ep_ctrl_r = 0x81,
                .ep_ctrl_w = 0x01,
                .ep_audio_r = 0x86,
index de8fe20..ddf6620 100644 (file)
@@ -82,6 +82,7 @@ struct mixer_build {
        unsigned char *buffer;
        unsigned int buflen;
        DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+       DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
        struct usb_audio_term oterm;
        const struct usbmix_name_map *map;
        const struct usbmix_selector_map *selector_map;
@@ -721,15 +722,24 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
  * parse the source unit recursively until it reaches to a terminal
  * or a branched unit.
  */
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
                            struct usb_audio_term *term)
 {
        int err;
        void *p1;
+       unsigned char *hdr;
 
        memset(term, 0, sizeof(*term));
-       while ((p1 = find_audio_control_unit(state, id)) != NULL) {
-               unsigned char *hdr = p1;
+       for (;;) {
+               /* a loop in the terminal chain? */
+               if (test_and_set_bit(id, state->termbitmap))
+                       return -EINVAL;
+
+               p1 = find_audio_control_unit(state, id);
+               if (!p1)
+                       break;
+
+               hdr = p1;
                term->id = id;
                switch (hdr[2]) {
                case UAC_INPUT_TERMINAL:
@@ -744,7 +754,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
                                /* call recursively to verify that the
                                 * referenced clock entity is valid */
-                               err = check_input_term(state, d->bCSourceID, term);
+                               err = __check_input_term(state, d->bCSourceID, term);
                                if (err < 0)
                                        return err;
 
@@ -759,8 +769,8 @@ static int check_input_term(struct mixer_build *state, int id,
                        } else { /* UAC_VERSION_3 */
                                struct uac3_input_terminal_descriptor *d = p1;
 
-                               err = check_input_term(state,
-                                                       d->bCSourceID, term);
+                               err = __check_input_term(state,
+                                                        d->bCSourceID, term);
                                if (err < 0)
                                        return err;
 
@@ -817,8 +827,8 @@ static int check_input_term(struct mixer_build *state, int id,
                        } else {
                                struct uac_selector_unit_descriptor *d = p1;
                                /* call recursively to retrieve channel info */
-                               err = check_input_term(state,
-                                                       d->baSourceID[0], term);
+                               err = __check_input_term(state,
+                                                        d->baSourceID[0], term);
                                if (err < 0)
                                        return err;
                                /* virtual type */
@@ -881,6 +891,15 @@ static int check_input_term(struct mixer_build *state, int id,
        return -ENODEV;
 }
 
+
+static int check_input_term(struct mixer_build *state, int id,
+                           struct usb_audio_term *term)
+{
+       memset(term, 0, sizeof(*term));
+       memset(state->termbitmap, 0, sizeof(state->termbitmap));
+       return __check_input_term(state, id, term);
+}
+
 /*
  * Feature Unit
  */
@@ -1886,7 +1905,8 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
                    NUM_CHANNELS_MONO : NUM_CHANNELS_STEREO;
        } else {
                if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
-               !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
+                   desc->bLength < sizeof(*desc) + desc->bNrInPins ||
+                   !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
                        usb_audio_err(state->chip,
                                      "invalid MIXER UNIT descriptor %d\n",
                                      unitid);
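
__check_input_term() now records every visited unit id in a bitmap and bails out when an id repeats, so a malicious or broken descriptor chain cannot send the parser around a cycle forever. A small standalone sketch of that cycle check over a hypothetical unit table:

    #include <stdio.h>
    #include <string.h>

    #define MAX_IDS 256

    /* Hypothetical unit table: next_id[i] is the source unit that unit i
     * points at, or -1 when the chain ends at a terminal. */
    static int next_id[MAX_IDS];

    /* Walk the source chain, marking every visited id in a bitmap; a second
     * visit means the descriptors form a cycle, which is rejected just like
     * the test_and_set_bit() check added to __check_input_term(). */
    static int resolve_term(int id)
    {
        unsigned char seen[MAX_IDS / 8] = { 0 };

        while (id >= 0 && id < MAX_IDS) {
            if (seen[id / 8] & (1u << (id % 8)))
                return -1;                   /* loop in the chain */
            seen[id / 8] |= 1u << (id % 8);
            if (next_id[id] < 0)
                return id;                   /* reached a terminal */
            id = next_id[id];
        }
        return -1;                           /* dangling reference */
    }

    int main(void)
    {
        memset(next_id, -1, sizeof(next_id));
        next_id[5] = 7;                      /* 5 -> 7 -> terminal */
        printf("chain 5: %d\n", resolve_term(5));

        next_id[7] = 5;                      /* 5 -> 7 -> 5 : cycle */
        printf("cycle 5: %d\n", resolve_term(5));
        return 0;
    }
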
index 1774800..8340086 100644 (file)
@@ -878,7 +878,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
        int sn_offset = 0;
        int error = 0;
        char *buffer;
-       struct hv_kvp_ipaddr_value *ip_buffer;
+       struct hv_kvp_ipaddr_value *ip_buffer = NULL;
        char cidr_mask[5]; /* /xyz */
        int weight;
        int i;
@@ -1379,6 +1379,8 @@ int main(int argc, char *argv[])
                        daemonize = 0;
                        break;
                case 'h':
+                       print_usage(argv);
+                       exit(0);
                default:
                        print_usage(argv);
                        exit(EXIT_FAILURE);
index 5d51d6f..b5465f9 100644 (file)
@@ -164,6 +164,8 @@ int main(int argc, char *argv[])
                        daemonize = 0;
                        break;
                case 'h':
+                       print_usage(argv);
+                       exit(0);
                default:
                        print_usage(argv);
                        exit(EXIT_FAILURE);
index 5eb6793..2d0dcd6 100644 (file)
@@ -163,9 +163,9 @@ int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
                        *be = (endianchar == 'b');
                        *bytes = padint / 8;
                        if (*bits_used == 64)
-                               *mask = ~0;
+                               *mask = ~(0ULL);
                        else
-                               *mask = (1ULL << *bits_used) - 1;
+                               *mask = (1ULL << *bits_used) - 1ULL;
 
                        *is_signed = (signchar == 's');
                        if (fclose(sysfsfp)) {
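
The iio hunk special-cases the 64-bit width because shifting a 64-bit value by 64 is undefined behaviour in C; ~(0ULL) is used instead of (1ULL << 64) - 1. The same guard in isolation, as a tiny sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Full-width masks must be special-cased: (1ULL << 64) is UB. */
    static uint64_t width_mask(unsigned bits_used)
    {
        if (bits_used >= 64)
            return ~0ULL;
        return (1ULL << bits_used) - 1ULL;
    }

    int main(void)
    {
        printf("12 bits: 0x%llx\n", (unsigned long long)width_mask(12));
        printf("32 bits: 0x%llx\n", (unsigned long long)width_mask(32));
        printf("64 bits: 0x%llx\n", (unsigned long long)width_mask(64));
        return 0;
    }
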
index df41dee..3bfba81 100644 (file)
@@ -370,8 +370,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
 
        /* Allocate and initialize all memory on CPU#0: */
        if (init_cpu0) {
-               orig_mask = bind_to_node(0);
-               bind_to_memnode(0);
+               int node = numa_node_of_cpu(0);
+
+               orig_mask = bind_to_node(node);
+               bind_to_memnode(node);
        }
 
        bytes = bytes0 + HPSIZE;
index 9d4ac90..66fb1d5 100644 (file)
@@ -613,6 +613,16 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
 
                ret = perf_add_probe_events(params.events, params.nevents);
                if (ret < 0) {
+
+                       /*
+                        * When perf_add_probe_events() fails it calls
+                        * cleanup_perf_probe_events(pevs, npevs), i.e.
+                        * cleanup_perf_probe_events(params.events, params.nevents), which
+                        * will call clear_perf_probe_event(), so set nevents to zero
+                        * to avoid cleanup_params() to call clear_perf_probe_event() again
+                        * to keep cleanup_params() from calling clear_perf_probe_event() again
+                        */
+                       params.nevents = 0;
                        pr_err_with_code("  Error: Failed to add events.", ret);
                        return ret;
                }
index 145050e..195ba31 100644 (file)
@@ -49,7 +49,7 @@ static void *thread_fn(void *arg)
 {
        struct thread_data *td = arg;
        ssize_t ret;
-       int go;
+       int go = 0;
 
        if (thread_init(td))
                return NULL;
index 9aa1004..448b43c 100644 (file)
@@ -492,6 +492,9 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
 {
        char bf[128];
 
+       if (!evsel)
+               goto out_unknown;
+
        if (evsel->name)
                return evsel->name;
 
@@ -528,7 +531,10 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
 
        evsel->name = strdup(bf);
 
-       return evsel->name ?: "unknown";
+       if (evsel->name)
+               return evsel->name;
+out_unknown:
+       return "unknown";
 }
 
 const char *perf_evsel__group_name(struct perf_evsel *evsel)
index 304f5d7..81ceb4a 100644 (file)
@@ -2591,6 +2591,13 @@ int perf_session__read_header(struct perf_session *session)
                           file->path);
        }
 
+       if (f_header.attr_size == 0) {
+               pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
+                      "Was the 'perf record' command properly terminated?\n",
+                      file->path);
+               return -EINVAL;
+       }
+
        nr_attrs = f_header.attrs.size / f_header.attr_size;
        lseek(fd, f_header.attrs.offset, SEEK_SET);
 
@@ -2673,7 +2680,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
        size += sizeof(struct perf_event_header);
        size += ids * sizeof(u64);
 
-       ev = malloc(size);
+       ev = zalloc(size);
 
        if (ev == NULL)
                return -ENOMEM;
index 829508a..45c19b4 100644 (file)
@@ -110,14 +110,24 @@ struct comm *thread__comm(const struct thread *thread)
 
 struct comm *thread__exec_comm(const struct thread *thread)
 {
-       struct comm *comm, *last = NULL;
+       struct comm *comm, *last = NULL, *second_last = NULL;
 
        list_for_each_entry(comm, &thread->comm_list, list) {
                if (comm->exec)
                        return comm;
+               second_last = last;
                last = comm;
        }
 
+       /*
+        * 'last' with no start time might be the parent's comm of a synthesized
+        * thread (created by processing a synthesized fork event). For a main
+        * thread, that is very probably wrong. Prefer a later comm to avoid
+        * that case.
+        */
+       if (second_last && !last->start && thread->pid_ == thread->tid)
+               return second_last;
+
        return last;
 }
 
index 0fbd1a2..2f86935 100644 (file)
@@ -306,6 +306,8 @@ int cmd_freq_set(int argc, char **argv)
                                bitmask_setbit(cpus_chosen, cpus->cpu);
                                cpus = cpus->next;
                        }
+                       /* Set the last cpu in related cpus list */
+                       bitmask_setbit(cpus_chosen, cpus->cpu);
                        cpufreq_put_related_cpus(cpus);
                }
        }
index 532e7bf..58cf161 100644 (file)
@@ -3014,7 +3014,7 @@ int initialize_counters(int cpu_id)
 
 void allocate_output_buffer()
 {
-       output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
+       output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
        outp = output_buffer;
        if (outp == NULL)
                err(-1, "calloc output buffer");
diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
new file mode 100644 (file)
index 0000000..63ed533
--- /dev/null
@@ -0,0 +1,3 @@
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
index 571c1ce..5c1efb8 100644 (file)
@@ -39,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
        return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
 {
        struct kvm_coalesced_mmio_ring *ring;
        unsigned avail;
@@ -51,7 +51,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
         * there is always one unused entry in the buffer
         */
        ring = dev->kvm->coalesced_mmio_ring;
-       avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+       avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
        if (avail == 0) {
                /* full */
                return 0;
@@ -66,24 +66,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 {
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+       __u32 insert;
 
        if (!coalesced_mmio_in_range(dev, addr, len))
                return -EOPNOTSUPP;
 
        spin_lock(&dev->kvm->ring_lock);
 
-       if (!coalesced_mmio_has_room(dev)) {
+       insert = READ_ONCE(ring->last);
+       if (!coalesced_mmio_has_room(dev, insert) ||
+           insert >= KVM_COALESCED_MMIO_MAX) {
                spin_unlock(&dev->kvm->ring_lock);
                return -EOPNOTSUPP;
        }
 
        /* copy data in first free entry of the ring */
 
-       ring->coalesced_mmio[ring->last].phys_addr = addr;
-       ring->coalesced_mmio[ring->last].len = len;
-       memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+       ring->coalesced_mmio[insert].phys_addr = addr;
+       ring->coalesced_mmio[insert].len = len;
+       memcpy(ring->coalesced_mmio[insert].data, val, len);
        smp_wmb();
-       ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+       ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
        spin_unlock(&dev->kvm->ring_lock);
        return 0;
 }
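
The coalesced-MMIO fix snapshots ring->last once (READ_ONCE), checks both the room computation and the array bound against that snapshot, and only then writes the slot, so a concurrent writer of ring->last can no longer steer the index out of bounds. A userspace sketch of that producer path over a tiny stand-in ring — illustrative only, all sizes and names are assumptions:

    #include <stdio.h>
    #include <string.h>

    #define RING_MAX 8   /* small stand-in for KVM_COALESCED_MMIO_MAX */

    struct entry { unsigned long addr; unsigned len; };

    struct ring {
        unsigned first;                 /* consumer index (owned elsewhere) */
        unsigned last;                  /* producer index */
        struct entry slot[RING_MAX];
    };

    /* One slot is always left empty, so (first - last - 1) % MAX is the
     * remaining room, as in coalesced_mmio_has_room(). */
    static int ring_has_room(const struct ring *r, unsigned last_snapshot)
    {
        return ((r->first - last_snapshot - 1) % RING_MAX) != 0;
    }

    /* Snapshot the producer index once, validate it, and only then index
     * the array with it, mirroring the READ_ONCE()/bounds check added to
     * coalesced_mmio_write(). */
    static int ring_push(struct ring *r, unsigned long addr, unsigned len)
    {
        unsigned insert = r->last;      /* kernel uses READ_ONCE(ring->last) */

        if (insert >= RING_MAX || !ring_has_room(r, insert))
            return -1;
        r->slot[insert].addr = addr;
        r->slot[insert].len  = len;
        r->last = (insert + 1) % RING_MAX;
        return 0;
    }

    int main(void)
    {
        struct ring r;
        int i, pushed = 0;

        memset(&r, 0, sizeof(r));
        for (i = 0; i < 2 * RING_MAX; i++)
            if (ring_push(&r, 0x1000 + i, 4) == 0)
                pushed++;
        printf("pushed %d of %d attempts (one slot stays free)\n",
               pushed, 2 * RING_MAX);
        return 0;
    }
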