Merge branch 'thermal-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 15 Jan 2015 06:20:26 +0000 (19:20 +1300)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 15 Jan 2015 06:20:26 +0000 (19:20 +1300)
Pull thermal fixes from Zhang Rui:
 "Specifics:

   - bogus type qualifier fix in OF thermal code.
   - Minor fixes on imx and rcar thermal drivers.
   - Update TI SoC thermal maintainer entry.
   - Updated documentation of OF cpufreq cooling register"

* 'thermal-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux:
  thermal: rcar: Spelling/grammar: s/drier use .../driver uses ...s/
  thermal: rcar: change type of ctemp in rcar_thermal_update_temp()
  thermal: rcar: fix ENR register value
  Documentation: thermal: document of_cpufreq_cooling_register()
  Thermal: imx: add clk disable/enable for suspend/resume
  MAINTAINERS: update ti-soc-thermal status
  MAINTAINERS: Add linux-omap to list of reviewers for TI Thermal
  thermal: of: Remove bogus type qualifier for of_thermal_get_trip_points()
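
(The documentation commit above covers of_cpufreq_cooling_register(), which binds a cpufreq-based cooling device to a device-tree node. As a rough sketch of how a platform driver of this kernel era might call it — the function name and surrounding structure here are illustrative, not taken from this merge:)

    #include <linux/cpu_cooling.h>
    #include <linux/cpumask.h>
    #include <linux/err.h>
    #include <linux/of.h>

    static struct thermal_cooling_device *cdev;

    /* np: the DT node carrying #cooling-cells for this cooling device */
    static int example_register_cpu_cooling(struct device_node *np)
    {
            /* Bind CPU0's cpufreq domain to the node; thermal-zone trip
             * points can then throttle it through the cooling device. */
            cdev = of_cpufreq_cooling_register(np, cpumask_of(0));
            if (IS_ERR(cdev))
                    return PTR_ERR(cdev);
            return 0;
    }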

311 files changed:
.mailmap
Documentation/networking/ip-sysctl.txt
Documentation/target/tcm_mod_builder.py
MAINTAINERS
Makefile
arch/arm/boot/dts/imx6sx-sdb.dts
arch/arm/boot/dts/vf610-twr.dts
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/perf_regs.c
arch/arm/mm/dump.c
arch/arm/mm/init.c
arch/arm/mm/mmu.c
arch/arm64/include/asm/arch_timer.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/unistd.h
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/module.c
arch/arm64/kernel/perf_regs.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kvm/hyp.S
arch/arm64/kvm/reset.c
arch/blackfin/mach-bf533/boards/stamp.c
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/acpi.c
arch/ia64/kernel/entry.S
arch/powerpc/include/asm/kexec.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/platforms/pseries/lpar.c
arch/s390/hypfs/hypfs_vm.c
arch/s390/include/asm/irqflags.h
arch/s390/include/asm/timex.h
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/syscalls.S
arch/s390/kernel/uprobes.c
arch/s390/kernel/vtime.c
arch/s390/mm/pgtable.c
arch/s390/net/bpf_jit_comp.c
arch/x86/boot/Makefile
arch/x86/crypto/Makefile
arch/x86/crypto/aes_ctrby8_avx-x86_64.S
arch/x86/include/asm/vgtod.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/mkcapflags.sh
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
arch/x86/kernel/perf_regs.c
arch/x86/lib/insn.c
arch/x86/mm/init.c
arch/x86/vdso/vma.c
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/x86/xen/time.c
block/blk-core.c
block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk-mq.h
block/blk-timeout.c
drivers/Makefile
drivers/acpi/acpi_processor.c
drivers/acpi/device_pm.c
drivers/acpi/int340x_thermal.c
drivers/acpi/processor_core.c
drivers/acpi/scan.c
drivers/acpi/video.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/virtio_blk.c
drivers/char/ipmi/ipmi_ssif.c
drivers/gpio/gpio-dln2.c
drivers/gpio/gpio-grgpio.c
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/nouveau/core/core/event.c
drivers/gpu/drm/nouveau/core/core/notify.c
drivers/gpu/drm/nouveau/core/engine/device/nve0.c
drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/dce3_1_afmt.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/radeon_kfd.c
drivers/gpu/drm/radeon/radeon_state.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-kye.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-roccat-pyra.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/iommu/intel-iommu.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/rockchip-iommu.c
drivers/isdn/hardware/eicon/message.c
drivers/leds/leds-netxbig.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci-pci.h
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci.c
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/at91_ether.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/dnet.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/i40e/Makefile
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_osdep.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_ale.h
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/team/team.c
drivers/net/usb/kaweth.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-st.c
drivers/s390/crypto/ap_bus.c
drivers/scsi/qla2xxx/qla_os.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_user.c
drivers/thermal/int340x_thermal/acpi_thermal_rel.c
drivers/thermal/int340x_thermal/processor_thermal_device.c
drivers/vfio/pci/vfio_pci.c
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
fs/btrfs/backref.c
fs/btrfs/delayed-inode.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/scrub.c
fs/ceph/addr.c
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/resize.c
fs/ext4/super.c
fs/fcntl.c
fs/locks.c
fs/nfsd/nfs4state.c
fs/notify/fanotify/fanotify_user.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/namei.c
include/acpi/processor.h
include/asm-generic/tlb.h
include/linux/acpi.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/ceph/osd_client.h
include/linux/compiler.h
include/linux/fs.h
include/linux/kdb.h
include/linux/mm.h
include/linux/mmc/sdhci.h
include/linux/netdevice.h
include/linux/perf_event.h
include/linux/perf_regs.h
include/linux/rmap.h
include/linux/writeback.h
include/net/mac80211.h
include/target/target_core_backend.h
include/target/target_core_backend_configfs.h
include/target/target_core_base.h
include/uapi/asm-generic/fcntl.h
include/uapi/linux/kfd_ioctl.h
include/uapi/linux/openvswitch.h
include/xen/interface/nmi.h [new file with mode: 0644]
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_bp.c
kernel/debug/kdb/kdb_debugger.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/events/core.c
kernel/exit.c
kernel/locking/mutex-debug.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/trace/trace_kdb.c
lib/Kconfig.kgdb
lib/assoc_array.c
mm/Kconfig.debug
mm/memcontrol.c
mm/memory.c
mm/mmap.c
mm/page-writeback.c
mm/rmap.c
mm/vmscan.c
net/batman-adv/multicast.c
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/routing.c
net/bridge/br_input.c
net/ceph/auth_x.c
net/ceph/mon_client.c
net/core/neighbour.c
net/ipv4/netfilter/nft_redir_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/netfilter/nft_redir_ipv6.c
net/mac80211/key.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nft_nat.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/openvswitch/vport.c
net/packet/af_packet.c
net/sunrpc/xdr.c
net/tipc/bcast.c
scripts/Makefile.clean
security/keys/gc.c
sound/firewire/fireworks/fireworks_transaction.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_sigmatel.c
sound/usb/caiaq/audio.c
tools/lib/lockdep/preload.c
tools/perf/builtin-annotate.c
tools/perf/builtin-diff.c
tools/perf/builtin-list.c
tools/perf/builtin-report.c
tools/perf/builtin-top.c
tools/perf/tests/hists_cumulate.c
tools/perf/tests/hists_filter.c
tools/perf/tests/hists_output.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/hist.c
tools/perf/ui/tui/setup.c
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/testing/selftests/exec/execveat.c
tools/testing/selftests/mqueue/mq_perf_tests.c
tools/testing/selftests/vm/Makefile

index ada8ad6..d357e1b 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
+Henrik Rydberg <rydberg@bitmath.org>
 Herbert Xu <herbert@gondor.apana.org.au>
 Jacob Shin <Jacob.Shin@amd.com>
 James Bottomley <jejb@mulgrave.(none)>
index 9bffdfc..85b0221 100644 (file)
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
 route/max_size - INTEGER
        Maximum number of routes allowed in the kernel.  Increase
        this when using large numbers of interfaces and/or routes.
+       From linux kernel 3.6 onwards, this is deprecated for ipv4
+       as route cache is no longer used.
 
 neigh/default/gc_thresh1 - INTEGER
        Minimum number of entries to keep.  Garbage collector will not
index 230ce71..2b47704 100755 (executable)
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .release_cmd                    = " + fabric_mod_name + "_release_cmd,\n"
        buf += "        .shutdown_session               = " + fabric_mod_name + "_shutdown_session,\n"
        buf += "        .close_session                  = " + fabric_mod_name + "_close_session,\n"
-       buf += "        .stop_session                   = " + fabric_mod_name + "_stop_session,\n"
-       buf += "        .fall_back_to_erl0              = " + fabric_mod_name + "_reset_nexus,\n"
-       buf += "        .sess_logged_in                 = " + fabric_mod_name + "_sess_logged_in,\n"
        buf += "        .sess_get_index                 = " + fabric_mod_name + "_sess_get_index,\n"
        buf += "        .sess_get_initiator_sid         = NULL,\n"
        buf += "        .write_pending                  = " + fabric_mod_name + "_write_pending,\n"
@@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .queue_data_in                  = " + fabric_mod_name + "_queue_data_in,\n"
        buf += "        .queue_status                   = " + fabric_mod_name + "_queue_status,\n"
        buf += "        .queue_tm_rsp                   = " + fabric_mod_name + "_queue_tm_rsp,\n"
-       buf += "        .is_state_remove                = " + fabric_mod_name + "_is_state_remove,\n"
+       buf += "        .aborted_task                   = " + fabric_mod_name + "_aborted_task,\n"
        buf += "        /*\n"
        buf += "         * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
        buf += "         */\n"
@@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        /*\n"
        buf += "         * Register the top level struct config_item_type with TCM core\n"
        buf += "         */\n"
-       buf += "        fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+       buf += "        fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
        buf += "        if (IS_ERR(fabric)) {\n"
        buf += "                printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
        buf += "                return PTR_ERR(fabric);\n"
@@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                if re.search('get_fabric_name', fo):
                        buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
                        buf += "{\n"
-                       buf += "        return \"" + fabric_mod_name[4:] + "\";\n"
+                       buf += "        return \"" + fabric_mod_name + "\";\n"
                        buf += "}\n\n"
                        bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
                        continue
@@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
 
-               if re.search('stop_session\)\(', fo):
-                       buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
-                       buf += "{\n"
-                       buf += "        return;\n"
-                       buf += "}\n\n"
-                       bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
-
-               if re.search('fall_back_to_erl0\)\(', fo):
-                       buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
-                       buf += "{\n"
-                       buf += "        return;\n"
-                       buf += "}\n\n"
-                       bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
-
-               if re.search('sess_logged_in\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
-                       buf += "{\n"
-                       buf += "        return 0;\n"
-                       buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
-
                if re.search('sess_get_index\)\(', fo):
                        buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
                        buf += "{\n"
@@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
 
                if re.search('queue_tm_rsp\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+                       buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
-                       buf += "        return 0;\n"
+                       buf += "        return;\n"
                        buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+                       bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
 
-               if re.search('is_state_remove\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+               if re.search('aborted_task\)\(', fo):
+                       buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
-                       buf += "        return 0;\n"
+                       buf += "        return;\n"
                        buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
-
+                       bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
 
        ret = p.write(buf)
        if ret:
@@ -1018,11 +993,11 @@ def main(modname, proto_ident):
        tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
        tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
 
-       input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
+       input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
        if input == "yes" or input == "y":
                tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
 
-       input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+       input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
        if input == "yes" or input == "y":
                tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
 
index bc91817..ab6610e 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -724,15 +724,15 @@ F:        include/uapi/linux/apm_bios.h
 F:     drivers/char/apm-emulation.c
 
 APPLE BCM5974 MULTITOUCH DRIVER
-M:     Henrik Rydberg <rydberg@euromail.se>
+M:     Henrik Rydberg <rydberg@bitmath.org>
 L:     linux-input@vger.kernel.org
-S:     Maintained
+S:     Odd fixes
 F:     drivers/input/mouse/bcm5974.c
 
 APPLE SMC DRIVER
-M:     Henrik Rydberg <rydberg@euromail.se>
+M:     Henrik Rydberg <rydberg@bitmath.org>
 L:     lm-sensors@lm-sensors.org
-S:     Maintained
+S:     Odd fixes
 F:     drivers/hwmon/applesmc.c
 
 APPLETALK NETWORK LAYER
@@ -2259,6 +2259,7 @@ F:        drivers/gpio/gpio-bt8xx.c
 BTRFS FILE SYSTEM
 M:     Chris Mason <clm@fb.com>
 M:     Josef Bacik <jbacik@fb.com>
+M:     David Sterba <dsterba@suse.cz>
 L:     linux-btrfs@vger.kernel.org
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -4748,7 +4749,7 @@ S:        Supported
 F:     drivers/scsi/ipr.*
 
 IBM Power Virtual Ethernet Device Driver
-M:     Santiago Leon <santil@linux.vnet.ibm.com>
+M:     Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmveth.*
@@ -4940,10 +4941,10 @@ F:      include/uapi/linux/input.h
 F:     include/linux/input/
 
 INPUT MULTITOUCH (MT) PROTOCOL
-M:     Henrik Rydberg <rydberg@euromail.se>
+M:     Henrik Rydberg <rydberg@bitmath.org>
 L:     linux-input@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
-S:     Maintained
+S:     Odd fixes
 F:     Documentation/input/multi-touch-protocol.txt
 F:     drivers/input/input-mt.c
 K:     \b(ABS|SYN)_MT_
@@ -5279,6 +5280,15 @@ W:       www.open-iscsi.org
 Q:     http://patchwork.kernel.org/project/linux-rdma/list/
 F:     drivers/infiniband/ulp/iser/
 
+ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
+M:     Sagi Grimberg <sagig@mellanox.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
+L:     linux-rdma@vger.kernel.org
+L:     target-devel@vger.kernel.org
+S:     Supported
+W:     http://www.linux-iscsi.org
+F:     drivers/infiniband/ulp/isert
+
 ISDN SUBSYSTEM
 M:     Karsten Keil <isdn@linux-pingi.de>
 L:     isdn4linux@listserv.isdn4linux.de (subscribers-only)
index ef748e1..e41a335 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
@@ -391,6 +391,7 @@ USERINCLUDE    := \
 # Needed to be compatible with the O= option
 LINUXINCLUDE    := \
                -I$(srctree)/arch/$(hdr-arch)/include \
+               -Iarch/$(hdr-arch)/include/generated/uapi \
                -Iarch/$(hdr-arch)/include/generated \
                $(if $(KBUILD_SRC), -I$(srctree)/include) \
                -Iinclude \
index 1e6e5cc..8c1febd 100644 (file)
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
        pinctrl-0 = <&pinctrl_enet1>;
        phy-supply = <&reg_enet_3v3>;
        phy-mode = "rgmii";
+       phy-handle = <&ethphy1>;
        status = "okay";
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ethphy1: ethernet-phy@0 {
+                       reg = <0>;
+               };
+
+               ethphy2: ethernet-phy@1 {
+                       reg = <1>;
+               };
+       };
 };
 
 &fec2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet2>;
        phy-mode = "rgmii";
+       phy-handle = <&ethphy2>;
        status = "okay";
 };
 
index a0f7621..f2b64b1 100644 (file)
--- a/arch/arm/boot/dts/vf610-twr.dts
+++ b/arch/arm/boot/dts/vf610-twr.dts
 
 &fec0 {
        phy-mode = "rmii";
+       phy-handle = <&ethphy0>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_fec0>;
        status = "okay";
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ethphy0: ethernet-phy@0 {
+                       reg = <0>;
+               };
+
+               ethphy1: ethernet-phy@1 {
+                       reg = <1>;
+               };
+       };
 };
 
 &fec1 {
        phy-mode = "rmii";
+       phy-handle = <&ethphy1>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_fec1>;
        status = "okay";
index 705bb76..0c3f5a0 100644 (file)
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
 #define __NR_getrandom                 (__NR_SYSCALL_BASE+384)
 #define __NR_memfd_create              (__NR_SYSCALL_BASE+385)
 #define __NR_bpf                       (__NR_SYSCALL_BASE+386)
+#define __NR_execveat                  (__NR_SYSCALL_BASE+387)
 
 /*
  * The following SWIs are ARM private.
index e51833f..05745eb 100644 (file)
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
                CALL(sys_getrandom)
 /* 385 */      CALL(sys_memfd_create)
                CALL(sys_bpf)
+               CALL(sys_execveat)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 6e4379c..592dda3 100644 (file)
--- a/arch/arm/kernel/perf_regs.c
+++ b/arch/arm/kernel/perf_regs.c
@@ -28,3 +28,11 @@ u64 perf_reg_abi(struct task_struct *task)
 {
        return PERF_SAMPLE_REGS_ABI_32;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
index 5942493..9fe8e24 100644 (file)
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
        static const char units[] = "KMGTPE";
        u64 prot = val & pg_level[level].mask;
 
-       if (addr < USER_PGTABLES_CEILING)
-               return;
-
        if (!st->level) {
                st->level = level;
                st->current_prot = prot;
@@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
        pgd_t *pgd = swapper_pg_dir;
        struct pg_state st;
        unsigned long addr;
-       unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
+       unsigned i;
 
        memset(&st, 0, sizeof(st));
        st.seq = m;
        st.marker = address_markers;
 
-       pgd += pgdoff;
-
-       for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
+       for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
                addr = i * PGDIR_SIZE;
                if (!pgd_none(*pgd)) {
                        walk_pud(&st, pgd, addr);
index 98ad9c7..2495c8c 100644 (file)
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = {
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
-               .mask   = ~PMD_SECT_RDONLY,
-               .prot   = PMD_SECT_RDONLY,
+               .mask   = ~L_PMD_SECT_RDONLY,
+               .prot   = L_PMD_SECT_RDONLY,
 #else
                .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
index cda7c40..4e6ef89 100644 (file)
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
        struct memblock_region *reg;
-       unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-       unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+       phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+       phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
        /* Map all the lowmem memory banks. */
        for_each_memblock(memory, reg) {
index b1fa4e6..fbe0ca3 100644 (file)
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -21,6 +21,7 @@
 
 #include <asm/barrier.h>
 
+#include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/types.h>
 
index ace7068..8e797b2 100644 (file)
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
        u64             reg_id_aa64pfr0;
        u64             reg_id_aa64pfr1;
 
+       u32             reg_id_dfr0;
        u32             reg_id_isar0;
        u32             reg_id_isar1;
        u32             reg_id_isar2;
@@ -51,6 +52,10 @@ struct cpuinfo_arm64 {
        u32             reg_id_mmfr3;
        u32             reg_id_pfr0;
        u32             reg_id_pfr1;
+
+       u32             reg_mvfr0;
+       u32             reg_mvfr1;
+       u32             reg_mvfr2;
 };
 
 DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
index 8127e45..865a7e2 100644 (file)
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+       if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+               vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
 
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
index 286b1be..f9be30e 100644 (file)
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -31,6 +31,7 @@
 
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
 
@@ -123,9 +124,6 @@ struct task_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk)   do { } while (0)
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax()                    barrier()
index 49c9aef..b780c6c 100644 (file)
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           386
+#define __NR_compat_syscalls           387
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index 57b6417..07d435c 100644 (file)
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
         * If we have AArch32, we care about 32-bit features for compat. These
         * registers should be RES0 otherwise.
         */
+       diff |= CHECK(id_dfr0, boot, cur, cpu);
        diff |= CHECK(id_isar0, boot, cur, cpu);
        diff |= CHECK(id_isar1, boot, cur, cpu);
        diff |= CHECK(id_isar2, boot, cur, cpu);
@@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
        diff |= CHECK(id_pfr0, boot, cur, cpu);
        diff |= CHECK(id_pfr1, boot, cur, cpu);
 
+       diff |= CHECK(mvfr0, boot, cur, cpu);
+       diff |= CHECK(mvfr1, boot, cur, cpu);
+       diff |= CHECK(mvfr2, boot, cur, cpu);
+
        /*
         * Mismatched CPU features are a recipe for disaster. Don't even
         * pretend to support them.
@@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
 
+       info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
        info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
        info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
        info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
@@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
        info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
 
+       info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+       info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+       info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+
        cpuinfo_detect_icache_policy(info);
 
        check_local_cpu_errata();
index 6fac253..2bb4347 100644 (file)
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -326,6 +326,7 @@ void __init efi_idmap_init(void)
 
        /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
        efi_setup_idmap();
+       early_memunmap(memmap.map, memmap.map_end - memmap.map);
 }
 
 static int __init remap_region(efi_memory_desc_t *md, void **new)
@@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void)
        }
 
        mapsize = memmap.map_end - memmap.map;
-       early_memunmap(memmap.map, mapsize);
 
        if (efi_runtime_disabled()) {
                pr_info("EFI runtime services will be disabled.\n");
index fd027b1..9b6f71d 100644 (file)
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,6 +25,7 @@
 #include <linux/mm.h>
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
+#include <asm/alternative.h>
 #include <asm/insn.h>
 #include <asm/sections.h>
 
index 6762ad7..3f62b35 100644 (file)
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task)
        else
                return PERF_SAMPLE_REGS_ABI_64;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
index b809911..20fe293 100644 (file)
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p)
        request_standard_resources();
 
        efi_idmap_init();
+       early_ioremap_reset();
 
        unflatten_device_tree();
 
index 4f93c67..14944e5 100644 (file)
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -25,6 +25,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
 #include <asm/cputype.h>
+#include <asm/io.h>
 #include <asm/smp_plat.h>
 
 extern void secondary_holding_pen(void);
index fbe909f..c3ca89c 100644 (file)
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
+       lsr     x1, x1, #12
        tlbi    ipas2e1is, x1
        /*
         * We have to ensure completion of the invalidation at Stage-2,
index 70a7816..0b43265 100644 (file)
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                        if (!cpu_has_32bit_el1())
                                return -EINVAL;
                        cpu_reset = &default_regs_reset32;
-                       vcpu->arch.hcr_el2 &= ~HCR_RW;
                } else {
                        cpu_reset = &default_regs_reset;
                }
index 6f4bac9..23eada7 100644 (file)
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
index f3b51b5..95c39b9 100644 (file)
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    318 /* length of syscall table */
+#define NR_syscalls                    319 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index 4c2240c..4610795 100644 (file)
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
 #define __NR_getrandom                 1339
 #define __NR_memfd_create              1340
 #define __NR_bpf                       1341
+#define __NR_execveat                  1342
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index 615ef81..e795cb8 100644 (file)
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
 {
        return _acpi_map_lsapic(handle, physid, pcpu);
 }
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
 
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
 {
        ia64_cpu_to_sapicid[cpu] = -1;
        set_cpu_present(cpu, false);
@@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu)
 
        return (0);
 }
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
 #endif                         /* CONFIG_ACPI_HOTPLUG_CPU */
 
 #ifdef CONFIG_ACPI_NUMA
index f5e96df..fcf8b8c 100644 (file)
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1779,6 +1779,7 @@ sys_call_table:
        data8 sys_getrandom
        data8 sys_memfd_create                  // 1340
        data8 sys_bpf
+       data8 sys_execveat
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
index 19c36cb..a46f5f4 100644 (file)
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size);
 extern void reserve_crashkernel(void);
 extern void machine_kexec_mask_interrupts(void);
 
+static inline bool kdump_in_progress(void)
+{
+       return crashing_cpu >= 0;
+}
+
 #else /* !CONFIG_KEXEC */
 static inline void crash_kexec_secondary(struct pt_regs *regs) { }
 
@@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler)
        return 0;
 }
 
+static inline bool kdump_in_progress(void)
+{
+       return false;
+}
+
 #endif /* CONFIG_KEXEC */
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
index ce9577d..91062ee 100644 (file)
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp)
 SYSCALL_SPU(getrandom)
 SYSCALL_SPU(memfd_create)
 SYSCALL_SPU(bpf)
+COMPAT_SYS(execveat)
index e0da021..36b79c3 100644 (file)
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          362
+#define __NR_syscalls          363
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index f55351f..ef5b5b1 100644 (file)
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
 #define __NR_getrandom         359
 #define __NR_memfd_create      360
 #define __NR_bpf               361
+#define __NR_execveat          362
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 879b3aa..f96d1ec 100644 (file)
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image)
         * using debugger IPI.
         */
 
-       if (crashing_cpu == -1)
+       if (!kdump_in_progress())
                kexec_prepare_cpus();
 
        pr_debug("kexec: Starting switchover sequence.\n");
index 8ec017c..8b2d2dc 100644 (file)
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -700,6 +700,7 @@ void start_secondary(void *unused)
        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
+       cpu_callin_map[cpu] = 1;
 
        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
@@ -738,14 +739,6 @@ void start_secondary(void *unused)
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
 
-       /*
-        * CPU must be marked active and online before we signal back to the
-        * master, because the scheduler needs to see the cpu_online and
-        * cpu_active bits set.
-        */
-       smp_wmb();
-       cpu_callin_map[cpu] = 1;
-
        local_irq_enable();
 
        cpu_startup_entry(CPUHP_ONLINE);
index 469751d..b5682fd 100644 (file)
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -43,6 +43,7 @@
 #include <asm/trace.h>
 #include <asm/firmware.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
 #include <asm/fadump.h>
 
 #include "pseries.h"
@@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void)
                 * out to the user, but at least this will stop us from
                 * continuing on further and creating an even more
                 * difficult to debug situation.
+                *
+                * There is a known problem when kdump'ing, if cpus are offline
+                * the above call will fail. Rather than panicking again, keep
+                * going and hope the kdump kernel is also little endian, which
+                * it usually is.
                 */
-               if (rc)
+               if (rc && !kdump_in_progress())
                        panic("Could not enable big endian exceptions");
        }
 #endif
index 32040ac..afbe079 100644 (file)
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -231,7 +231,7 @@ failed:
 struct dbfs_d2fc_hdr {
        u64     len;            /* Length of d2fc buffer without header */
        u16     version;        /* Version of header */
-       char    tod_ext[16];    /* TOD clock for d2fc */
+       char    tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
        u64     count;          /* Number of VM guests in d2fc buffer */
        char    reserved[30];
 } __attribute__ ((packed));
index 37b9091..16aa0c7 100644 (file)
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
 
 static inline notrace unsigned long arch_local_save_flags(void)
 {
-       return __arch_local_irq_stosm(0x00);
+       return __arch_local_irq_stnsm(0xff);
 }
 
 static inline notrace unsigned long arch_local_irq_save(void)
index 8beee1c..98eb2a5 100644 (file)
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
        set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-#define CLOCK_TICK_RATE        1193180 /* Underlying HZ */
+#define CLOCK_TICK_RATE                1193180 /* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE   16      /* stcke writes 16 bytes */
 
 typedef unsigned long long cycles_t;
 
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
 {
-       typedef struct { char _[sizeof(clk)]; } addrtype;
+       typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
 
        asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
 }
 
 static inline unsigned long long get_tod_clock(void)
 {
-       unsigned char clk[16];
+       unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
        get_tod_clock_ext(clk);
        return *((unsigned long long *)&clk[1]);
 }
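
(The timex.h change above fixes a classic C pitfall: inside the old get_tod_clock_ext(char clk[16]), the array parameter decays to a pointer, so the sizeof(clk) used for the asm constraint covered only pointer-size bytes, not the 16 bytes stcke writes. A standalone illustration, not kernel code:)

    #include <stdio.h>

    static void takes_array(char clk[16])
    {
            /* clk is really a char *, so this is sizeof(char *), not 16 */
            printf("inside:  %zu\n", sizeof(clk));
    }

    int main(void)
    {
            char clk[16];
            printf("outside: %zu\n", sizeof(clk));  /* 16 */
            takes_array(clk);                       /* 8 on LP64 */
            return 0;
    }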
index 2b446cf..67878af 100644 (file)
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
 #define __NR_bpf               351
 #define __NR_s390_pci_mmio_write       352
 #define __NR_s390_pci_mmio_read                353
-#define NR_syscalls 354
+#define __NR_execveat          354
+#define NR_syscalls 355
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index a298724..939ec47 100644 (file)
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
 SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
index f6b3cd0..cc73280 100644 (file)
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
        return false;
 }
 
+static int check_per_event(unsigned short cause, unsigned long control,
+                          struct pt_regs *regs)
+{
+       if (!(regs->psw.mask & PSW_MASK_PER))
+               return 0;
+       /* user space single step */
+       if (control == 0)
+               return 1;
+       /* over indication for storage alteration */
+       if ((control & 0x20200000) && (cause & 0x2000))
+               return 1;
+       if (cause & 0x8000) {
+               /* all branches */
+               if ((control & 0x80800000) == 0x80000000)
+                       return 1;
+               /* branch into selected range */
+               if (((control & 0x80800000) == 0x80800000) &&
+                   regs->psw.addr >= current->thread.per_user.start &&
+                   regs->psw.addr <= current->thread.per_user.end)
+                       return 1;
+       }
+       return 0;
+}
+
 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
        int fixup = probe_get_fixup_type(auprobe->insn);
@@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
                if (regs->psw.addr - utask->xol_vaddr == ilen)
                        regs->psw.addr = utask->vaddr + ilen;
        }
-       /* If per tracing was active generate trap */
-       if (regs->psw.mask & PSW_MASK_PER)
-               do_per_trap(regs);
+       if (check_per_event(current->thread.per_event.cause,
+                           current->thread.per_user.control, regs)) {
+               /* fix per address */
+               current->thread.per_event.address = utask->vaddr;
+               /* trigger per event */
+               set_pt_regs_flag(regs, PIF_PER_TRAP);
+       }
        return 0;
 }
 
@@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
        clear_thread_flag(TIF_UPROBE_SINGLESTEP);
        regs->int_code = auprobe->saved_int_code;
        regs->psw.addr = current->utask->vaddr;
+       current->thread.per_event.address = current->utask->vaddr;
 }
 
 unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
@@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
        __rc;                                           \
 })
 
-#define emu_store_ril(ptr, input)                      \
+#define emu_store_ril(regs, ptr, input)                        \
 ({                                                     \
        unsigned int mask = sizeof(*(ptr)) - 1;         \
+       __typeof__(ptr) __ptr = (ptr);                  \
        int __rc = 0;                                   \
                                                        \
        if (!test_facility(34))                         \
                __rc = EMU_ILLEGAL_OP;                  \
-       else if ((u64 __force)ptr & mask)               \
+       else if ((u64 __force)__ptr & mask)             \
                __rc = EMU_SPECIFICATION;               \
-       else if (put_user(*(input), ptr))               \
+       else if (put_user(*(input), __ptr))             \
                __rc = EMU_ADDRESSING;                  \
+       if (__rc == 0)                                  \
+               sim_stor_event(regs, __ptr, mask + 1);  \
        __rc;                                           \
 })
 
@@ -198,6 +230,25 @@ union split_register {
 };
 
 /*
+ * If user per registers are setup to trace storage alterations and an
+ * emulated store took place on a fitting address a user trap is generated.
+ */
+static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
+{
+       if (!(regs->psw.mask & PSW_MASK_PER))
+               return;
+       if (!(current->thread.per_user.control & PER_EVENT_STORE))
+               return;
+       if ((void *)current->thread.per_user.start > (addr + len))
+               return;
+       if ((void *)current->thread.per_user.end < addr)
+               return;
+       current->thread.per_event.address = regs->psw.addr;
+       current->thread.per_event.cause = PER_EVENT_STORE >> 16;
+       set_pt_regs_flag(regs, PIF_PER_TRAP);
+}
+
+/*
  * pc relative instructions are emulated, since parameters may not be
  * accessible from the xol area due to range limitations.
  */
@@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
                        rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
                        break;
                case 0x07: /* sthrl */
-                       rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+                       rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
                        break;
                case 0x0b: /* stgrl */
-                       rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+                       rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0f: /* strl */
-                       rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+                       rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
                        break;
                }
                break;
index 7f0089d..e34122e 100644 (file)
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk)
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, system;
 
-       WARN_ON_ONCE(!irqs_disabled());
-
        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
index be99357..3cf8cc0 100644 (file)
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
 {
        struct page *page;
-       unsigned long offset;
+       unsigned long offset, mask;
 
        offset = (unsigned long) entry / sizeof(unsigned long);
        offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
-       page = pmd_to_page((pmd_t *) entry);
+       mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+       page = virt_to_page((void *)((unsigned long) entry & mask));
        return page->index + offset;
 }
 
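(The pgtable.c hunk replaces pmd_to_page() with explicit round-down arithmetic: masking the entry address with ~(PTRS_PER_PMD * sizeof(pmd_t) - 1) yields the start of the containing table. A minimal user-space analogue of that arithmetic, with made-up constants standing in for the kernel's:)

    #include <stdint.h>
    #include <stdio.h>

    #define ENTRIES    512                 /* stands in for PTRS_PER_PMD */
    #define ENTRY_SIZE 8                   /* stands in for sizeof(pmd_t) */

    int main(void)
    {
            uintptr_t entry = 0x12345678;  /* address of one table entry */
            uintptr_t mask  = ~(uintptr_t)(ENTRIES * ENTRY_SIZE - 1);

            /* rounds down to the 4 KiB-aligned base of the table */
            printf("table base: %#lx\n", (unsigned long)(entry & mask));
            return 0;
    }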
index c52ac77..524496d 100644 (file)
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
                EMIT4_DISP(0x88500000, K);
                break;
        case BPF_ALU | BPF_NEG: /* A = -A */
-               /* lnr %r5,%r5 */
-               EMIT2(0x1155);
+               /* lcr %r5,%r5 */
+               EMIT2(0x1355);
                break;
        case BPF_JMP | BPF_JA: /* ip += K */
                offset = addrs[i + K] + jit->start - jit->prg;
@@ -502,8 +502,8 @@ branch:             if (filter->jt == filter->jf) {
 xbranch:       /* Emit compare if the branch targets are different */
                if (filter->jt != filter->jf) {
                        jit->seen |= SEEN_XREG;
-                       /* cr %r5,%r12 */
-                       EMIT2(0x195c);
+                       /* clr %r5,%r12 */
+                       EMIT2(0x155c);
                }
                goto branch;
        case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
index 5b016e2..3db07f3 100644 (file)
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -51,6 +51,7 @@ targets += cpustr.h
 $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
        $(call if_changed,cpustr)
 endif
+clean-files += cpustr.h
 
 # ---------------------------------------------------------------------------
 
index fd0f848..5a4a089 100644 (file)
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
 
 obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
 obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
-obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
 obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
@@ -46,6 +45,7 @@ endif
 ifeq ($(avx2_supported),yes)
        obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
        obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
+       obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
 endif
 
 aes-i586-y := aes-i586-asm_32.o aes_glue.o
index 2df2a02..a916c4a 100644 (file)
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -208,7 +208,7 @@ ddq_add_8:
 
        .if (klen == KEY_128)
                .if (load_keys)
-                       vmovdqa 3*16(p_keys), xkeyA
+                       vmovdqa 3*16(p_keys), xkey4
                .endif
        .else
                vmovdqa 3*16(p_keys), xkeyA
@@ -224,7 +224,7 @@ ddq_add_8:
        add     $(16*by), p_in
 
        .if (klen == KEY_128)
-               vmovdqa 4*16(p_keys), xkey4
+               vmovdqa 4*16(p_keys), xkeyB
        .else
                .if (load_keys)
                        vmovdqa 4*16(p_keys), xkey4
@@ -234,7 +234,12 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkeyA, var_xdata, var_xdata             /* key 3 */
+               /* key 3 */
+               .if (klen == KEY_128)
+                       vaesenc xkey4, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyA, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
@@ -243,13 +248,18 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkey4, var_xdata, var_xdata             /* key 4 */
+               /* key 4 */
+               .if (klen == KEY_128)
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .else
+                       vaesenc xkey4, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
        .if (klen == KEY_128)
                .if (load_keys)
-                       vmovdqa 6*16(p_keys), xkeyB
+                       vmovdqa 6*16(p_keys), xkey8
                .endif
        .else
                vmovdqa 6*16(p_keys), xkeyB
@@ -267,12 +277,17 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkeyB, var_xdata, var_xdata             /* key 6 */
+               /* key 6 */
+               .if (klen == KEY_128)
+                       vaesenc xkey8, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
        .if (klen == KEY_128)
-               vmovdqa 8*16(p_keys), xkey8
+               vmovdqa 8*16(p_keys), xkeyB
        .else
                .if (load_keys)
                        vmovdqa 8*16(p_keys), xkey8
@@ -288,7 +303,7 @@ ddq_add_8:
 
        .if (klen == KEY_128)
                .if (load_keys)
-                       vmovdqa 9*16(p_keys), xkeyA
+                       vmovdqa 9*16(p_keys), xkey12
                .endif
        .else
                vmovdqa 9*16(p_keys), xkeyA
@@ -297,7 +312,12 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkey8, var_xdata, var_xdata             /* key 8 */
+               /* key 8 */
+               .if (klen == KEY_128)
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .else
+                       vaesenc xkey8, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
@@ -306,7 +326,12 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkeyA, var_xdata, var_xdata             /* key 9 */
+               /* key 9 */
+               .if (klen == KEY_128)
+                       vaesenc xkey12, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyA, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
@@ -412,7 +437,6 @@ ddq_add_8:
 /* main body of aes ctr load */
 
 .macro do_aes_ctrmain key_len
-
        cmp     $16, num_bytes
        jb      .Ldo_return2\key_len
 
index e7e9682..f556c48 100644 (file)
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
 
        /*
         * Load per CPU data from GDT.  LSL is faster than RDTSCP and
-        * works on all CPUs.
+        * works on all CPUs.  This is volatile so that it orders
+        * correctly wrt barrier() and to keep gcc from cleverly
+        * hoisting it out of the calling function.
         */
-       asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+       asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 
        return p;
 }
index 4433a4b..d162636 100644 (file)
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
 {
        return _acpi_map_lsapic(handle, physid, pcpu);
 }
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
 
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
 {
 #ifdef CONFIG_ACPI_NUMA
        set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
@@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu)
 
        return (0);
 }
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
 #endif                         /* CONFIG_ACPI_HOTPLUG_CPU */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
index e27b49d..80091ae 100644 (file)
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -66,3 +66,4 @@ targets += capflags.c
 $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
        $(call if_changed,mkcapflags)
 endif
+clean-files += capflags.c
index e2b22df..36d99a3 100644 (file)
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -28,7 +28,7 @@ function dump_array()
                # If the /* comment */ starts with a quote string, grab that.
                VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
                [ -z "$VALUE" ] && VALUE="\"$NAME\""
-               [ "$VALUE" == '""' ] && continue
+               [ "$VALUE" = '""' ] && continue
 
                # Name is uppercase, VALUE is all lowercase
                VALUE="$(echo "$VALUE" | tr A-Z a-z)"
index 18eb78b..863d9b0 100644 (file)
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -17,7 +17,7 @@
 #define UNCORE_PCI_DEV_TYPE(data)      ((data >> 8) & 0xff)
 #define UNCORE_PCI_DEV_IDX(data)       (data & 0xff)
 #define UNCORE_EXTRA_PCI_DEV           0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX       2
+#define UNCORE_EXTRA_PCI_DEV_MAX       3
 
 /* support up to 8 sockets */
 #define UNCORE_SOCKET_MAX              8
index 745b158..21af614 100644 (file)
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void)
 enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
+       HSWEP_PCI_PCU_3,
 };
 
 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void)
 {
        if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+
+       /* Detect 6-8 core systems with only two SBOXes */
+       if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
+               u32 capid4;
+
+               pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
+                                     0x94, &capid4);
+               if (((capid4 >> 6) & 0x3) == 0)
+                       hswep_uncore_sbox.num_boxes = 2;
+       }
+
        uncore_msr_uncores = hswep_msr_uncores;
 }
 
@@ -2287,6 +2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
+       { /* PCU.3 (for Capability registers) */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  HSWEP_PCI_PCU_3),
+       },
        { /* end: all zeroes */ }
 };
 
index e309cc5..781861c 100644 (file)
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task)
 {
        return PERF_SAMPLE_REGS_ABI_32;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
 #else /* CONFIG_X86_64 */
 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
                       (1ULL << PERF_REG_X86_ES) | \
@@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task)
        else
                return PERF_SAMPLE_REGS_ABI_64;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       struct pt_regs *user_regs = task_pt_regs(current);
+
+       /*
+        * If we're in an NMI that interrupted task_pt_regs setup, then
+        * we can't sample user regs at all.  This check isn't really
+        * sufficient, though, as we could be in an NMI inside an interrupt
+        * that happened during task_pt_regs setup.
+        */
+       if (regs->sp > (unsigned long)&user_regs->r11 &&
+           regs->sp <= (unsigned long)(user_regs + 1)) {
+               regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
+               regs_user->regs = NULL;
+               return;
+       }
+
+       /*
+        * RIP, flags, and the argument registers are usually saved.
+        * orig_ax is probably okay, too.
+        */
+       regs_user_copy->ip = user_regs->ip;
+       regs_user_copy->cx = user_regs->cx;
+       regs_user_copy->dx = user_regs->dx;
+       regs_user_copy->si = user_regs->si;
+       regs_user_copy->di = user_regs->di;
+       regs_user_copy->r8 = user_regs->r8;
+       regs_user_copy->r9 = user_regs->r9;
+       regs_user_copy->r10 = user_regs->r10;
+       regs_user_copy->r11 = user_regs->r11;
+       regs_user_copy->orig_ax = user_regs->orig_ax;
+       regs_user_copy->flags = user_regs->flags;
+
+       /*
+        * Don't even try to report the "rest" regs.
+        */
+       regs_user_copy->bx = -1;
+       regs_user_copy->bp = -1;
+       regs_user_copy->r12 = -1;
+       regs_user_copy->r13 = -1;
+       regs_user_copy->r14 = -1;
+       regs_user_copy->r15 = -1;
+
+       /*
+        * For this to be at all useful, we need a reasonable guess for
+        * sp and the ABI.  Be careful: we're in NMI context, and we're
+        * considering current to be the current task, so we should
+        * be careful not to look at any other percpu variables that might
+        * change during context switches.
+        */
+       if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
+           task_thread_info(current)->status & TS_COMPAT) {
+               /* Easy case: we're in a compat syscall. */
+               regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
+               regs_user_copy->sp = user_regs->sp;
+               regs_user_copy->cs = user_regs->cs;
+               regs_user_copy->ss = user_regs->ss;
+       } else if (user_regs->orig_ax != -1) {
+               /*
+                * We're probably in a 64-bit syscall.
+                * Warning: this code is severely racy.  At least it's better
+                * than just blindly copying user_regs.
+                */
+               regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
+               regs_user_copy->sp = this_cpu_read(old_rsp);
+               regs_user_copy->cs = __USER_CS;
+               regs_user_copy->ss = __USER_DS;
+               regs_user_copy->cx = -1;  /* usually contains garbage */
+       } else {
+               /* We're probably in an interrupt or exception. */
+               regs_user->abi = user_64bit_mode(user_regs) ?
+                       PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
+               regs_user_copy->sp = user_regs->sp;
+               regs_user_copy->cs = user_regs->cs;
+               regs_user_copy->ss = user_regs->ss;
+       }
+
+       regs_user->regs = regs_user_copy;
+}
 #endif /* CONFIG_X86_32 */
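
The guard at the top of the 64-bit perf_get_regs_user() above is a half-open interval test: if the interrupted stack pointer lies inside the part of the task's pt_regs frame that the syscall entry path may still be writing, the sample is abandoned. A minimal userspace sketch of that test; the struct layout and field names here are stand-ins, not the real pt_regs:

#include <stdio.h>

struct fake_pt_regs {
        unsigned long r11;      /* stand-in for the last early-saved field */
        unsigned long rest[8];  /* remaining fields elided */
};

/* True if sp points into (&regs->r11, regs + 1]: the frame may be
 * half-written, so the register image cannot be trusted. */
static int inside_setup_window(unsigned long sp, struct fake_pt_regs *regs)
{
        return sp > (unsigned long)&regs->r11 &&
               sp <= (unsigned long)(regs + 1);
}

int main(void)
{
        struct fake_pt_regs regs;

        printf("inside: %d\n",
               inside_setup_window((unsigned long)&regs.rest[3], &regs));
        printf("outside: %d\n",
               inside_setup_window((unsigned long)&regs - 128, &regs));
        return 0;
}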
index 2480978..1313ae6 100644 (file)
@@ -28,7 +28,7 @@
 
 /* Verify next sizeof(t) bytes can be on the same instruction */
 #define validate_next(t, insn, n)      \
-       ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr)
+       ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
 
 #define __get_next(t, insn)    \
        ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
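
The one-character change above fixes an off-by-one: next_byte + sizeof(t) + n is the address one past the last byte the fetch would consume, so it may legitimately equal end_kaddr (itself one past the buffer). With '<', an instruction ending exactly on the last byte of the buffer was wrongly rejected. A hedged userspace illustration of the boundary condition:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* end points one past the last valid byte, like insn->end_kaddr. */
static int can_consume(const uint8_t *next, size_t want, const uint8_t *end)
{
        return next + want <= end;      /* '<' would reject the final byte */
}

int main(void)
{
        uint8_t buf[4];

        assert(can_consume(buf + 3, 1, buf + 4));       /* last byte: valid */
        assert(!can_consume(buf + 4, 1, buf + 4));      /* past the end */
        return 0;
}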
index a97ee08..08a7d31 100644 (file)
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
 static unsigned long __init get_new_step_size(unsigned long step_size)
 {
        /*
-        * Explain why we shift by 5 and why we don't have to worry about
-        * 'step_size << 5' overflowing:
-        *
-        * initial mapped size is PMD_SIZE (2M).
+        * Initial mapped size is PMD_SIZE (2M).
         * We cannot set step_size to be PUD_SIZE (1G) yet.
         * In the worst case, when we cross the 1G boundary, and
         * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
-        * to map 1G range with PTE. Use 5 as shift for now.
+        * to map 1G range with PTE. Hence we use one less than the
+        * difference of page table level shifts.
         *
-        * Don't need to worry about overflow, on 32bit, when step_size
-        * is 0, round_down() returns 0 for start, and that turns it
-        * into 0x100000000ULL.
+        * Don't need to worry about overflow in the top-down case, on 32bit,
+        * when step_size is 0, round_down() returns 0 for start, and that
+        * turns it into 0x100000000ULL.
+        * In the bottom-up case, round_up(x, 0) returns 0 though too, which
+        * needs to be taken into consideration by the code below.
         */
-       return step_size << 5;
+       return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
 }
 
 /**
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
        unsigned long step_size;
        unsigned long addr;
        unsigned long mapped_ram_size = 0;
-       unsigned long new_mapped_ram_size;
 
        /* xen has big range in reserved near end of ram, skip it at first.*/
        addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
                                start = map_start;
                } else
                        start = map_start;
-               new_mapped_ram_size = init_range_memory_mapping(start,
+               mapped_ram_size += init_range_memory_mapping(start,
                                                        last_start);
                last_start = start;
                min_pfn_mapped = last_start >> PAGE_SHIFT;
-               /* only increase step_size after big range get mapped */
-               if (new_mapped_ram_size > mapped_ram_size)
+               if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
-               mapped_ram_size += new_mapped_ram_size;
        }
 
        if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
 static void __init memory_map_bottom_up(unsigned long map_start,
                                        unsigned long map_end)
 {
-       unsigned long next, new_mapped_ram_size, start;
+       unsigned long next, start;
        unsigned long mapped_ram_size = 0;
        /* step_size needs to be small so pgt_buf from BRK can cover it */
        unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
         * for page table.
         */
        while (start < map_end) {
-               if (map_end - start > step_size) {
+               if (step_size && map_end - start > step_size) {
                        next = round_up(start + 1, step_size);
                        if (next > map_end)
                                next = map_end;
-               } else
+               } else {
                        next = map_end;
+               }
 
-               new_mapped_ram_size = init_range_memory_mapping(start, next);
+               mapped_ram_size += init_range_memory_mapping(start, next);
                start = next;
 
-               if (new_mapped_ram_size > mapped_ram_size)
+               if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
-               mapped_ram_size += new_mapped_ram_size;
        }
 }
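
Taken together, the hunks above replace the magic "shift by 5" with a shift derived from the page-table geometry and make the growth test tolerant of step_size reaching 0: step_size now grows whenever the cumulative mapped size has caught up with it, instead of comparing per-iteration deltas. Assuming the usual x86-64 constants (PMD_SHIFT = 21, PAGE_SHIFT = 12), the new shift is 21 - 12 - 1 = 8, i.e. each growth step multiplies step_size by 256. A small sketch of just that arithmetic:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed x86-64 values */
#define PMD_SHIFT  21
#define PMD_SIZE   (1UL << PMD_SHIFT)

static unsigned long get_new_step_size(unsigned long step_size)
{
        /* 21 - 12 - 1 = 8: each step multiplies the size by 256 */
        return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

int main(void)
{
        unsigned long step = PMD_SIZE;
        int i;

        for (i = 0; i < 3; i++) {
                printf("step %d: %#lx\n", i, step);     /* 2M, 512M, 128G */
                step = get_new_step_size(step);
        }
        return 0;
}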
 
index 009495b..1c9f750 100644 (file)
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #else
        unsigned long addr, end;
        unsigned offset;
-       end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+       /*
+        * Round up the start address.  It can start out unaligned as a result
+        * of stack start randomization.
+        */
+       start = PAGE_ALIGN(start);
+
+       /* Round the lowest possible end address up to a PMD boundary. */
+       end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;
-       /* This loses some more bits than a modulo, but is cheaper */
-       offset = get_random_int() & (PTRS_PER_PTE - 1);
-       addr = start + (offset << PAGE_SHIFT);
-       if (addr >= end)
-               addr = end;
+
+       if (end > start) {
+               offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+               addr = start + (offset << PAGE_SHIFT);
+       } else {
+               addr = start;
+       }
 
        /*
-        * page-align it here so that get_unmapped_area doesn't
-        * align it wrongfully again to the next page. addr can come in 4K
-        * unaligned here as a result of stack start randomization.
+        * Forcibly align the final address in case we have a hardware
+        * issue that requires alignment for performance reasons.
         */
-       addr = PAGE_ALIGN(addr);
        addr = align_vdso_addr(addr);
 
        return addr;
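
The rewritten vdso_addr() first page-aligns start, then draws the offset with a modulo over the number of candidate pages, so every page-aligned slot between start and end is reachable; the old mask-and-clamp biased the distribution toward the end of the PMD. A sketch of the selection, with get_random_int() stubbed out for userspace:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

static unsigned int get_random_int(void)        /* stand-in for the kernel's */
{
        return (unsigned int)rand();
}

/* start and end are page-aligned, and end is the highest allowed
 * start address (len already subtracted), as in the hunk above. */
static unsigned long pick_vdso_addr(unsigned long start, unsigned long end)
{
        if (end > start) {
                unsigned int offset =
                        get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
                return start + ((unsigned long)offset << PAGE_SHIFT);
        }
        return start;
}

int main(void)
{
        printf("%#lx\n", pick_vdso_addr(0x7f0000000000UL, 0x7f0000004000UL));
        return 0;
}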
index 6bf3a13..78a881b 100644 (file)
@@ -40,6 +40,7 @@
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
 #include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
 #include <xen/interface/xen-mca.h>
 #include <xen/features.h>
 #include <xen/page.h>
@@ -66,6 +67,7 @@
 #include <asm/reboot.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
        .emergency_restart = xen_emergency_restart,
 };
 
+static unsigned char xen_get_nmi_reason(void)
+{
+       unsigned char reason = 0;
+
+       /* Construct a value which looks like it came from port 0x61. */
+       if (test_bit(_XEN_NMIREASON_io_error,
+                    &HYPERVISOR_shared_info->arch.nmi_reason))
+               reason |= NMI_REASON_IOCHK;
+       if (test_bit(_XEN_NMIREASON_pci_serr,
+                    &HYPERVISOR_shared_info->arch.nmi_reason))
+               reason |= NMI_REASON_SERR;
+
+       return reason;
+}
+
 static void __init xen_boot_params_init_edd(void)
 {
 #if IS_ENABLED(CONFIG_EDD)
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
        pv_info = xen_info;
        pv_init_ops = xen_init_ops;
        pv_apic_ops = xen_apic_ops;
-       if (!xen_pvh_domain())
+       if (!xen_pvh_domain()) {
                pv_cpu_ops = xen_cpu_ops;
 
+               x86_platform.get_nmi_reason = xen_get_nmi_reason;
+       }
+
        if (xen_feature(XENFEAT_auto_translated_physmap))
                x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
        else
index edbc7a6..70fb507 100644 (file)
@@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
        return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
 }
 
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
 {
-       BUG_ON(!slab_is_available());
+       if (unlikely(!slab_is_available())) {
+               free_bootmem((unsigned long)p, PAGE_SIZE);
+               return;
+       }
+
        free_page((unsigned long)p);
 }
 
@@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
                        p2m_missing_pte : p2m_identity_pte;
                for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
                        pmdp = populate_extra_pmd(
-                               (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+                               (unsigned long)(p2m + pfn) + i * PMD_SIZE);
                        set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
                }
        }
@@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
  * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an individual
  * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
  */
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
 {
        pte_t *ptechk;
-       pte_t *pteret = ptep;
        pte_t *pte_newpg[PMDS_PER_MID_PAGE];
        pmd_t *pmdp;
        unsigned int level;
@@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
                if (ptechk == pte_pg) {
                        set_pmd(pmdp,
                                __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
-                       if (vaddr == (addr & ~(PMD_SIZE - 1)))
-                               pteret = pte_offset_kernel(pmdp, addr);
                        pte_newpg[i] = NULL;
                }
 
@@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
                vaddr += PMD_SIZE;
        }
 
-       return pteret;
+       return lookup_address(addr, &level);
 }
 
 /*
@@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
 
        if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
                /* PMD level is missing, allocate a new one */
-               ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+               ptep = alloc_p2m_pmd(addr, pte_pg);
                if (!ptep)
                        return false;
        }
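
The first p2m hunk makes free_p2m_page() symmetric with alloc_p2m_page(): both sides must cope with the early-boot window in which the slab allocator is not up yet and pages come from (and must return to) the boot allocator. The shape of the pattern, with the kernel calls stubbed out as prints:

#include <stdbool.h>
#include <stdio.h>

static bool slab_available;             /* stand-in for slab_is_available() */

static void free_p2m_page_sketch(void *p)
{
        if (!slab_available) {
                printf("free_bootmem(%p)\n", p);        /* early-boot path */
                return;
        }
        printf("free_page(%p)\n", p);                   /* normal path */
}

int main(void)
{
        int page;

        free_p2m_page_sketch(&page);    /* boot-time branch */
        slab_available = true;
        free_p2m_page_sketch(&page);    /* runtime branch */
        return 0;
}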
index dfd77de..865e56c 100644 (file)
@@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
 {
        int i;
-       unsigned long addr = PFN_PHYS(pfn);
+       phys_addr_t addr = PFN_PHYS(pfn);
 
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
        int i;
 
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+               if (!xen_extra_mem[i].size)
+                       continue;
                pfn_s = PFN_DOWN(xen_extra_mem[i].start);
                pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
                for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-       unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
-       unsigned long *released)
+       unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
 {
-       unsigned long len = 0;
        unsigned long pfn, end;
        int ret;
 
        WARN_ON(start_pfn > end_pfn);
 
+       /* Release pages first. */
        end = min(end_pfn, nr_pages);
        for (pfn = start_pfn; pfn < end; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
                WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 
                if (ret == 1) {
+                       (*released)++;
                        if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
                                break;
-                       len++;
                } else
                        break;
        }
 
-       /* Need to release pages first */
-       *released += len;
-       *identity += set_phys_range_identity(start_pfn, end_pfn);
+       set_phys_range_identity(start_pfn, end_pfn);
 }
 
 /*
@@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
        }
 
        /* Update kernel mapping, but not for highmem. */
-       if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+       if (pfn >= PFN_UP(__pa(high_memory - 1)))
                return;
 
        if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
        unsigned long ident_pfn_iter, remap_pfn_iter;
        unsigned long ident_end_pfn = start_pfn + size;
        unsigned long left = size;
-       unsigned long ident_cnt = 0;
        unsigned int i, chunk;
 
        WARN_ON(size == 0);
@@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
                xen_remap_mfn = mfn;
 
                /* Set identity map */
-               ident_cnt += set_phys_range_identity(ident_pfn_iter,
-                       ident_pfn_iter + chunk);
+               set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
 
                left -= chunk;
        }
@@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
 static unsigned long __init xen_set_identity_and_remap_chunk(
         const struct e820entry *list, size_t map_size, unsigned long start_pfn,
        unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
-       unsigned long *identity, unsigned long *released)
+       unsigned long *released, unsigned long *remapped)
 {
        unsigned long pfn;
        unsigned long i = 0;
@@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                /* Do not remap pages beyond the current allocation */
                if (cur_pfn >= nr_pages) {
                        /* Identity map remaining pages */
-                       *identity += set_phys_range_identity(cur_pfn,
-                               cur_pfn + size);
+                       set_phys_range_identity(cur_pfn, cur_pfn + size);
                        break;
                }
                if (cur_pfn + size > nr_pages)
@@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                if (!remap_range_size) {
                        pr_warning("Unable to find available pfn range, not remapping identity pages\n");
                        xen_set_identity_and_release_chunk(cur_pfn,
-                               cur_pfn + left, nr_pages, identity, released);
+                               cur_pfn + left, nr_pages, released);
                        break;
                }
                /* Adjust size to fit in current e820 RAM region */
@@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                /* Update variables to reflect new mappings. */
                i += size;
                remap_pfn += size;
-               *identity += size;
+               *remapped += size;
        }
 
        /*
@@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 
 static void __init xen_set_identity_and_remap(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages,
-       unsigned long *released)
+       unsigned long *released, unsigned long *remapped)
 {
        phys_addr_t start = 0;
-       unsigned long identity = 0;
        unsigned long last_pfn = nr_pages;
        const struct e820entry *entry;
        unsigned long num_released = 0;
+       unsigned long num_remapped = 0;
        int i;
 
        /*
@@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
                                last_pfn = xen_set_identity_and_remap_chunk(
                                                list, map_size, start_pfn,
                                                end_pfn, nr_pages, last_pfn,
-                                               &identity, &num_released);
+                                               &num_released, &num_remapped);
                        start = end;
                }
        }
 
        *released = num_released;
+       *remapped = num_remapped;
 
-       pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
        pr_info("Released %ld page(s)\n", num_released);
 }
 
@@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long extra_pages = 0;
+       unsigned long remapped_pages;
        int i;
        int op;
 
@@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
         * underlying RAM.
         */
        xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
-                                  &xen_released_pages);
+                                  &xen_released_pages, &remapped_pages);
 
        extra_pages += xen_released_pages;
+       extra_pages += remapped_pages;
 
        /*
         * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
index f473d26..6908734 100644 (file)
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =
 
 struct xen_clock_event_device {
        struct clock_event_device evt;
-       char *name;
+       char name[16];
 };
 static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
 
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
        if (evt->irq >= 0) {
                unbind_from_irqhandler(evt->irq, NULL);
                evt->irq = -1;
-               kfree(per_cpu(xen_clock_events, cpu).name);
-               per_cpu(xen_clock_events, cpu).name = NULL;
        }
 }
 
 void xen_setup_timer(int cpu)
 {
-       char *name;
-       struct clock_event_device *evt;
+       struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
+       struct clock_event_device *evt = &xevt->evt;
        int irq;
 
-       evt = &per_cpu(xen_clock_events, cpu).evt;
        WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
        if (evt->irq >= 0)
                xen_teardown_timer(cpu);
 
        printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
 
-       name = kasprintf(GFP_KERNEL, "timer%d", cpu);
-       if (!name)
-               name = "<timer kasprintf failed>";
+       snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
 
        irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
                                      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
                                      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
-                                     name, NULL);
+                                     xevt->name, NULL);
        (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
 
        memcpy(evt, xen_clockevent, sizeof(*evt));
 
        evt->cpumask = cpumask_of(cpu);
        evt->irq = irq;
-       per_cpu(xen_clock_events, cpu).name = name;
 }
 
 
 void xen_setup_cpu_clockevents(void)
 {
-       BUG_ON(preemptible());
-
        clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
 }
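
Embedding a 16-byte name array in the per-CPU structure removes the kasprintf() allocation (and its failure fallback) from setup, and the kfree() bookkeeping from teardown; "timer%d" always fits. A reduced sketch of the pattern, with the structs pared down to the essentials:

#include <stdio.h>

struct clock_event_sketch {
        int irq;
};

struct xen_clock_event_sketch {
        struct clock_event_sketch evt;
        char name[16];                  /* "timerNNN" always fits */
};

int main(void)
{
        struct xen_clock_event_sketch xevt = { .evt.irq = -1 };

        /* snprintf() into the embedded buffer cannot fail under memory
         * pressure the way kasprintf() could, and teardown has nothing
         * left to free. */
        snprintf(xevt.name, sizeof(xevt.name), "timer%d", 3);
        printf("%s\n", xevt.name);
        return 0;
}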
 
index 30f6153..3ad4055 100644 (file)
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+       if (q->mq_ops)
+               blk_mq_wake_waiters(q);
+       else {
+               struct request_list *rl;
+
+               blk_queue_for_each_rl(rl, q) {
+                       if (rl->rq_pool) {
+                               wake_up(&rl->wait[BLK_RW_SYNC]);
+                               wake_up(&rl->wait[BLK_RW_ASYNC]);
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
        /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
-       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+       blk_set_queue_dying(q);
        spin_lock_irq(lock);
 
        /*
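
blk_set_queue_dying() exists so that marking the queue dying and waking everyone who might sleep on it happen together: tag waiters on the multiqueue path, and the sync and async request_list waitqueues on the legacy path. Woken sleepers re-check the flag and fail out instead of waiting forever. The publish-then-broadcast shape, sketched with pthreads (all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waiters = PTHREAD_COND_INITIALIZER;
static bool dying;

/* Publish the flag first, then wake every sleeper so it can observe
 * the flag and bail out instead of waiting for a request that will
 * never be freed. */
static void set_queue_dying_sketch(void)
{
        pthread_mutex_lock(&lock);
        dying = true;
        pthread_cond_broadcast(&waiters);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        set_queue_dying_sketch();
        printf("dying=%d\n", dying);
        return 0;
}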
index 32e8dbb..60c9d4a 100644 (file)
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 }
 
 /*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wakeup all potentially sleeping on tags
  */
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
 
                wake_index = bt_index_inc(wake_index);
        }
+
+       if (include_reserve) {
+               bt = &tags->breserved_tags;
+               if (waitqueue_active(&bt->bs[0].wait))
+                       wake_up(&bt->bs[0].wait);
+       }
 }
 
 /*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 
        atomic_dec(&tags->active_queues);
 
-       blk_mq_tag_wakeup_all(tags);
+       blk_mq_tag_wakeup_all(tags, false);
 }
 
 /*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
         * static and should never need resizing.
         */
        bt_update_count(&tags->bitmap_tags, tdepth);
-       blk_mq_tag_wakeup_all(tags);
+       blk_mq_tag_wakeup_all(tags, false);
        return 0;
 }
 
index 6206ed1..a6fa0fc 100644 (file)
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 
 enum {
        BLK_MQ_TAG_CACHE_MIN    = 1,
index da1ab56..2f95747 100644 (file)
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
        wake_up_all(&q->mq_freeze_wq);
 }
 
-static void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        bool freeze;
 
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
                blk_mq_run_queues(q, false);
        }
 }
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
        blk_mq_freeze_queue_wait(q);
 }
 
-static void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue(struct request_queue *q)
 {
        bool wake;
 
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
                wake_up_all(&q->mq_freeze_wq);
        }
 }
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+
+       queue_for_each_hw_ctx(q, hctx, i)
+               if (blk_mq_hw_queue_mapped(hctx))
+                       blk_mq_tag_wakeup_all(hctx->tags, true);
+
+       /*
+        * If we are called because the queue has now been marked as
+        * dying, we need to ensure that processes currently waiting on
+        * the queue are notified as well.
+        */
+       wake_up_all(&q->mq_freeze_wq);
+}
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
                ctx = alloc_data.ctx;
        }
        blk_mq_put_ctx(ctx);
-       if (!rq)
+       if (!rq) {
+               blk_mq_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
+       }
        return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+int blk_mq_request_started(struct request *rq)
+{
+       return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_started);
+
 void blk_mq_start_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 }
 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
+void blk_mq_cancel_requeue_work(struct request_queue *q)
+{
+       cancel_work_sync(&q->requeue_work);
+}
+EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
        kblockd_schedule_work(&q->requeue_work);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_abort_requeue_list(struct request_queue *q)
+{
+       unsigned long flags;
+       LIST_HEAD(rq_list);
+
+       spin_lock_irqsave(&q->requeue_lock, flags);
+       list_splice_init(&q->requeue_list, &rq_list);
+       spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+       while (!list_empty(&rq_list)) {
+               struct request *rq;
+
+               rq = list_first_entry(&rq_list, struct request, queuelist);
+               list_del_init(&rq->queuelist);
+               rq->errors = -EIO;
+               blk_mq_end_request(rq, rq->errors);
+       }
+}
+EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+
 static inline bool is_flush_request(struct request *rq,
                struct blk_flush_queue *fq, unsigned int tag)
 {
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
                break;
        }
 }
-               
+
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
 {
        struct blk_mq_timeout_data *data = priv;
 
-       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+               /*
+                * If a request wasn't started before the queue was
+                * marked dying, kill it here or it'll go unnoticed.
+                */
+               if (unlikely(blk_queue_dying(rq->q))) {
+                       rq->errors = -EIO;
+                       blk_mq_complete_request(rq);
+               }
+               return;
+       }
+       if (rq->cmd_flags & REQ_NO_TIMEOUT)
                return;
 
        if (time_after_eq(jiffies, rq->deadline)) {
@@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
        hctx->queue = q;
        hctx->queue_num = hctx_idx;
        hctx->flags = set->flags;
-       hctx->cmd_size = set->cmd_size;
 
        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                        blk_mq_hctx_notify, hctx);
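
Among the hunks above, the blk_mq_alloc_request() change fixes a reference leak: the function takes a queue usage reference on entry, and the failure path returned without dropping it, pinning the queue. The pairing discipline, reduced to a plain counter (names are illustrative, not the block layer's API):

#include <stdio.h>

static int usage_count;                 /* stand-in for the percpu ref */

static int queue_enter(void)  { usage_count++; return 0; }
static void queue_exit(void)  { usage_count--; }

static void *alloc_request_sketch(int will_fail)
{
        static int request;

        if (queue_enter())
                return NULL;
        if (will_fail) {
                queue_exit();           /* the fix: drop the ref on failure */
                return NULL;
        }
        return &request;
}

int main(void)
{
        alloc_request_sketch(1);
        printf("usage_count after failed alloc: %d\n", usage_count);    /* 0 */
        return 0;
}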
index 206230e..4f4f943 100644 (file)
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
                struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
index 56c0258..246dfb1 100644 (file)
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
        struct request_queue *q = req->q;
        unsigned long expiry;
 
+       if (req->cmd_flags & REQ_NO_TIMEOUT)
+               return;
+
        /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
        if (!q->mq_ops && !q->rq_timed_out_fn)
                return;
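
This early return gives REQ_NO_TIMEOUT its meaning: a driver can opt an individual request out of the block-layer timer entirely, and the nvme hunk further down sets it on the asynchronous event request, which by design may stay outstanding indefinitely. A minimal illustration of the flag test; the bit value and names below are illustrative, not the kernel's:

#include <stdio.h>

#define REQ_NO_TIMEOUT_SKETCH (1UL << 0)        /* illustrative bit value */

struct request_sketch {
        unsigned long cmd_flags;
};

static void add_timer_sketch(struct request_sketch *req)
{
        if (req->cmd_flags & REQ_NO_TIMEOUT_SKETCH)
                return;                 /* never arm a timer for this one */
        printf("timer armed\n");
}

int main(void)
{
        struct request_sketch async_req = {
                .cmd_flags = REQ_NO_TIMEOUT_SKETCH,
        };

        add_timer_sketch(&async_req);   /* prints nothing */
        return 0;
}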
index 67d2334..527a6da 100644 (file)
@@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER)       += reset/
 obj-y                          += tty/
 obj-y                          += char/
 
-# gpu/ comes after char for AGP vs DRM startup
+# iommu/ comes before gpu as gpu are using iommu controllers
+obj-$(CONFIG_IOMMU_SUPPORT)    += iommu/
+
+# gpu/ comes after char for AGP vs DRM startup and after iommu
 obj-y                          += gpu/
 
 obj-$(CONFIG_CONNECTOR)                += connector/
@@ -141,7 +144,6 @@ obj-y                               += clk/
 
 obj-$(CONFIG_MAILBOX)          += mailbox/
 obj-$(CONFIG_HWSPINLOCK)       += hwspinlock/
-obj-$(CONFIG_IOMMU_SUPPORT)    += iommu/
 obj-$(CONFIG_REMOTEPROC)       += remoteproc/
 obj-$(CONFIG_RPMSG)            += rpmsg/
 
index 1fdf5e0..1020b1b 100644 (file)
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        acpi_status status;
        int ret;
 
-       if (pr->apic_id == -1)
+       if (pr->phys_id == -1)
                return -ENODEV;
 
        status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        cpu_maps_update_begin();
        cpu_hotplug_begin();
 
-       ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
+       ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
        if (ret)
                goto out;
 
        ret = arch_register_cpu(pr->id);
        if (ret) {
-               acpi_unmap_lsapic(pr->id);
+               acpi_unmap_cpu(pr->id);
                goto out;
        }
 
@@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        struct acpi_processor *pr = acpi_driver_data(device);
-       int apic_id, cpu_index, device_declaration = 0;
+       int phys_id, cpu_index, device_declaration = 0;
        acpi_status status = AE_OK;
        static int cpu0_initialized;
        unsigned long long value;
@@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
                pr->acpi_id = value;
        }
 
-       apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
-       if (apic_id < 0)
-               acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
-       pr->apic_id = apic_id;
+       phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
+       if (phys_id < 0)
+               acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
+       pr->phys_id = phys_id;
 
-       cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
+       cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
        if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
                cpu0_initialized = 1;
-               /* Handle UP system running SMP kernel, with no LAPIC in MADT */
+               /*
+                * Handle UP system running SMP kernel, with no CPU
+                * entry in MADT
+                */
                if ((cpu_index == -1) && (num_online_cpus() == 1))
                        cpu_index = 0;
        }
@@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
 
        /* Remove the CPU. */
        arch_unregister_cpu(pr->id);
-       acpi_unmap_lsapic(pr->id);
+       acpi_unmap_cpu(pr->id);
 
        cpu_hotplug_done();
        cpu_maps_update_done();
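
Rename aside, acpi_processor_hotadd_init() above keeps the standard unwind discipline: when arch_register_cpu() fails, the acpi_map_cpu() that preceded it is undone before returning. The same shape with stubbed steps, forcing the failure path so the unwind runs:

#include <stdio.h>

static int map_cpu(void)      { printf("map\n");   return 0; }
static void unmap_cpu(void)   { printf("unmap\n"); }
static int register_cpu(void) { return -1; }    /* force the failure path */

static int hotadd_sketch(void)
{
        int ret = map_cpu();

        if (ret)
                return ret;

        ret = register_cpu();
        if (ret) {
                unmap_cpu();            /* undo the map, in reverse order */
                return ret;
        }
        return 0;
}

int main(void)
{
        return hotadd_sketch() ? 1 : 0;
}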
index c2daa85..c0d44d3 100644 (file)
@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
 
        device->power.state = ACPI_STATE_UNKNOWN;
        if (!acpi_device_is_present(device))
-               return 0;
+               return -ENXIO;
 
        result = acpi_device_get_power(device, &state);
        if (result)
index a27d31d..9dcf836 100644 (file)
 
 #include "internal.h"
 
-#define DO_ENUMERATION 0x01
+#define INT3401_DEVICE 0X01
 static const struct acpi_device_id int340x_thermal_device_ids[] = {
-       {"INT3400", DO_ENUMERATION },
-       {"INT3401"},
+       {"INT3400"},
+       {"INT3401", INT3401_DEVICE},
        {"INT3402"},
        {"INT3403"},
        {"INT3404"},
@@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
                                        const struct acpi_device_id *id)
 {
 #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
-       if (id->driver_data == DO_ENUMERATION)
+       acpi_create_platform_device(adev);
+#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE)
+       /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
+       if (id->driver_data == INT3401_DEVICE)
                acpi_create_platform_device(adev);
 #endif
        return 1;
index 342942f..02e4839 100644 (file)
@@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
        unsigned long madt_end, entry;
        static struct acpi_table_madt *madt;
        static int read_madt;
-       int apic_id = -1;
+       int phys_id = -1;       /* CPU hardware ID */
 
        if (!read_madt) {
                if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
@@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
        }
 
        if (!madt)
-               return apic_id;
+               return phys_id;
 
        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;
@@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-                       if (!map_lapic_id(header, acpi_id, &apic_id))
+                       if (!map_lapic_id(header, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-                       if (!map_x2apic_id(header, type, acpi_id, &apic_id))
+                       if (!map_x2apic_id(header, type, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-                       if (!map_lsapic_id(header, type, acpi_id, &apic_id))
+                       if (!map_lsapic_id(header, type, acpi_id, &phys_id))
                                break;
                }
                entry += header->length;
        }
-       return apic_id;
+       return phys_id;
 }
 
 static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
@@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
-       int apic_id = -1;
+       int phys_id = -1;
 
        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;
@@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 
        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
-               map_lapic_id(header, acpi_id, &apic_id);
+               map_lapic_id(header, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
-               map_lsapic_id(header, type, acpi_id, &apic_id);
+               map_lsapic_id(header, type, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
-               map_x2apic_id(header, type, acpi_id, &apic_id);
+               map_x2apic_id(header, type, acpi_id, &phys_id);
 
 exit:
        kfree(buffer.pointer);
-       return apic_id;
+       return phys_id;
 }
 
-int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
 {
-       int apic_id;
+       int phys_id;
 
-       apic_id = map_mat_entry(handle, type, acpi_id);
-       if (apic_id == -1)
-               apic_id = map_madt_entry(type, acpi_id);
+       phys_id = map_mat_entry(handle, type, acpi_id);
+       if (phys_id == -1)
+               phys_id = map_madt_entry(type, acpi_id);
 
-       return apic_id;
+       return phys_id;
 }
 
-int acpi_map_cpuid(int apic_id, u32 acpi_id)
+int acpi_map_cpuid(int phys_id, u32 acpi_id)
 {
 #ifdef CONFIG_SMP
        int i;
 #endif
 
-       if (apic_id == -1) {
+       if (phys_id == -1) {
                /*
                 * On UP processor, there is no _MAT or MADT table.
-                * So above apic_id is always set to -1.
+                * So above phys_id is always set to -1.
                 *
                 * BIOS may define multiple CPU handles even for UP processor.
                 * For example,
@@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
                 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
                 * }
                 *
-                * Ignores apic_id and always returns 0 for the processor
+                * Ignores phys_id and always returns 0 for the processor
                 * handle with acpi id 0 if nr_cpu_ids is 1.
                 * This should be the case if SMP tables are not found.
                 * Return -1 for other CPU's handle.
@@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
                if (nr_cpu_ids <= 1 && acpi_id == 0)
                        return acpi_id;
                else
-                       return apic_id;
+                       return phys_id;
        }
 
 #ifdef CONFIG_SMP
        for_each_possible_cpu(i) {
-               if (cpu_physical_id(i) == apic_id)
+               if (cpu_physical_id(i) == phys_id)
                        return i;
        }
 #else
        /* In UP kernel, only processor 0 is valid */
-       if (apic_id == 0)
-               return apic_id;
+       if (phys_id == 0)
+               return phys_id;
 #endif
        return -1;
 }
 
 int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 {
-       int apic_id;
+       int phys_id;
 
-       apic_id = acpi_get_apicid(handle, type, acpi_id);
+       phys_id = acpi_get_phys_id(handle, type, acpi_id);
 
-       return acpi_map_cpuid(apic_id, acpi_id);
+       return acpi_map_cpuid(phys_id, acpi_id);
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
index 16914cc..dc4d896 100644 (file)
@@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
        if (device->wakeup.flags.valid)
                acpi_power_resources_list_free(&device->wakeup.resources);
 
-       if (!device->flags.power_manageable)
+       if (!device->power.flags.power_resources)
                return;
 
        for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
@@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
                        device->power.flags.power_resources)
                device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
 
-       if (acpi_bus_init_power(device)) {
-               acpi_free_power_resources_lists(device);
+       if (acpi_bus_init_power(device))
                device->flags.power_manageable = 0;
-       }
 }
 
 static void acpi_bus_get_flags(struct acpi_device *device)
@@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device)
        /* Skip devices that are not present. */
        if (!acpi_device_is_present(device)) {
                device->flags.visited = false;
+               device->flags.power_manageable = 0;
                return;
        }
        if (device->handler)
                goto ok;
 
        if (!device->flags.initialized) {
-               acpi_bus_update_power(device, NULL);
+               device->flags.power_manageable =
+                       device->power.states[ACPI_STATE_D0].flags.valid;
+               if (acpi_bus_init_power(device))
+                       device->flags.power_manageable = 0;
+
                device->flags.initialized = true;
        }
        device->flags.visited = false;
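
Together with the acpi_bus_init_power() change earlier (absent devices now return -ENXIO), these hunks make flags.power_manageable track reality: it is set only when D0 is declared valid and the initial power-up actually succeeds, and it is cleared again for devices that are not present. Condensed into a toy model (field and function names are stand-ins):

#include <stdio.h>

struct acpi_dev_sketch {
        int present;
        int d0_valid;
        int power_manageable;
};

static int init_power_sketch(struct acpi_dev_sketch *d)
{
        return d->present ? 0 : -1;     /* mirrors the new -ENXIO return */
}

static void attach_sketch(struct acpi_dev_sketch *d)
{
        d->power_manageable = d->d0_valid;
        if (init_power_sketch(d))
                d->power_manageable = 0;
}

int main(void)
{
        struct acpi_dev_sketch d = { .present = 0, .d0_valid = 1 };

        attach_sketch(&d);
        printf("power_manageable=%d\n", d.power_manageable);   /* 0 */
        return 0;
}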
index c72e79d..032db45 100644 (file)
@@ -522,6 +522,16 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
                },
        },
+
+       {
+        /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
+        .callback = video_disable_native_backlight,
+        .ident = "Dell XPS15 L521X",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
+               },
+       },
        {}
 };
 
index ae9f615..aa2224a 100644 (file)
@@ -530,7 +530,7 @@ static int null_add_dev(void)
                        goto out_cleanup_queues;
 
                nullb->q = blk_mq_init_queue(&nullb->tag_set);
-               if (!nullb->q) {
+               if (IS_ERR(nullb->q)) {
                        rv = -ENOMEM;
                        goto out_cleanup_tags;
                }
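
This hunk (like the nvme one below) adapts a caller to blk_mq_init_queue() returning ERR_PTR()-encoded errors instead of NULL, which a plain '!q' test would miss. The idiom, sketched in userspace with hand-rolled stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR_sketch(long err)
{
        return (void *)err;
}

static int IS_ERR_sketch(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *init_queue_sketch(int fail)
{
        static int q;

        return fail ? ERR_PTR_sketch(-ENOMEM) : (void *)&q;
}

int main(void)
{
        void *q = init_queue_sketch(1);

        if (IS_ERR_sketch(q))           /* a plain '!q' would miss this */
                printf("init failed: %ld\n", (long)q);
        return 0;
}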
index b1d5d87..cb529e9 100644 (file)
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
        cmd->fn = handler;
        cmd->ctx = ctx;
        cmd->aborted = 0;
+       blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
 }
 
 /* Special values must be less than 0x1000 */
@@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
        if (unlikely(status)) {
                if (!(status & NVME_SC_DNR || blk_noretry_request(req))
                    && (jiffies - req->start_time) < req->timeout) {
+                       unsigned long flags;
+
                        blk_mq_requeue_request(req);
-                       blk_mq_kick_requeue_list(req->q);
+                       spin_lock_irqsave(req->q->queue_lock, flags);
+                       if (!blk_queue_stopped(req->q))
+                               blk_mq_kick_requeue_list(req->q);
+                       spin_unlock_irqrestore(req->q->queue_lock, flags);
                        return;
                }
                req->errors = nvme_error_status(status);
@@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                }
        }
 
-       blk_mq_start_request(req);
-
        nvme_set_info(cmd, iod, req_completion);
        spin_lock_irq(&nvmeq->q_lock);
        if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
+       req->cmd_flags |= REQ_NO_TIMEOUT;
        cmd_info = blk_mq_rq_to_pdu(req);
        nvme_set_info(cmd_info, req, async_req_completion);
 
@@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
        struct nvme_command cmd;
 
        if (!nvmeq->qid || cmd_rq->aborted) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dev_list_lock, flags);
                if (work_busy(&dev->reset_work))
-                       return;
+                       goto out;
                list_del_init(&dev->node);
                dev_warn(&dev->pci_dev->dev,
                        "I/O %d QID %d timeout, reset controller\n",
                                                        req->tag, nvmeq->qid);
                dev->reset_workfn = nvme_reset_failed_dev;
                queue_work(nvme_workq, &dev->reset_work);
+ out:
+               spin_unlock_irqrestore(&dev_list_lock, flags);
                return;
        }
 
@@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
        void *ctx;
        nvme_completion_fn fn;
        struct nvme_cmd_info *cmd;
-       static struct nvme_completion cqe = {
-               .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
-       };
+       struct nvme_completion cqe;
+
+       if (!blk_mq_request_started(req))
+               return;
 
        cmd = blk_mq_rq_to_pdu(req);
 
        if (cmd->ctx == CMD_CTX_CANCELLED)
                return;
 
+       if (blk_queue_dying(req->q))
+               cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+       else
+               cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
+
+
        dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
                                                req->tag, nvmeq->qid);
        ctx = cancel_cmd_info(cmd, &fn);
@@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd->nvmeq;
 
-       dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
-                                                       nvmeq->qid);
-       if (nvmeq->dev->initialized)
-               nvme_abort_req(req);
-
        /*
         * The aborted req will be completed on receiving the abort req.
         * We enable the timer again. If hit twice, it'll cause a device reset,
         * as the device then is in a faulty state.
         */
-       return BLK_EH_RESET_TIMER;
+       int ret = BLK_EH_RESET_TIMER;
+
+       dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+                                                       nvmeq->qid);
+
+       spin_lock_irq(&nvmeq->q_lock);
+       if (!nvmeq->dev->initialized) {
+               /*
+                * Force cancelled command frees the request, which requires we
+                * return BLK_EH_NOT_HANDLED.
+                */
+               nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
+               ret = BLK_EH_NOT_HANDLED;
+       } else
+               nvme_abort_req(req);
+       spin_unlock_irq(&nvmeq->q_lock);
+
+       return ret;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
  */
 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
-       int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+       int vector;
 
        spin_lock_irq(&nvmeq->q_lock);
+       if (nvmeq->cq_vector == -1) {
+               spin_unlock_irq(&nvmeq->q_lock);
+               return 1;
+       }
+       vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
        nvmeq->dev->online_queues--;
+       nvmeq->cq_vector = -1;
        spin_unlock_irq(&nvmeq->q_lock);
 
        irq_set_affinity_hint(vector, NULL);
@@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
                adapter_delete_sq(dev, qid);
                adapter_delete_cq(dev, qid);
        }
+       if (!qid && dev->admin_q)
+               blk_mq_freeze_queue_start(dev->admin_q);
        nvme_clear_queue(nvmeq);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-                                                       int depth, int vector)
+                                                       int depth)
 {
        struct device *dmadev = &dev->pci_dev->dev;
        struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->q_depth = depth;
-       nvmeq->cq_vector = vector;
        nvmeq->qid = qid;
        dev->queue_count++;
        dev->queues[qid] = nvmeq;
@@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        struct nvme_dev *dev = nvmeq->dev;
        int result;
 
+       nvmeq->cq_vector = qid - 1;
        result = adapter_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
                return result;
@@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = {
        .timeout        = nvme_timeout,
 };
 
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+       if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
+               blk_cleanup_queue(dev->admin_q);
+               blk_mq_free_tag_set(&dev->admin_tagset);
+       }
+}
+
 static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 {
        if (!dev->admin_q) {
@@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
                        return -ENOMEM;
 
                dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
-               if (!dev->admin_q) {
+               if (IS_ERR(dev->admin_q)) {
                        blk_mq_free_tag_set(&dev->admin_tagset);
                        return -ENOMEM;
                }
-       }
+               if (!blk_get_queue(dev->admin_q)) {
+                       nvme_dev_remove_admin(dev);
+                       return -ENODEV;
+               }
+       } else
+               blk_mq_unfreeze_queue(dev->admin_q);
 
        return 0;
 }
 
-static void nvme_free_admin_tags(struct nvme_dev *dev)
-{
-       if (dev->admin_q)
-               blk_mq_free_tag_set(&dev->admin_tagset);
-}
-
 static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
        int result;
@@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
        nvmeq = dev->queues[0];
        if (!nvmeq) {
-               nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
+               nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
                if (!nvmeq)
                        return -ENOMEM;
        }
@@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        if (result)
                goto free_nvmeq;
 
-       result = nvme_alloc_admin_tags(dev);
-       if (result)
-               goto free_nvmeq;
-
+       nvmeq->cq_vector = 0;
        result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
        if (result)
-               goto free_tags;
+               goto free_nvmeq;
 
        return result;
 
- free_tags:
-       nvme_free_admin_tags(dev);
  free_nvmeq:
        nvme_free_queues(dev, 0);
        return result;
@@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
        unsigned i;
 
        for (i = dev->queue_count; i <= dev->max_qid; i++)
-               if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+               if (!nvme_alloc_queue(dev, i, dev->q_depth))
                        break;
 
        for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
@@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
                        break;
                if (!schedule_timeout(ADMIN_TIMEOUT) ||
                                        fatal_signal_pending(current)) {
+                       /*
+                        * Disable the controller first since we can't trust it
+                        * at this point, but leave the admin queue enabled
+                        * until all queue deletion requests are flushed.
+                        * FIXME: This may take a while if there are more h/w
+                        * queues than admin tags.
+                        */
                        set_current_state(TASK_RUNNING);
-
                        nvme_disable_ctrl(dev, readq(&dev->bar->cap));
-                       nvme_disable_queue(dev, 0);
-
-                       send_sig(SIGKILL, dq->worker->task, 1);
+                       nvme_clear_queue(dev->queues[0]);
                        flush_kthread_worker(dq->worker);
+                       nvme_disable_queue(dev, 0);
                        return;
                }
        }
@@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
 {
        struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
                                                        cmdinfo.work);
-       allow_signal(SIGKILL);
        if (nvme_delete_sq(nvmeq))
                nvme_del_queue_end(nvmeq);
 }
@@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
                kthread_stop(tmp);
 }
 
+static void nvme_freeze_queues(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns;
+
+       list_for_each_entry(ns, &dev->namespaces, list) {
+               blk_mq_freeze_queue_start(ns->queue);
+
+               spin_lock(ns->queue->queue_lock);
+               queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+               spin_unlock(ns->queue->queue_lock);
+
+               blk_mq_cancel_requeue_work(ns->queue);
+               blk_mq_stop_hw_queues(ns->queue);
+       }
+}
+
+static void nvme_unfreeze_queues(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns;
+
+       list_for_each_entry(ns, &dev->namespaces, list) {
+               queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+               blk_mq_unfreeze_queue(ns->queue);
+               blk_mq_start_stopped_hw_queues(ns->queue, true);
+               blk_mq_kick_requeue_list(ns->queue);
+       }
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
        int i;
@@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        dev->initialized = 0;
        nvme_dev_list_remove(dev);
 
-       if (dev->bar)
+       if (dev->bar) {
+               nvme_freeze_queues(dev);
                csts = readl(&dev->bar->csts);
+       }
        if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
                for (i = dev->queue_count - 1; i >= 0; i--) {
                        struct nvme_queue *nvmeq = dev->queues[i];
@@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        nvme_dev_unmap(dev);
 }
 
-static void nvme_dev_remove_admin(struct nvme_dev *dev)
-{
-       if (dev->admin_q && !blk_queue_dying(dev->admin_q))
-               blk_cleanup_queue(dev->admin_q);
-}
-
 static void nvme_dev_remove(struct nvme_dev *dev)
 {
        struct nvme_ns *ns;
@@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
        list_for_each_entry(ns, &dev->namespaces, list) {
                if (ns->disk->flags & GENHD_FL_UP)
                        del_gendisk(ns->disk);
-               if (!blk_queue_dying(ns->queue))
+               if (!blk_queue_dying(ns->queue)) {
+                       blk_mq_abort_requeue_list(ns->queue);
                        blk_cleanup_queue(ns->queue);
+               }
        }
 }
 
@@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref)
        nvme_free_namespaces(dev);
        nvme_release_instance(dev);
        blk_mq_free_tag_set(&dev->tagset);
+       blk_put_queue(dev->admin_q);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
@@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
        }
 
        nvme_init_queue(dev->queues[0], 0);
+       result = nvme_alloc_admin_tags(dev);
+       if (result)
+               goto disable;
 
        result = nvme_setup_io_queues(dev);
        if (result)
-               goto disable;
+               goto free_tags;
 
        nvme_set_irq_hints(dev);
 
        return result;
 
+ free_tags:
+       nvme_dev_remove_admin(dev);
  disable:
        nvme_disable_queue(dev, 0);
        nvme_dev_list_remove(dev);
@@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
                dev->reset_workfn = nvme_remove_disks;
                queue_work(nvme_workq, &dev->reset_work);
                spin_unlock(&dev_list_lock);
+       } else {
+               nvme_unfreeze_queues(dev);
+               nvme_set_irq_hints(dev);
        }
        dev->initialized = 1;
        return 0;
@@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
        flush_work(&dev->reset_work);
        misc_deregister(&dev->miscdev);
-       nvme_dev_remove(dev);
        nvme_dev_shutdown(dev);
+       nvme_dev_remove(dev);
        nvme_dev_remove_admin(dev);
        nvme_free_queues(dev, 0);
-       nvme_free_admin_tags(dev);
        nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
 }
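
Note: the nvme-core hunks above are mostly an ordering exercise. Namespace queues are frozen and their hardware contexts stopped before controller state is read for shutdown, nvme_remove() now shuts the device down before deleting gendisks, and parked requeue work is aborted so blk_cleanup_queue() is never left waiting on completions a dead controller cannot deliver. A loose, self-contained sketch of why that order matters (every name here is invented for illustration; this is not the driver's code):

    #include <stdbool.h>
    #include <stdio.h>

    struct ctrl {
        bool alive;     /* hardware can still complete requests */
        int parked;     /* requests sitting on the requeue list */
    };

    /* Stand-in for blk_cleanup_queue(): must not run while requests
     * could still be waiting on a controller that is already gone. */
    static void cleanup_queues(struct ctrl *c)
    {
        if (c->parked)
            printf("BUG: would block forever on %d parked requests\n",
                   c->parked);
        else
            printf("queues cleaned up safely\n");
    }

    int main(void)
    {
        struct ctrl c = { .alive = true, .parked = 3 };

        c.alive = false;    /* 1. shutdown: controller stops answering   */
        c.parked = 0;       /* 2. abort the requeue list (nothing waits) */
        cleanup_queues(&c); /* 3. only now tear the queues down          */
        return 0;
    }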
index 7ef7c09..cdfbd21 100644
@@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev)
                goto out_put_disk;
 
        q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
-       if (!q) {
+       if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
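
Note: this one-liner tracks a block-layer API change — blk_mq_init_queue() reports failure as an ERR_PTR-encoded pointer rather than NULL, so the old NULL test would pass an error pointer along and crash on first use. A minimal userspace rendition of the encoding (simplified from the kernel's include/linux/err.h; the helpers below are a demo, not the real header):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    /* Errors are encoded as pointers into the top page of the address space. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *init_queue(int fail)
    {
        static int dummy_queue;
        return fail ? ERR_PTR(-ENOMEM) : &dummy_queue;
    }

    int main(void)
    {
        void *q = init_queue(1);
        if (IS_ERR(q))                     /* a NULL check would miss this */
            printf("init failed: %ld\n", PTR_ERR(q));
        return 0;
    }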
index fd5a5e8..982b963 100644
@@ -969,7 +969,8 @@ static void sender(void                *send_info,
 
                do_gettimeofday(&t);
                pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
-                      msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
+                      msg->data[0], msg->data[1],
+                      (long) t.tv_sec, (long) t.tv_usec);
        }
 }
 
index 978b51e..ce3c155 100644
 
 #define DLN2_GPIO_MAX_PINS 32
 
-struct dln2_irq_work {
-       struct work_struct work;
-       struct dln2_gpio *dln2;
-       int pin;
-       int type;
-};
-
 struct dln2_gpio {
        struct platform_device *pdev;
        struct gpio_chip gpio;
@@ -64,10 +57,12 @@ struct dln2_gpio {
         */
        DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);
 
-       DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS);
-       DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS);
-       DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS);
-       struct dln2_irq_work *irq_work;
+       /* active IRQs - not synced to hardware */
+       DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS);
+       /* active IRQS - synced to hardware */
+       DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS);
+       int irq_type[DLN2_GPIO_MAX_PINS];
+       struct mutex irq_lock;
 };
 
 struct dln2_gpio_pin {
@@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
        return !!ret;
 }
 
-static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
-                                     unsigned int pin, int value)
+static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
+                                    unsigned int pin, int value)
 {
        struct dln2_gpio_pin_val req = {
                .pin = cpu_to_le16(pin),
                .value = value,
        };
 
-       dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
-                        sizeof(req));
+       return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
+                               sizeof(req));
 }
 
 #define DLN2_GPIO_DIRECTION_IN         0
@@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
                                      int value)
 {
+       struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
+       int ret;
+
+       ret = dln2_gpio_pin_set_out_val(dln2, offset, value);
+       if (ret < 0)
+               return ret;
+
        return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
 }
 
@@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
                                &req, sizeof(req));
 }
 
-static void dln2_irq_work(struct work_struct *w)
-{
-       struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
-       struct dln2_gpio *dln2 = iw->dln2;
-       u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
-
-       if (test_bit(iw->pin, dln2->irqs_enabled))
-               dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
-       else
-               dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
-}
-
-static void dln2_irq_enable(struct irq_data *irqd)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
-       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
-       int pin = irqd_to_hwirq(irqd);
-
-       set_bit(pin, dln2->irqs_enabled);
-       schedule_work(&dln2->irq_work[pin].work);
-}
-
-static void dln2_irq_disable(struct irq_data *irqd)
+static void dln2_irq_unmask(struct irq_data *irqd)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
        struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
        int pin = irqd_to_hwirq(irqd);
 
-       clear_bit(pin, dln2->irqs_enabled);
-       schedule_work(&dln2->irq_work[pin].work);
+       set_bit(pin, dln2->unmasked_irqs);
 }
 
 static void dln2_irq_mask(struct irq_data *irqd)
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
        struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
        int pin = irqd_to_hwirq(irqd);
 
-       set_bit(pin, dln2->irqs_masked);
-}
-
-static void dln2_irq_unmask(struct irq_data *irqd)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
-       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
-       struct device *dev = dln2->gpio.dev;
-       int pin = irqd_to_hwirq(irqd);
-
-       if (test_and_clear_bit(pin, dln2->irqs_pending)) {
-               int irq;
-
-               irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
-               if (!irq) {
-                       dev_err(dev, "pin %d not mapped to IRQ\n", pin);
-                       return;
-               }
-
-               generic_handle_irq(irq);
-       }
+       clear_bit(pin, dln2->unmasked_irqs);
 }
 
 static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
 
        switch (type) {
        case IRQ_TYPE_LEVEL_HIGH:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH;
                break;
        case IRQ_TYPE_LEVEL_LOW:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW;
                break;
        case IRQ_TYPE_EDGE_BOTH:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE;
                break;
        case IRQ_TYPE_EDGE_RISING:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING;
                break;
        case IRQ_TYPE_EDGE_FALLING:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING;
                break;
        default:
                return -EINVAL;
@@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
        return 0;
 }
 
+static void dln2_irq_bus_lock(struct irq_data *irqd)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+
+       mutex_lock(&dln2->irq_lock);
+}
+
+static void dln2_irq_bus_unlock(struct irq_data *irqd)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+       int pin = irqd_to_hwirq(irqd);
+       int enabled, unmasked;
+       unsigned type;
+       int ret;
+
+       enabled = test_bit(pin, dln2->enabled_irqs);
+       unmasked = test_bit(pin, dln2->unmasked_irqs);
+
+       if (enabled != unmasked) {
+               if (unmasked) {
+                       type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK;
+                       set_bit(pin, dln2->enabled_irqs);
+               } else {
+                       type = DLN2_GPIO_EVENT_NONE;
+                       clear_bit(pin, dln2->enabled_irqs);
+               }
+
+               ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0);
+               if (ret)
+                       dev_err(dln2->gpio.dev, "failed to set event\n");
+       }
+
+       mutex_unlock(&dln2->irq_lock);
+}
+
 static struct irq_chip dln2_gpio_irqchip = {
        .name = "dln2-irq",
-       .irq_enable = dln2_irq_enable,
-       .irq_disable = dln2_irq_disable,
        .irq_mask = dln2_irq_mask,
        .irq_unmask = dln2_irq_unmask,
        .irq_set_type = dln2_irq_set_type,
+       .irq_bus_lock = dln2_irq_bus_lock,
+       .irq_bus_sync_unlock = dln2_irq_bus_unlock,
 };
 
 static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
@@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                return;
        }
 
-       if (!test_bit(pin, dln2->irqs_enabled))
-               return;
-       if (test_bit(pin, dln2->irqs_masked)) {
-               set_bit(pin, dln2->irqs_pending);
-               return;
-       }
-
-       switch (dln2->irq_work[pin].type) {
+       switch (dln2->irq_type[pin]) {
        case DLN2_GPIO_EVENT_CHANGE_RISING:
                if (event->value)
                        generic_handle_irq(irq);
@@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        struct dln2_gpio *dln2;
        struct device *dev = &pdev->dev;
        int pins;
-       int i, ret;
+       int ret;
 
        pins = dln2_gpio_get_pin_count(pdev);
        if (pins < 0) {
@@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        if (!dln2)
                return -ENOMEM;
 
-       dln2->irq_work = devm_kcalloc(&pdev->dev, pins,
-                                     sizeof(struct dln2_irq_work), GFP_KERNEL);
-       if (!dln2->irq_work)
-               return -ENOMEM;
-       for (i = 0; i < pins; i++) {
-               INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
-               dln2->irq_work[i].pin = i;
-               dln2->irq_work[i].dln2 = dln2;
-       }
+       mutex_init(&dln2->irq_lock);
 
        dln2->pdev = pdev;
 
@@ -529,11 +510,8 @@ out:
 static int dln2_gpio_remove(struct platform_device *pdev)
 {
        struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
-       int i;
 
        dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
-       for (i = 0; i < dln2->gpio.ngpio; i++)
-               flush_work(&dln2->irq_work[i].work);
        gpiochip_remove(&dln2->gpio);
 
        return 0;
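
Note: the dln2 rework replaces the per-pin workqueue with the irq_bus_lock/irq_bus_sync_unlock pattern — mask/unmask only flip bits in a shadow bitmap, and the slow USB transfer that syncs hardware happens once, under a mutex, when the bus is unlocked. A rough standalone sketch of that shadow-state idea (the names below are stand-ins, not the driver's functions):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NPINS 32

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool unmasked[NPINS];   /* requested state, cheap to update */
    static bool enabled[NPINS];    /* state last pushed to the device  */

    /* Fast callers (irq_mask/irq_unmask equivalents): touch memory only. */
    static void fast_unmask(int pin) { unmasked[pin] = true; }
    static void fast_mask(int pin)   { unmasked[pin] = false; }

    static void bus_lock(void) { pthread_mutex_lock(&irq_lock); }

    /* Slow path: push any pin whose shadow state diverged to "hardware". */
    static void bus_sync_unlock(void)
    {
        for (int pin = 0; pin < NPINS; pin++) {
            if (enabled[pin] != unmasked[pin]) {
                enabled[pin] = unmasked[pin];
                printf("slow transfer: pin %d -> %s\n",
                       pin, enabled[pin] ? "enabled" : "disabled");
            }
        }
        pthread_mutex_unlock(&irq_lock);
    }

    int main(void)
    {
        bus_lock();
        fast_unmask(3);
        fast_unmask(7);
        fast_mask(7);        /* net effect: only pin 3 changes */
        bus_sync_unlock();
        return 0;
    }

The payoff in the driver is that mask/unmask become safe to call from irq context while the USB round-trip happens exactly once per batch of changes.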
index 09daaf2..3a5a710 100644
@@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev)
        err = gpiochip_add(gc);
        if (err) {
                dev_err(&ofdev->dev, "Could not add gpiochip\n");
-               irq_domain_remove(priv->domain);
+               if (priv->domain)
+                       irq_domain_remove(priv->domain);
                return err;
        }
 
index 66e4039..e620807 100644
@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
 obj-$(CONFIG_DRM_TTM)  += ttm/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
 obj-$(CONFIG_DRM_R128) += r128/
+obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
 obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_MGA)  += mga/
 obj-$(CONFIG_DRM_I810) += i810/
@@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/
 obj-y                  += i2c/
 obj-y                  += panel/
 obj-y                  += bridge/
-obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
index 7d4974b..fcfdf23 100644
@@ -31,7 +31,6 @@
 #include <uapi/linux/kfd_ioctl.h>
 #include <linux/time.h>
 #include <linux/mm.h>
-#include <linux/uaccess.h>
 #include <uapi/asm-generic/mman-common.h>
 #include <asm/processor.h>
 #include "kfd_priv.h"
@@ -127,17 +126,14 @@ static int kfd_open(struct inode *inode, struct file *filep)
        return 0;
 }
 
-static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
-                                       void __user *arg)
+static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+                                       void *data)
 {
-       struct kfd_ioctl_get_version_args args;
+       struct kfd_ioctl_get_version_args *args = data;
        int err = 0;
 
-       args.major_version = KFD_IOCTL_MAJOR_VERSION;
-       args.minor_version = KFD_IOCTL_MINOR_VERSION;
-
-       if (copy_to_user(arg, &args, sizeof(args)))
-               err = -EFAULT;
+       args->major_version = KFD_IOCTL_MAJOR_VERSION;
+       args->minor_version = KFD_IOCTL_MINOR_VERSION;
 
        return err;
 }
@@ -221,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
        return 0;
 }
 
-static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
-                                       void __user *arg)
+static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+                                       void *data)
 {
-       struct kfd_ioctl_create_queue_args args;
+       struct kfd_ioctl_create_queue_args *args = data;
        struct kfd_dev *dev;
        int err = 0;
        unsigned int queue_id;
@@ -233,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
        memset(&q_properties, 0, sizeof(struct queue_properties));
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
        pr_debug("kfd: creating queue ioctl\n");
 
-       err = set_queue_properties_from_user(&q_properties, &args);
+       err = set_queue_properties_from_user(&q_properties, args);
        if (err)
                return err;
 
-       dev = kfd_device_by_id(args.gpu_id);
+       dev = kfd_device_by_id(args->gpu_id);
        if (dev == NULL)
                return -EINVAL;
 
@@ -250,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
-               err = PTR_ERR(pdd);
+               err = -ESRCH;
                goto err_bind_process;
        }
 
@@ -263,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
        if (err != 0)
                goto err_create_queue;
 
-       args.queue_id = queue_id;
+       args->queue_id = queue_id;
 
        /* Return gpu_id as doorbell offset for mmap usage */
-       args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
-
-       if (copy_to_user(arg, &args, sizeof(args))) {
-               err = -EFAULT;
-               goto err_copy_args_out;
-       }
+       args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
 
        mutex_unlock(&p->mutex);
 
-       pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+       pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
 
        pr_debug("ring buffer address == 0x%016llX\n",
-                       args.ring_base_address);
+                       args->ring_base_address);
 
        pr_debug("read ptr address    == 0x%016llX\n",
-                       args.read_pointer_address);
+                       args->read_pointer_address);
 
        pr_debug("write ptr address   == 0x%016llX\n",
-                       args.write_pointer_address);
+                       args->write_pointer_address);
 
        return 0;
 
-err_copy_args_out:
-       pqm_destroy_queue(&p->pqm, queue_id);
 err_create_queue:
 err_bind_process:
        mutex_unlock(&p->mutex);
@@ -297,99 +283,90 @@ err_bind_process:
 }
 
 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
-                                       void __user *arg)
+                                       void *data)
 {
        int retval;
-       struct kfd_ioctl_destroy_queue_args args;
-
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
+       struct kfd_ioctl_destroy_queue_args *args = data;
 
        pr_debug("kfd: destroying queue id %d for PASID %d\n",
-                               args.queue_id,
+                               args->queue_id,
                                p->pasid);
 
        mutex_lock(&p->mutex);
 
-       retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+       retval = pqm_destroy_queue(&p->pqm, args->queue_id);
 
        mutex_unlock(&p->mutex);
        return retval;
 }
 
 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
-                                       void __user *arg)
+                                       void *data)
 {
        int retval;
-       struct kfd_ioctl_update_queue_args args;
+       struct kfd_ioctl_update_queue_args *args = data;
        struct queue_properties properties;
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+       if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
                pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
                return -EINVAL;
        }
 
-       if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+       if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
                pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
                return -EINVAL;
        }
 
-       if ((args.ring_base_address) &&
+       if ((args->ring_base_address) &&
                (!access_ok(VERIFY_WRITE,
-                       (const void __user *) args.ring_base_address,
+                       (const void __user *) args->ring_base_address,
                        sizeof(uint64_t)))) {
                pr_err("kfd: can't access ring base address\n");
                return -EFAULT;
        }
 
-       if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+       if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
                pr_err("kfd: ring size must be a power of 2 or 0\n");
                return -EINVAL;
        }
 
-       properties.queue_address = args.ring_base_address;
-       properties.queue_size = args.ring_size;
-       properties.queue_percent = args.queue_percentage;
-       properties.priority = args.queue_priority;
+       properties.queue_address = args->ring_base_address;
+       properties.queue_size = args->ring_size;
+       properties.queue_percent = args->queue_percentage;
+       properties.priority = args->queue_priority;
 
        pr_debug("kfd: updating queue id %d for PASID %d\n",
-                       args.queue_id, p->pasid);
+                       args->queue_id, p->pasid);
 
        mutex_lock(&p->mutex);
 
-       retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+       retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
 
        mutex_unlock(&p->mutex);
 
        return retval;
 }
 
-static long kfd_ioctl_set_memory_policy(struct file *filep,
-                               struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_set_memory_policy(struct file *filep,
+                                       struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_set_memory_policy_args args;
+       struct kfd_ioctl_set_memory_policy_args *args = data;
        struct kfd_dev *dev;
        int err = 0;
        struct kfd_process_device *pdd;
        enum cache_policy default_policy, alternate_policy;
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
-           && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+       if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+           && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
                return -EINVAL;
        }
 
-       if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
-           && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+       if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+           && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
                return -EINVAL;
        }
 
-       dev = kfd_device_by_id(args.gpu_id);
+       dev = kfd_device_by_id(args->gpu_id);
        if (dev == NULL)
                return -EINVAL;
 
@@ -397,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
 
        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
-               err = PTR_ERR(pdd);
+               err = -ESRCH;
                goto out;
        }
 
-       default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+       default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
                         ? cache_policy_coherent : cache_policy_noncoherent;
 
        alternate_policy =
-               (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+               (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
                   ? cache_policy_coherent : cache_policy_noncoherent;
 
        if (!dev->dqm->set_cache_memory_policy(dev->dqm,
                                &pdd->qpd,
                                default_policy,
                                alternate_policy,
-                               (void __user *)args.alternate_aperture_base,
-                               args.alternate_aperture_size))
+                               (void __user *)args->alternate_aperture_base,
+                               args->alternate_aperture_size))
                err = -EINVAL;
 
 out:
@@ -422,53 +399,44 @@ out:
        return err;
 }
 
-static long kfd_ioctl_get_clock_counters(struct file *filep,
-                               struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_get_clock_counters(struct file *filep,
+                               struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_get_clock_counters_args args;
+       struct kfd_ioctl_get_clock_counters_args *args = data;
        struct kfd_dev *dev;
        struct timespec time;
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       dev = kfd_device_by_id(args.gpu_id);
+       dev = kfd_device_by_id(args->gpu_id);
        if (dev == NULL)
                return -EINVAL;
 
        /* Reading GPU clock counter from KGD */
-       args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+       args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
 
        /* No access to rdtsc. Using raw monotonic time */
        getrawmonotonic(&time);
-       args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+       args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
 
        get_monotonic_boottime(&time);
-       args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+       args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
 
        /* Since the counter is in nano-seconds we use 1GHz frequency */
-       args.system_clock_freq = 1000000000;
-
-       if (copy_to_user(arg, &args, sizeof(args)))
-               return -EFAULT;
+       args->system_clock_freq = 1000000000;
 
        return 0;
 }
 
 
 static int kfd_ioctl_get_process_apertures(struct file *filp,
-                               struct kfd_process *p, void __user *arg)
+                               struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_get_process_apertures_args args;
+       struct kfd_ioctl_get_process_apertures_args *args = data;
        struct kfd_process_device_apertures *pAperture;
        struct kfd_process_device *pdd;
 
        dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       args.num_of_nodes = 0;
+       args->num_of_nodes = 0;
 
        mutex_lock(&p->mutex);
 
@@ -477,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
                /* Run over all pdd of the process */
                pdd = kfd_get_first_process_device_data(p);
                do {
-                       pAperture = &args.process_apertures[args.num_of_nodes];
+                       pAperture =
+                               &args->process_apertures[args->num_of_nodes];
                        pAperture->gpu_id = pdd->dev->id;
                        pAperture->lds_base = pdd->lds_base;
                        pAperture->lds_limit = pdd->lds_limit;
@@ -487,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
                        pAperture->scratch_limit = pdd->scratch_limit;
 
                        dev_dbg(kfd_device,
-                               "node id %u\n", args.num_of_nodes);
+                               "node id %u\n", args->num_of_nodes);
                        dev_dbg(kfd_device,
                                "gpu id %u\n", pdd->dev->id);
                        dev_dbg(kfd_device,
@@ -503,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
                        dev_dbg(kfd_device,
                                "scratch_limit %llX\n", pdd->scratch_limit);
 
-                       args.num_of_nodes++;
+                       args->num_of_nodes++;
                } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
-                               (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+                               (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
        }
 
        mutex_unlock(&p->mutex);
 
-       if (copy_to_user(arg, &args, sizeof(args)))
-               return -EFAULT;
-
        return 0;
 }
 
+#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
+       [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
+                       kfd_ioctl_get_version, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
+                       kfd_ioctl_create_queue, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
+                       kfd_ioctl_destroy_queue, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
+                       kfd_ioctl_set_memory_policy, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
+                       kfd_ioctl_get_clock_counters, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
+                       kfd_ioctl_get_process_apertures, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
+                       kfd_ioctl_update_queue, 0),
+};
+
+#define AMDKFD_CORE_IOCTL_COUNT        ARRAY_SIZE(amdkfd_ioctls)
+
 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 {
        struct kfd_process *process;
-       long err = -EINVAL;
+       amdkfd_ioctl_t *func;
+       const struct amdkfd_ioctl_desc *ioctl = NULL;
+       unsigned int nr = _IOC_NR(cmd);
+       char stack_kdata[128];
+       char *kdata = NULL;
+       unsigned int usize, asize;
+       int retcode = -EINVAL;
 
-       dev_dbg(kfd_device,
-               "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
-               cmd, _IOC_NR(cmd), arg);
+       if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+               goto err_i1;
+
+       if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
+               u32 amdkfd_size;
+
+               ioctl = &amdkfd_ioctls[nr];
+
+               amdkfd_size = _IOC_SIZE(ioctl->cmd);
+               usize = asize = _IOC_SIZE(cmd);
+               if (amdkfd_size > asize)
+                       asize = amdkfd_size;
+
+               cmd = ioctl->cmd;
+       } else
+               goto err_i1;
+
+       dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
 
        process = kfd_get_process(current);
-       if (IS_ERR(process))
-               return PTR_ERR(process);
+       if (IS_ERR(process)) {
+               dev_dbg(kfd_device, "no process\n");
+               goto err_i1;
+       }
 
-       switch (cmd) {
-       case KFD_IOC_GET_VERSION:
-               err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
-               break;
-       case KFD_IOC_CREATE_QUEUE:
-               err = kfd_ioctl_create_queue(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_DESTROY_QUEUE:
-               err = kfd_ioctl_destroy_queue(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_SET_MEMORY_POLICY:
-               err = kfd_ioctl_set_memory_policy(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_GET_CLOCK_COUNTERS:
-               err = kfd_ioctl_get_clock_counters(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_GET_PROCESS_APERTURES:
-               err = kfd_ioctl_get_process_apertures(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_UPDATE_QUEUE:
-               err = kfd_ioctl_update_queue(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       default:
-               dev_err(kfd_device,
-                       "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
-                       cmd, arg);
-               err = -EINVAL;
-               break;
+       /* Do not trust userspace, use our own definition */
+       func = ioctl->func;
+
+       if (unlikely(!func)) {
+               dev_dbg(kfd_device, "no function\n");
+               retcode = -EINVAL;
+               goto err_i1;
        }
 
-       if (err < 0)
-               dev_err(kfd_device,
-                       "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
-                       err, cmd, _IOC_NR(cmd));
+       if (cmd & (IOC_IN | IOC_OUT)) {
+               if (asize <= sizeof(stack_kdata)) {
+                       kdata = stack_kdata;
+               } else {
+                       kdata = kmalloc(asize, GFP_KERNEL);
+                       if (!kdata) {
+                               retcode = -ENOMEM;
+                               goto err_i1;
+                       }
+               }
+               if (asize > usize)
+                       memset(kdata + usize, 0, asize - usize);
+       }
 
-       return err;
+       if (cmd & IOC_IN) {
+               if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
+                       retcode = -EFAULT;
+                       goto err_i1;
+               }
+       } else if (cmd & IOC_OUT) {
+               memset(kdata, 0, usize);
+       }
+
+       retcode = func(filep, process, kdata);
+
+       if (cmd & IOC_OUT)
+               if (copy_to_user((void __user *)arg, kdata, usize) != 0)
+                       retcode = -EFAULT;
+
+err_i1:
+       if (!ioctl)
+               dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+                         task_pid_nr(current), cmd, nr);
+
+       if (kdata != stack_kdata)
+               kfree(kdata);
+
+       if (retcode)
+               dev_dbg(kfd_device, "ret = %d\n", retcode);
+
+       return retcode;
 }
 
 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
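
Note: the amdkfd handlers no longer copy their argument structs individually. kfd_ioctl() decodes the size and direction from the command word, does one copy_from_user() into a stack (or heap) buffer, calls the handler through a table indexed by _IOC_NR(), and copies the result back once — re-deriving cmd from its own table so userspace cannot smuggle in a different size ("Do not trust userspace"). A compressed userspace sketch of that table-driven dispatch (memcpy stands in for copy_from_user/copy_to_user, and the command and struct are made up):

    #include <stdio.h>
    #include <string.h>

    struct get_version_args { unsigned major, minor; };

    typedef int ioctl_fn(void *data);

    static int do_get_version(void *data)
    {
        struct get_version_args *args = data;  /* already in "kernel" space */
        args->major = 1;
        args->minor = 0;
        return 0;
    }

    struct ioctl_desc {
        ioctl_fn *func;
        size_t size;            /* from _IOC_SIZE(cmd) in the real code */
    };

    static const struct ioctl_desc ioctls[] = {
        [0] = { do_get_version, sizeof(struct get_version_args) },
    };

    static int dispatch(unsigned nr, void *user_arg)
    {
        char kdata[128];
        const struct ioctl_desc *d;
        int ret;

        if (nr >= sizeof(ioctls) / sizeof(ioctls[0]) || !ioctls[nr].func)
            return -1;
        d = &ioctls[nr];
        if (d->size > sizeof(kdata))
            return -1;                      /* real code falls back to kmalloc */
        memcpy(kdata, user_arg, d->size);   /* one copy in  */
        ret = d->func(kdata);
        memcpy(user_arg, kdata, d->size);   /* one copy out */
        return ret;
    }

    int main(void)
    {
        struct get_version_args args = { 0, 0 };
        dispatch(0, &args);
        printf("version %u.%u\n", args.major, args.minor);
        return 0;
    }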
index 924e90c..9c8961d 100644
@@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
 {
        int bit = qpd->vmid - KFD_VMID_START_OFFSET;
 
+       /* Release the vmid mapping */
+       set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+
        set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
        qpd->vmid = 0;
        q->properties.vmid = 0;
@@ -272,6 +275,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
                return retval;
        }
 
+       pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
+                       q->pipe,
+                       q->queue);
+
+       retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+                       q->queue, q->properties.write_ptr);
+       if (retval != 0) {
+               deallocate_hqd(dqm, q);
+               mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+               return retval;
+       }
+
        return 0;
 }
 
@@ -320,6 +335,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 {
        int retval;
        struct mqd_manager *mqd;
+       bool prev_active = false;
 
        BUG_ON(!dqm || !q || !q->mqd);
 
@@ -330,10 +346,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
                return -ENOMEM;
        }
 
-       retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
        if (q->properties.is_active == true)
+               prev_active = true;
+
+       /*
+        *
+        * check active state vs. the previous state
+        * and modify counter accordingly
+        */
+       retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+       if ((q->properties.is_active == true) && (prev_active == false))
                dqm->queue_count++;
-       else
+       else if ((q->properties.is_active == false) && (prev_active == true))
                dqm->queue_count--;
 
        if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
index adc3147..4c3828c 100644
@@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
                        uint32_t queue_id)
 {
 
-       return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
+       return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
                                        pipe_id, queue_id);
 
 }
index 71699ad..4c25ef5 100644
@@ -32,7 +32,7 @@ int kfd_pasid_init(void)
 {
        pasid_limit = max_num_of_processes;
 
-       pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
+       pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
        if (!pasid_bitmap)
                return -ENOMEM;
 
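
Note: the kfd_pasid fix is worth spelling out. BITS_TO_LONGS(n) yields a count of longs, not bytes, so kzalloc(BITS_TO_LONGS(pasid_limit), ...) allocated roughly one eighth of the intended bitmap on 64-bit; kcalloc(BITS_TO_LONGS(n), sizeof(long), ...) allocates that many longs. A few lines of arithmetic make the difference concrete (BITS_TO_LONGS reproduced from the kernel definition; the 512 is an illustrative limit, not the driver's value):

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
        unsigned pasid_limit = 512;   /* illustrative value */

        /* Old: a count of longs was passed to kzalloc(), read as bytes. */
        size_t buggy = BITS_TO_LONGS(pasid_limit);
        /* New: kcalloc(count, sizeof(long)) allocates count longs. */
        size_t fixed = BITS_TO_LONGS(pasid_limit) * sizeof(long);

        printf("buggy: %zu bytes (%zu bits)\n", buggy, buggy * CHAR_BIT);
        printf("fixed: %zu bytes (%zu bits)\n", fixed, fixed * CHAR_BIT);
        return 0;
    }

With these numbers on a 64-bit build, the old call handed kzalloc() 8 bytes where 64 were needed, so any bitmap write past bit 63 scribbled off the end of the allocation.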
index f9fb81e..a5edb29 100644
@@ -463,6 +463,24 @@ struct kfd_process {
        bool is_32bit_user_mode;
 };
 
+/**
+ * Ioctl function type.
+ *
+ * \param filep pointer to file structure.
+ * \param p amdkfd process pointer.
+ * \param data pointer to arg that was copied from user.
+ */
+typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
+                               void *data);
+
+struct amdkfd_ioctl_desc {
+       unsigned int cmd;
+       int flags;
+       amdkfd_ioctl_t *func;
+       unsigned int cmd_drv;
+       const char *name;
+};
+
 void kfd_process_create_wq(void);
 void kfd_process_destroy_wq(void);
 struct kfd_process *kfd_create_process(const struct task_struct *);
index b11792d..cca1708 100644
@@ -921,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void)
        uint32_t i = 0;
 
        list_for_each_entry(dev, &topology_device_list, list) {
-               ret = kfd_build_sysfs_node_entry(dev, 0);
+               ret = kfd_build_sysfs_node_entry(dev, i);
                if (ret < 0)
                        return ret;
                i++;
index 47b5519..96a5122 100644
@@ -183,7 +183,7 @@ struct kfd2kgd_calls {
        int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr);
 
-       bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
+       bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id);
 
        int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
index 70d0f0f..e9f891c 100644
@@ -1756,8 +1756,6 @@ struct drm_i915_private {
         */
        struct workqueue_struct *dp_wq;
 
-       uint32_t bios_vgacntr;
-
        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct {
                int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
index 52adcb6..c11603b 100644
@@ -1048,6 +1048,7 @@ int
 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -1067,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                        return -EFAULT;
        }
 
+       intel_runtime_pm_get(dev_priv);
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
-               return ret;
+               goto put_rpm;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
@@ -1121,6 +1124,9 @@ out:
        drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
+put_rpm:
+       intel_runtime_pm_put(dev_priv);
+
        return ret;
 }
 
index 996c293..d0d3dfb 100644
@@ -3725,8 +3725,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
        if ((iir & flip_pending) == 0)
                goto check_page_flip;
 
-       intel_prepare_page_flip(dev, plane);
-
        /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the Pendingflip
         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3736,6 +3734,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
        if (I915_READ16(ISR) & flip_pending)
                goto check_page_flip;
 
+       intel_prepare_page_flip(dev, plane);
        intel_finish_page_flip(dev, pipe);
        return true;
 
@@ -3907,8 +3906,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
        if ((iir & flip_pending) == 0)
                goto check_page_flip;
 
-       intel_prepare_page_flip(dev, plane);
-
        /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the Pendingflip
         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3918,6 +3915,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
        if (I915_READ(ISR) & flip_pending)
                goto check_page_flip;
 
+       intel_prepare_page_flip(dev, plane);
        intel_finish_page_flip(dev, pipe);
        return true;
 
index fb3e3d4..e2af138 100644
@@ -13057,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev)
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
        udelay(300);
 
-       /*
-        * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
-        * from S3 without preserving (some of?) the other bits.
-        */
-       I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
+       I915_WRITE(vga_reg, VGA_DISP_DISABLE);
        POSTING_READ(vga_reg);
 }
 
@@ -13146,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev)
 
        intel_shared_dpll_init(dev);
 
-       /* save the BIOS value before clobbering it */
-       dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
        /* Just disable it once at startup */
        i915_disable_vga(dev);
        intel_setup_outputs(dev);
index f5a78d5..ac6da71 100644
@@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
                vlv_power_sequencer_reset(dev_priv);
 }
 
-static void check_power_well_state(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
-       if (power_well->always_on || !i915.disable_power_well) {
-               if (!enabled)
-                       goto mismatch;
-
-               return;
-       }
-
-       if (enabled != (power_well->count > 0))
-               goto mismatch;
-
-       return;
-
-mismatch:
-       WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
-                 power_well->name, power_well->always_on, enabled,
-                 power_well->count, i915.disable_power_well);
-}
-
 /**
  * intel_display_power_get - grab a power domain reference
  * @dev_priv: i915 device instance
@@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
                        power_well->ops->enable(dev_priv, power_well);
                        power_well->hw_enabled = true;
                }
-
-               check_power_well_state(dev_priv, power_well);
        }
 
        power_domains->domain_use_count[domain]++;
@@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
                        power_well->hw_enabled = false;
                        power_well->ops->disable(dev_priv, power_well);
                }
-
-               check_power_well_state(dev_priv, power_well);
        }
 
        mutex_unlock(&power_domains->lock);
index ff2b434..760947e 100644
@@ -26,7 +26,7 @@
 void
 nvkm_event_put(struct nvkm_event *event, u32 types, int index)
 {
-       BUG_ON(!spin_is_locked(&event->refs_lock));
+       assert_spin_locked(&event->refs_lock);
        while (types) {
                int type = __ffs(types); types &= ~(1 << type);
                if (--event->refs[index * event->types_nr + type] == 0) {
@@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index)
 void
 nvkm_event_get(struct nvkm_event *event, u32 types, int index)
 {
-       BUG_ON(!spin_is_locked(&event->refs_lock));
+       assert_spin_locked(&event->refs_lock);
        while (types) {
                int type = __ffs(types); types &= ~(1 << type);
                if (++event->refs[index * event->types_nr + type] == 1) {
index d1bcde5..839a325 100644
@@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
        struct nvkm_event *event = notify->event;
        unsigned long flags;
 
-       BUG_ON(!spin_is_locked(&event->list_lock));
+       assert_spin_locked(&event->list_lock);
        BUG_ON(size != notify->size);
 
        spin_lock_irqsave(&event->refs_lock, flags);
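
Note: the nvkm changes swap BUG_ON(!spin_is_locked(&lock)) for assert_spin_locked(&lock). On !CONFIG_SMP kernels spinlocks compile to nothing and spin_is_locked() is constantly 0, so the BUG_ON form fires on a perfectly held lock; assert_spin_locked() compiles away in that configuration. A toy pair showing the uniprocessor hazard (these are simplified stand-ins, not the kernel definitions):

    #include <stdio.h>

    /* Pretend !CONFIG_SMP: the lock carries no state at all. */
    struct spinlock { int dummy; };
    static int spin_is_locked(struct spinlock *l) { (void)l; return 0; }

    /* UP-safe form: there is nothing to check, so check nothing. */
    #define assert_spin_locked(l) ((void)(l))

    int main(void)
    {
        struct spinlock refs_lock = { 0 };

        /* Imagine the lock is "held" here. */
        assert_spin_locked(&refs_lock);   /* no-op on UP: correct */
        printf("assert_spin_locked: ok\n");

        /* The replaced pattern, BUG_ON(!spin_is_locked(&lock)), would
         * evaluate to BUG_ON(!0) and fire even with the lock held: */
        printf("spin_is_locked() on UP: %d\n", spin_is_locked(&refs_lock));
        return 0;
    }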
index 674da1f..7329226 100644
@@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
                break;
+       case 0x106:
+               device->cname = "GK208B";
+               device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+               device->oclass[NVDEV_SUBDEV_GPIO   ] =  nve0_gpio_oclass;
+               device->oclass[NVDEV_SUBDEV_I2C    ] =  nve0_i2c_oclass;
+               device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
+               device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
+               device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
+               device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
+               device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+               device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+               device->oclass[NVDEV_SUBDEV_PWR    ] =  nv108_pwr_oclass;
+               device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nvd0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_FIFO   ] =  nv108_fifo_oclass;
+               device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
+               device->oclass[NVDEV_ENGINE_GR     ] =  nv108_graph_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] =  nvf0_disp_oclass;
+               device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+               device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+               device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+               break;
        case 0x108:
                device->cname = "GK208";
                device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
index 5e58bba..a7a890f 100644
@@ -44,8 +44,10 @@ static void
 pramin_fini(void *data)
 {
        struct priv *priv = data;
-       nv_wr32(priv->bios, 0x001700, priv->bar0);
-       kfree(priv);
+       if (priv) {
+               nv_wr32(priv->bios, 0x001700, priv->bar0);
+               kfree(priv);
+       }
 }
 
 static void *
index 00f2ca7..033a8e9 100644
 
 #include "nv50.h"
 
+struct nvaa_ram_priv {
+       struct nouveau_ram base;
+       u64 poller_base;
+};
+
 static int
 nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
              struct nouveau_oclass *oclass, void *data, u32 datasize,
              struct nouveau_object **pobject)
 {
-       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+       u32 rsvd_head = ( 256 * 1024); /* vga memory */
+       u32 rsvd_tail = (1024 * 1024); /* vbios etc */
        struct nouveau_fb *pfb = nouveau_fb(parent);
-       struct nouveau_ram *ram;
+       struct nvaa_ram_priv *priv;
        int ret;
 
-       ret = nouveau_ram_create(parent, engine, oclass, &ram);
-       *pobject = nv_object(ram);
+       ret = nouveau_ram_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       ram->size = nv_rd32(pfb, 0x10020c);
-       ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
+       priv->base.type   = NV_MEM_TYPE_STOLEN;
+       priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+       priv->base.size   = (u64)nv_rd32(pfb, 0x100e14) << 12;
 
-       ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
-                             (rsvd_head + rsvd_tail), 1);
+       rsvd_tail += 0x1000;
+       priv->poller_base = priv->base.size - rsvd_tail;
+
+       ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12,
+                             (priv->base.size  - (rsvd_head + rsvd_tail)) >> 12,
+                             1);
        if (ret)
                return ret;
 
-       ram->type   = NV_MEM_TYPE_STOLEN;
-       ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
-       ram->get = nv50_ram_get;
-       ram->put = nv50_ram_put;
+       priv->base.get = nv50_ram_get;
+       priv->base.put = nv50_ram_put;
+       return 0;
+}
+
+static int
+nvaa_ram_init(struct nouveau_object *object)
+{
+       struct nouveau_fb *pfb = nouveau_fb(object);
+       struct nvaa_ram_priv *priv = (void *)object;
+       int ret;
+       u64 dniso, hostnb, flush;
+
+       ret = nouveau_ram_init(&priv->base);
+       if (ret)
+               return ret;
+
+       dniso  = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
+       hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
+       flush  = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+
+       /* Enable NISO poller for various clients and set their associated
+        * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+        */
+       nv_wr32(pfb, 0x100c18, dniso);
+       nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
+       nv_wr32(pfb, 0x100c1c, hostnb);
+       nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
+       nv_wr32(pfb, 0x100c24, flush);
+       nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
+
        return 0;
 }
 
@@ -60,7 +97,7 @@ nvaa_ram_oclass = {
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvaa_ram_ctor,
                .dtor = _nouveau_ram_dtor,
-               .init = _nouveau_ram_init,
+               .init = nvaa_ram_init,
                .fini = _nouveau_ram_fini,
        },
 };
index a75c35c..165401c 100644
 
 #include "nv04.h"
 
-static void
-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
-{
-       struct nv04_mc_priv *priv = (void *)pmc;
-       nv_wr08(priv, 0x088050, 0xff);
-}
-
 struct nouveau_oclass *
 nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
        .base.handle = NV_SUBDEV(MC, 0x4c),
@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
                .fini = _nouveau_mc_fini,
        },
        .intr = nv04_mc_intr,
-       .msi_rearm = nv4c_mc_msi_rearm,
 }.base;
index 21ec561..bba2960 100644
@@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
         * so use the DMA API for them.
         */
        if (!nv_device_is_cpu_coherent(device) &&
-           ttm->caching_state == tt_uncached)
+           ttm->caching_state == tt_uncached) {
                ttm_dma_unpopulate(ttm_dma, dev->dev);
+               return;
+       }
 
 #if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
index 42c34ba..bf0f9e2 100644
@@ -36,7 +36,14 @@ void
 nouveau_gem_object_del(struct drm_gem_object *gem)
 {
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
+       struct device *dev = drm->dev->dev;
+       int ret;
+
+       ret = pm_runtime_get_sync(dev);
+       if (WARN_ON(ret < 0 && ret != -EACCES))
+               return;
 
        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
        /* reset filp so nouveau_bo_del_ttm() can test for it */
        gem->filp = NULL;
        ttm_bo_unref(&bo);
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
 }
 
 int
@@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nouveau_vma *vma;
+       struct device *dev = drm->dev->dev;
        int ret;
 
        if (!cli->vm)
@@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
                        goto out;
                }
 
+               ret = pm_runtime_get_sync(dev);
+               if (ret < 0 && ret != -EACCES)
+                       goto out;
+
                ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
-               if (ret) {
+               if (ret)
                        kfree(vma);
-                       goto out;
-               }
+
+               pm_runtime_mark_last_busy(dev);
+               pm_runtime_put_autosuspend(dev);
        } else {
                vma->refcount++;
        }
@@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+       struct device *dev = drm->dev->dev;
        struct nouveau_vma *vma;
        int ret;
 
@@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 
        vma = nouveau_bo_vma_find(nvbo, cli->vm);
        if (vma) {
-               if (--vma->refcount == 0)
-                       nouveau_gem_object_unmap(nvbo, vma);
+               if (--vma->refcount == 0) {
+                       ret = pm_runtime_get_sync(dev);
+                       if (!WARN_ON(ret < 0 && ret != -EACCES)) {
+                               nouveau_gem_object_unmap(nvbo, vma);
+                               pm_runtime_mark_last_busy(dev);
+                               pm_runtime_put_autosuspend(dev);
+                       }
+               }
        }
        ttm_bo_unreserve(&nvbo->bo);
 }
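
Note: the nouveau_gem hunks bracket GEM open/close/free paths with pm_runtime_get_sync()/pm_runtime_put_autosuspend() so the GPU is awake while VM page tables are mapped or unmapped, treating -EACCES (runtime PM not enabled for the device) as harmless rather than fatal. A toy refcount sketch of that bracket (the fake_dev type and helpers are invented; this is not the pm_runtime API):

    #include <stdio.h>

    #define ERR_ACCES (-13)     /* stand-in for -EACCES */

    struct fake_dev {
        int usage;          /* runtime-PM style usage count    */
        int rpm_disabled;   /* device is always powered if set */
    };

    static int dev_get_sync(struct fake_dev *d)
    {
        if (d->rpm_disabled)
            return ERR_ACCES;   /* not an error: device never sleeps */
        if (d->usage++ == 0)
            printf("device resumed\n");
        return 0;
    }

    static void dev_put_autosuspend(struct fake_dev *d)
    {
        if (!d->rpm_disabled && --d->usage == 0)
            printf("device may autosuspend\n");
    }

    static void unmap_vma(struct fake_dev *d)
    {
        int ret = dev_get_sync(d);

        if (ret < 0 && ret != ERR_ACCES)
            return;             /* genuinely failed to wake the GPU */
        printf("tearing down mapping with device awake\n");
        if (ret == 0)
            dev_put_autosuspend(d);
    }

    int main(void)
    {
        struct fake_dev d = { 0, 0 };
        unmap_vma(&d);
        return 0;
    }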
index d59ec49..ed644a4 100644
@@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                                return pll;
                }
                /* otherwise, pick one of the plls */
-               if ((rdev->family == CHIP_KAVERI) ||
-                   (rdev->family == CHIP_KABINI) ||
+               if ((rdev->family == CHIP_KABINI) ||
                    (rdev->family == CHIP_MULLINS)) {
-                       /* KB/KV/ML has PPLL1 and PPLL2 */
+                       /* KB/ML has PPLL1 and PPLL2 */
                        pll_in_use = radeon_get_pll_use_mask(crtc);
                        if (!(pll_in_use & (1 << ATOM_PPLL2)))
                                return ATOM_PPLL2;
@@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                        DRM_ERROR("unable to allocate a PPLL\n");
                        return ATOM_PPLL_INVALID;
                } else {
-                       /* CI has PPLL0, PPLL1, and PPLL2 */
+                       /* CI/KV has PPLL0, PPLL1, and PPLL2 */
                        pll_in_use = radeon_get_pll_use_mask(crtc);
                        if (!(pll_in_use & (1 << ATOM_PPLL2)))
                                return ATOM_PPLL2;
@@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
        case ATOM_PPLL0:
                /* disable the ppll */
                if ((rdev->family == CHIP_ARUBA) ||
+                   (rdev->family == CHIP_KAVERI) ||
                    (rdev->family == CHIP_BONAIRE) ||
                    (rdev->family == CHIP_HAWAII))
                        atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
index 11ba9d2..db42a67 100644 (file)
@@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
        struct radeon_connector_atom_dig *dig_connector;
        int dp_clock;
 
+       if ((mode->clock > 340000) &&
+           (!radeon_connector_is_dp12_capable(connector)))
+               return MODE_CLOCK_HIGH;
+
        if (!radeon_connector->con_priv)
                return MODE_CLOCK_HIGH;
        dig_connector = radeon_connector->con_priv;
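
The added guard in radeon_dp_mode_valid_helper() rejects any mode whose pixel clock exceeds 340 MHz unless the connector path is DP 1.2 capable, presumably because pre-1.2 DisplayPort link rates cannot carry such modes. The shape of a mode_valid-style clock check, as a sketch (link_is_dp12_capable() is a hypothetical stand-in for the driver's helper):

    static enum drm_mode_status
    my_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
    {
            /* mode->clock is in kHz, so 340000 == 340 MHz */
            if (mode->clock > 340000 && !link_is_dp12_capable(connector))
                    return MODE_CLOCK_HIGH;
            return MODE_OK;
    }
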
index ba85986..03003f8 100644 (file)
 #define ATC_VM_APERTURE1_HIGH_ADDR                             0x330Cu
 #define ATC_VM_APERTURE1_LOW_ADDR                              0x3304u
 
+#define IH_VMID_0_LUT                                          0x3D40u
+
 #endif
index 2fe8cfc..bafdf92 100644 (file)
@@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
        }
 
        sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-       if (sad_count < 0) {
+       if (sad_count <= 0) {
                DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
                return;
        }
index 9b42001..e3e9c10 100644 (file)
@@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
        if (radeon_bapm == -1) {
-               /* There are stability issues reported on with
-                * bapm enabled on an asrock system.
-                */
-               if (rdev->pdev->subsystem_vendor == 0x1849)
-                       pi->bapm_enable = false;
-               else
+               /* only enable bapm on KB, ML by default */
+               if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                        pi->bapm_enable = true;
+               else
+                       pi->bapm_enable = false;
        } else if (radeon_bapm == 0) {
                pi->bapm_enable = false;
        } else {
index 242fd8b..8bf87f1 100644 (file)
@@ -72,7 +72,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
 static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr);
 
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id);
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
@@ -92,7 +92,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .init_memory = kgd_init_memory,
        .init_pipeline = kgd_init_pipeline,
        .hqd_load = kgd_hqd_load,
-       .hqd_is_occupies = kgd_hqd_is_occupies,
+       .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .get_fw_version = get_fw_version
 };
@@ -101,6 +101,7 @@ static const struct kgd2kfd_calls *kgd2kfd;
 
 bool radeon_kfd_init(void)
 {
+#if defined(CONFIG_HSA_AMD_MODULE)
        bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
                                const struct kgd2kfd_calls**);
 
@@ -117,6 +118,17 @@ bool radeon_kfd_init(void)
        }
 
        return true;
+#elif defined(CONFIG_HSA_AMD)
+       if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+               kgd2kfd = NULL;
+
+               return false;
+       }
+
+       return true;
+#else
+       return false;
+#endif
 }
 
 void radeon_kfd_fini(void)
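
radeon_kfd_init() now distinguishes three build configurations: CONFIG_HSA_AMD_MODULE (amdkfd built as a module, so the entry point must be resolved at runtime through the kgd2kfd_init_p pointer), CONFIG_HSA_AMD (built in, called directly), and neither (KFD support simply reports failure). A minimal sketch of the =m / =y / =n pattern for a hypothetical optional subsystem "foo", assuming a runtime lookup such as symbol_request() as the elided part of this function uses:

    bool foo_attach(void)
    {
    #if defined(CONFIG_FOO_MODULE)
            bool (*init_p)(void) = symbol_request(foo_init);

            if (!init_p)
                    return false;      /* module absent or not loadable */
            return init_p();
    #elif defined(CONFIG_FOO)
            return foo_init();         /* linked in, call directly */
    #else
            return false;              /* not configured at all */
    #endif
    }
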
@@ -378,6 +390,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                cpu_relax();
        write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
 
+       /* Mapping vmid to pasid also for IH block */
+       write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
+                       pasid_mapping);
+
        return 0;
 }
 
@@ -517,7 +533,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
        return 0;
 }
 
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id)
 {
        uint32_t act;
@@ -556,6 +572,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
                if (timeout == 0) {
                        pr_err("kfd: cp queue preemption time out (%dms)\n",
                                temp);
+                       release_queue(kgd);
                        return -ETIME;
                }
                msleep(20);
index 535403e..15aee72 100644 (file)
@@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
        u32 format;
        u32 *buffer;
        const u8 __user *data;
-       int size, dwords, tex_width, blit_width, spitch;
+       unsigned int size, dwords, tex_width, blit_width, spitch;
        u32 height;
        int i;
        u32 texpitch, microtile;
index 230b6f8..dfdc269 100644 (file)
@@ -27,7 +27,8 @@ if HID
 
 config HID_BATTERY_STRENGTH
        bool "Battery level reporting for HID devices"
-       depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+       depends on HID
+       select POWER_SUPPLY
        default n
        ---help---
        This option adds support of reporting battery strength (for HID devices
index c3d0ac1..8b63879 100644 (file)
@@ -1805,6 +1805,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
index 7460f34..9243359 100644 (file)
 #define USB_DEVICE_ID_KYE_GPEN_560     0x5003
 #define USB_DEVICE_ID_KYE_EASYPEN_I405X        0x5010
 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X       0x5011
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2     0x501a
 #define USB_DEVICE_ID_KYE_EASYPEN_M610X        0x5013
 
 #define USB_VENDOR_ID_LABTEC           0x1020
index e0a0f06..9505605 100644 (file)
@@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
                               USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+                              USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
        {}
index b92bf01..158fcf5 100644 (file)
@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                }
                break;
        case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
                if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
                        rdesc = mousepen_i608x_rdesc_fixed;
                        *rsize = sizeof(mousepen_i608x_rdesc_fixed);
@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
        switch (id->product) {
        case USB_DEVICE_ID_KYE_EASYPEN_I405X:
        case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
        case USB_DEVICE_ID_KYE_EASYPEN_M610X:
                ret = kye_tablet_enable(hdev);
                if (ret) {
@@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+                               USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
index c917ab6..5bc6d80 100644 (file)
@@ -962,10 +962,24 @@ static int logi_dj_raw_event(struct hid_device *hdev,
 
        switch (data[0]) {
        case REPORT_ID_DJ_SHORT:
+               if (size != DJREPORT_SHORT_LENGTH) {
+                       dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
+                       return false;
+               }
                return logi_dj_dj_event(hdev, report, data, size);
        case REPORT_ID_HIDPP_SHORT:
-               /* intentional fallthrough */
+               if (size != HIDPP_REPORT_SHORT_LENGTH) {
+                       dev_err(&hdev->dev,
+                               "Short HID++ report of bad size (%d)", size);
+                       return false;
+               }
+               return logi_dj_hidpp_event(hdev, report, data, size);
        case REPORT_ID_HIDPP_LONG:
+               if (size != HIDPP_REPORT_LONG_LENGTH) {
+                       dev_err(&hdev->dev,
+                               "Long HID++ report of bad size (%d)", size);
+                       return false;
+               }
                return logi_dj_hidpp_event(hdev, report, data, size);
        }
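
Each report ID implies a fixed report length, and the hunk above now verifies that length before the buffer reaches a parser, so a truncated report from a broken or malicious device cannot cause out-of-bounds reads. The checks distilled into a sketch (constants as used in the driver above):

    static bool dj_report_size_ok(u8 report_id, int size)
    {
            switch (report_id) {
            case REPORT_ID_DJ_SHORT:
                    return size == DJREPORT_SHORT_LENGTH;
            case REPORT_ID_HIDPP_SHORT:
                    return size == HIDPP_REPORT_SHORT_LENGTH;
            case REPORT_ID_HIDPP_LONG:
                    return size == HIDPP_REPORT_LONG_LENGTH;
            default:
                    return false;
            }
    }
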
 
index 2f420c0..a93cefe 100644 (file)
@@ -282,6 +282,33 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
                (report->rap.sub_id == 0x41);
 }
 
+/**
+ * hidpp_prefix_name() prefixes the given name with "Logitech ".
+ */
+static void hidpp_prefix_name(char **name, int name_length)
+{
+#define PREFIX_LENGTH 9 /* "Logitech " */
+
+       int new_length;
+       char *new_name;
+
+       if (name_length > PREFIX_LENGTH &&
+           strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0)
+               /* The prefix is already in the name */
+               return;
+
+       new_length = PREFIX_LENGTH + name_length;
+       new_name = kzalloc(new_length, GFP_KERNEL);
+       if (!new_name)
+               return;
+
+       snprintf(new_name, new_length, "Logitech %s", *name);
+
+       kfree(*name);
+
+       *name = new_name;
+}
+
 /* -------------------------------------------------------------------------- */
 /* HID++ 1.0 commands                                                         */
 /* -------------------------------------------------------------------------- */
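
A sizing note on hidpp_prefix_name(): both callers below pass len + 1, so name_length already counts the terminating '\0'. new_length = PREFIX_LENGTH + name_length is therefore exactly the 9 bytes of "Logitech " plus the old string plus its NUL. For example, for a 4-character name such as "K750", name_length is 5 and the 14-byte buffer holds "Logitech K750" plus the terminator, which is precisely what snprintf(new_name, new_length, "Logitech %s", *name) writes.
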
@@ -321,6 +348,10 @@ static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev)
                return NULL;
 
        memcpy(name, &response.rap.params[2], len);
+
+       /* include the terminating '\0' */
+       hidpp_prefix_name(&name, len + 1);
+
        return name;
 }
 
@@ -498,6 +529,9 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp)
                index += ret;
        }
 
+       /* include the terminating '\0' */
+       hidpp_prefix_name(&name, __name_length + 1);
+
        return name;
 }
 
@@ -794,18 +828,25 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
 
        switch (data[0]) {
        case 0x02:
+               if (size < 2) {
+                       hid_err(hdev, "Received HID report of bad size (%d)",
+                               size);
+                       return 1;
+               }
                if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
                        input_event(wd->input, EV_KEY, BTN_LEFT,
                                        !!(data[1] & 0x01));
                        input_event(wd->input, EV_KEY, BTN_RIGHT,
                                        !!(data[1] & 0x02));
                        input_sync(wd->input);
+                       return 0;
                } else {
                        if (size < 21)
                                return 1;
                        return wtp_mouse_raw_xy_event(hidpp, &data[7]);
                }
        case REPORT_ID_HIDPP_LONG:
+               /* size is already checked in hidpp_raw_event. */
                if ((report->fap.feature_index != wd->mt_feature_index) ||
                    (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY))
                        return 1;
index 1a07e07..47d7e74 100644 (file)
@@ -35,6 +35,8 @@ static struct class *pyra_class;
 static void profile_activated(struct pyra_device *pyra,
                unsigned int new_profile)
 {
+       if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
+               return;
        pyra->actual_profile = new_profile;
        pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
 }
@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
        if (off != 0 || count != PYRA_SIZE_SETTINGS)
                return -EINVAL;
 
-       mutex_lock(&pyra->pyra_lock);
-
        settings = (struct pyra_settings const *)buf;
+       if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
+               return -EINVAL;
+
+       mutex_lock(&pyra->pyra_lock);
 
        retval = pyra_set_settings(usb_dev, settings);
        if (retval) {
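
Two things happen in the pyra hunks: profile indices coming from the hardware or from userspace are bounds-checked against ARRAY_SIZE(pyra->profile_settings) before being used as array subscripts, and in the sysfs write path the check is moved ahead of mutex_lock() so the -EINVAL return needs no unlock. The resulting shape, as a sketch:

    settings = (struct pyra_settings const *)buf;
    if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
            return -EINVAL;            /* nothing held yet, nothing to undo */

    mutex_lock(&pyra->pyra_lock);
    /* ... work that needs the lock ... */
    mutex_unlock(&pyra->pyra_lock);
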
index d32037c..d43e967 100644 (file)
@@ -706,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid)
 
 static void i2c_hid_stop(struct hid_device *hid)
 {
-       struct i2c_client *client = hid->driver_data;
-       struct i2c_hid *ihid = i2c_get_clientdata(client);
-
        hid->claimed = 0;
-
-       i2c_hid_free_buffers(ihid);
 }
 
 static int i2c_hid_open(struct hid_device *hid)
index dc89be9..b27b3d3 100644 (file)
@@ -124,6 +124,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
index 1232336..40dfbc0 100644 (file)
@@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb,
        if (action != BUS_NOTIFY_REMOVED_DEVICE)
                return 0;
 
-       /*
-        * If the device is still attached to a device driver we can't
-        * tear down the domain yet as DMA mappings may still be in use.
-        * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
-        */
-       if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
-               return 0;
-
        domain = find_domain(dev);
        if (!domain)
                return 0;
@@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                                domain_remove_one_dev_info(old_domain, dev);
                        else
                                domain_remove_dev_info(old_domain);
+
+                       if (!domain_type_is_vm_or_si(old_domain) &&
+                            list_empty(&old_domain->devices))
+                               domain_exit(old_domain);
                }
        }
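
With the BUS_NOTIFY_DEL_DEVICE special case gone, the notifier tears the domain down as soon as BUS_NOTIFY_REMOVED_DEVICE arrives, and the attach path frees a stale dynamically-created domain once its device list drains. The lifetime rule distilled, using the names from the hunk above:

    /* after detaching 'dev' from 'old_domain' ... */
    if (!domain_type_is_vm_or_si(old_domain) &&
        list_empty(&old_domain->devices))
            domain_exit(old_domain);   /* last user gone: release it */
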
 
index 68dfb0f..7486931 100644 (file)
@@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
 
 static u64 ipmmu_page_prot(unsigned int prot, u64 type)
 {
-       u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
+       u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
                   | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
                   | ARM_VMSA_PTE_NS | type;
 
@@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type)
        if (prot & IOMMU_CACHE)
                pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
 
-       if (prot & IOMMU_EXEC)
-               pgprot &= ~ARM_VMSA_PTE_XN;
+       if (prot & IOMMU_NOEXEC)
+               pgprot |= ARM_VMSA_PTE_XN;
        else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
                /* If no access create a faulting entry to avoid TLB fills. */
                pgprot &= ~ARM_VMSA_PTE_PAGE;
index b2023af..6a8b1ec 100644 (file)
@@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = {
        .remove = rk_iommu_remove,
        .driver = {
                   .name = "rk_iommu",
-                  .owner = THIS_MODULE,
                   .of_match_table = of_match_ptr(rk_iommu_dt_ids),
        },
 };
index a82e542..0b38060 100644 (file)
@@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci)
        byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
        byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
        byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
-       byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00";
+       byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
        byte force_mt_info = false;
        byte dir;
        dword d;
index 26515c2..25e4197 100644 (file)
@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
        led_dat->sata = 0;
        led_dat->cdev.brightness = LED_OFF;
        led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
-       /*
-        * If available, expose the SATA activity blink capability through
-        * a "sata" sysfs attribute.
-        */
-       if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
-               led_dat->cdev.groups = netxbig_led_groups;
        led_dat->mode_addr = template->mode_addr;
        led_dat->mode_val = template->mode_val;
        led_dat->bright_addr = template->bright_addr;
        led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
        led_dat->timer = pdata->timer;
        led_dat->num_timer = pdata->num_timer;
+       /*
+        * If available, expose the SATA activity blink capability through
+        * a "sata" sysfs attribute.
+        */
+       if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
+               led_dat->cdev.groups = netxbig_led_groups;
 
        return led_classdev_register(&pdev->dev, &led_dat->cdev);
 }
index e3e56d3..970314e 100644 (file)
@@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
        { "INT33BB"  , "3" , &sdhci_acpi_slot_int_sd },
        { "INT33C6"  , NULL, &sdhci_acpi_slot_int_sdio },
        { "INT3436"  , NULL, &sdhci_acpi_slot_int_sdio },
+       { "INT344D"  , NULL, &sdhci_acpi_slot_int_sdio },
        { "PNP0D40"  },
        { },
 };
@@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
        { "INT33BB"  },
        { "INT33C6"  },
        { "INT3436"  },
+       { "INT344D"  },
        { "PNP0D40"  },
        { },
 };
index 0342775..4f38554 100644 (file)
@@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = {
                .subdevice      = PCI_ANY_ID,
                .driver_data    = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
        },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_SDIO,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_SD,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
+       },
+
        {
                .vendor         = PCI_VENDOR_ID_O2,
                .device         = PCI_DEVICE_ID_O2_8120,
index d57c3d1..1ec684d 100644 (file)
@@ -21,6 +21,9 @@
 #define PCI_DEVICE_ID_INTEL_CLV_EMMC0  0x08e5
 #define PCI_DEVICE_ID_INTEL_CLV_EMMC1  0x08e6
 #define PCI_DEVICE_ID_INTEL_QRK_SD     0x08A7
+#define PCI_DEVICE_ID_INTEL_SPT_EMMC   0x9d2b
+#define PCI_DEVICE_ID_INTEL_SPT_SDIO   0x9d2c
+#define PCI_DEVICE_ID_INTEL_SPT_SD     0x9d2d
 
 /*
  * PCI registers
index 4523887..ca3424e 100644 (file)
@@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
        if (IS_ERR(host))
                return PTR_ERR(host);
 
-       if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
-               ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
-               if (ret < 0)
-                       goto err_mbus_win;
-       }
-
-
        pltfm_host = sdhci_priv(host);
        pltfm_host->priv = pxa;
 
@@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
        if (!IS_ERR(pxa->clk_core))
                clk_prepare_enable(pxa->clk_core);
 
+       if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+               ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+               if (ret < 0)
+                       goto err_mbus_win;
+       }
+
        /* enable 1/8V DDR capable */
        host->mmc->caps |= MMC_CAP_1_8V_DDR;
 
@@ -396,11 +395,11 @@ err_add_host:
        pm_runtime_disable(&pdev->dev);
 err_of_parse:
 err_cd_req:
+err_mbus_win:
        clk_disable_unprepare(pxa->clk_io);
        if (!IS_ERR(pxa->clk_core))
                clk_disable_unprepare(pxa->clk_core);
 err_clk_get:
-err_mbus_win:
        sdhci_pltfm_free(pdev);
        return ret;
 }
index cbb245b..1453cd1 100644 (file)
@@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host)
 
                del_timer_sync(&host->tuning_timer);
                host->flags &= ~SDHCI_NEEDS_RETUNING;
-               host->mmc->max_blk_count =
-                       (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
        }
        sdhci_enable_card_detection(host);
 }
@@ -1353,6 +1351,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
        sdhci_runtime_pm_get(host);
 
+       present = mmc_gpio_get_cd(host->mmc);
+
        spin_lock_irqsave(&host->lock, flags);
 
        WARN_ON(host->mrq != NULL);
@@ -1381,7 +1381,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
         *     zero: cd-gpio is used, and card is removed
         *     one: cd-gpio is used, and card is present
         */
-       present = mmc_gpio_get_cd(host->mmc);
        if (present < 0) {
                /* If polling, assume that the card is always present. */
                if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -1880,6 +1879,18 @@ static int sdhci_card_busy(struct mmc_host *mmc)
        return !(present_state & SDHCI_DATA_LVL_MASK);
 }
 
+static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&host->lock, flags);
+       host->flags |= SDHCI_HS400_TUNING;
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       return 0;
+}
+
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
        struct sdhci_host *host = mmc_priv(mmc);
@@ -1887,10 +1898,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
        int tuning_loop_counter = MAX_TUNING_LOOP;
        int err = 0;
        unsigned long flags;
+       unsigned int tuning_count = 0;
+       bool hs400_tuning;
 
        sdhci_runtime_pm_get(host);
        spin_lock_irqsave(&host->lock, flags);
 
+       hs400_tuning = host->flags & SDHCI_HS400_TUNING;
+       host->flags &= ~SDHCI_HS400_TUNING;
+
+       if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+               tuning_count = host->tuning_count;
+
        /*
         * The Host Controller needs tuning only in case of SDR104 mode
         * and for SDR50 mode when Use Tuning for SDR50 is set in the
@@ -1899,8 +1918,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
         * tuning function has to be executed.
         */
        switch (host->timing) {
+       /* HS400 tuning is done in HS200 mode */
        case MMC_TIMING_MMC_HS400:
+               err = -EINVAL;
+               goto out_unlock;
+
        case MMC_TIMING_MMC_HS200:
+               /*
+                * Periodic re-tuning for HS400 is not expected to be needed, so
+                * disable it here.
+                */
+               if (hs400_tuning)
+                       tuning_count = 0;
+               break;
+
        case MMC_TIMING_UHS_SDR104:
                break;
 
@@ -1911,9 +1942,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
                /* FALLTHROUGH */
 
        default:
-               spin_unlock_irqrestore(&host->lock, flags);
-               sdhci_runtime_pm_put(host);
-               return 0;
+               goto out_unlock;
        }
 
        if (host->ops->platform_execute_tuning) {
@@ -2037,24 +2066,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
        }
 
 out:
-       /*
-        * If this is the very first time we are here, we start the retuning
-        * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
-        * flag won't be set, we check this condition before actually starting
-        * the timer.
-        */
-       if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
-           (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+       host->flags &= ~SDHCI_NEEDS_RETUNING;
+
+       if (tuning_count) {
                host->flags |= SDHCI_USING_RETUNING_TIMER;
-               mod_timer(&host->tuning_timer, jiffies +
-                       host->tuning_count * HZ);
-               /* Tuning mode 1 limits the maximum data length to 4MB */
-               mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
-       } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
-               host->flags &= ~SDHCI_NEEDS_RETUNING;
-               /* Reload the new initial value for timer */
-               mod_timer(&host->tuning_timer, jiffies +
-                         host->tuning_count * HZ);
+               mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
        }
 
        /*
@@ -2070,6 +2086,7 @@ out:
 
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+out_unlock:
        spin_unlock_irqrestore(&host->lock, flags);
        sdhci_runtime_pm_put(host);
 
@@ -2110,15 +2127,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
 {
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
+       int present;
 
        /* First check if client has provided their own card event */
        if (host->ops->card_event)
                host->ops->card_event(host);
 
+       present = sdhci_do_get_cd(host);
+
        spin_lock_irqsave(&host->lock, flags);
 
        /* Check host->mrq first in case we are runtime suspended */
-       if (host->mrq && !sdhci_do_get_cd(host)) {
+       if (host->mrq && !present) {
                pr_err("%s: Card removed during transfer!\n",
                        mmc_hostname(host->mmc));
                pr_err("%s: Resetting controller.\n",
@@ -2142,6 +2162,7 @@ static const struct mmc_host_ops sdhci_ops = {
        .hw_reset       = sdhci_hw_reset,
        .enable_sdio_irq = sdhci_enable_sdio_irq,
        .start_signal_voltage_switch    = sdhci_start_signal_voltage_switch,
+       .prepare_hs400_tuning           = sdhci_prepare_hs400_tuning,
        .execute_tuning                 = sdhci_execute_tuning,
        .card_event                     = sdhci_card_event,
        .card_busy      = sdhci_card_busy,
@@ -3260,8 +3281,9 @@ int sdhci_add_host(struct sdhci_host *host)
                mmc->max_segs = SDHCI_MAX_SEGS;
 
        /*
-        * Maximum number of sectors in one transfer. Limited by DMA boundary
-        * size (512KiB).
+        * Maximum number of sectors in one transfer. Limited by SDMA boundary
+        * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
+        * is less anyway.
         */
        mmc->max_req_size = 524288;
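
The tuning rework keys everything off two pieces of state: tuning_count, non-zero only for tuning mode 1, where a periodic re-tune timer is allowed, and the SDHCI_HS400_TUNING flag, which prepare_hs400_tuning() sets so that the subsequent tuning pass, executed while the bus is still in HS200, also disables periodic re-tuning. The expected call order from the core side, as a sketch rather than the exact mmc core code:

    if (mmc->ops->prepare_hs400_tuning)
            mmc->ops->prepare_hs400_tuning(mmc, &mmc->ios);  /* sets the flag */
    err = mmc->ops->execute_tuning(mmc, MMC_SEND_TUNING_BLOCK_HS200);
    /* ... then switch the bus from HS200 up to HS400 ... */
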
 
index 1fcd556..f3470d9 100644 (file)
@@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev)
        }
 
        db->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(db->clk))
+       if (IS_ERR(db->clk)) {
+               ret = PTR_ERR(db->clk);
                goto out;
+       }
 
        clk_prepare_enable(db->clk);
 
index 3498760..760c72c 100644 (file)
@@ -1170,10 +1170,6 @@ tx_request_irq_error:
 init_error:
        free_skbufs(dev);
 alloc_skbuf_error:
-       if (priv->phydev) {
-               phy_disconnect(priv->phydev);
-               priv->phydev = NULL;
-       }
 phy_error:
        return ret;
 }
@@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev)
        int ret;
        unsigned long int flags;
 
-       /* Stop and disconnect the PHY */
-       if (priv->phydev) {
+       /* Stop the PHY */
+       if (priv->phydev)
                phy_stop(priv->phydev);
-               phy_disconnect(priv->phydev);
-               priv->phydev = NULL;
-       }
 
        netif_stop_queue(dev);
        napi_disable(&priv->napi);
@@ -1525,6 +1518,10 @@ err_free_netdev:
 static int altera_tse_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
+       struct altera_tse_private *priv = netdev_priv(ndev);
+
+       if (priv->phydev)
+               phy_disconnect(priv->phydev);
 
        platform_set_drvdata(pdev, NULL);
        altera_tse_mdio_destroy(ndev);
index e398eda..c8af3ce 100644 (file)
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
        schedule_work(&alx->reset_wk);
 }
 
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
 {
        struct alx_rx_queue *rxq = &alx->rxq;
        struct alx_rrd *rrd;
        struct alx_buffer *rxb;
        struct sk_buff *skb;
        u16 length, rfd_cleaned = 0;
+       int work = 0;
 
-       while (budget > 0) {
+       while (work < budget) {
                rrd = &rxq->rrd[rxq->rrd_read_idx];
                if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
                        break;
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
                    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
                                  RRD_NOR) != 1) {
                        alx_schedule_reset(alx);
-                       return 0;
+                       return work;
                }
 
                rxb = &rxq->bufs[rxq->read_idx];
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
                }
 
                napi_gro_receive(&alx->napi, skb);
-               budget--;
+               work++;
 
 next_pkt:
                if (++rxq->read_idx == alx->rx_ringsz)
@@ -258,21 +259,22 @@ next_pkt:
        if (rfd_cleaned)
                alx_refill_rx_ring(alx, GFP_ATOMIC);
 
-       return budget > 0;
+       return work;
 }
 
 static int alx_poll(struct napi_struct *napi, int budget)
 {
        struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
        struct alx_hw *hw = &alx->hw;
-       bool complete = true;
        unsigned long flags;
+       bool tx_complete;
+       int work;
 
-       complete = alx_clean_tx_irq(alx) &&
-                  alx_clean_rx_irq(alx, budget);
+       tx_complete = alx_clean_tx_irq(alx);
+       work = alx_clean_rx_irq(alx, budget);
 
-       if (!complete)
-               return 1;
+       if (!tx_complete || work == budget)
+               return budget;
 
        napi_complete(&alx->napi);
 
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
 
        alx_post_write(hw);
 
-       return 0;
+       return work;
 }
 
 static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
index 553dcd8..96bf01b 100644 (file)
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
 }
 
 static void tg3_irq_quiesce(struct tg3 *tp)
+       __releases(tp->lock)
+       __acquires(tp->lock)
 {
        int i;
 
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
        tp->irq_sync = 1;
        smp_mb();
 
+       spin_unlock_bh(&tp->lock);
+
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
+
+       spin_lock_bh(&tp->lock);
 }
 
 /* Fully shutdown all tg3 driver activity elsewhere in the system.
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)
 
 /* tp->lock is held. */
 static int tg3_chip_reset(struct tg3 *tp)
+       __releases(tp->lock)
+       __acquires(tp->lock)
 {
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
        }
        smp_mb();
 
+       tg3_full_unlock(tp);
+
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
 
+       tg3_full_lock(tp, 0);
+
        if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
 {
        struct tg3 *tp = (struct tg3 *) __opaque;
 
-       if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
-               goto restart_timer;
-
        spin_lock(&tp->lock);
 
+       if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
+               spin_unlock(&tp->lock);
+               goto restart_timer;
+       }
+
        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_flag(tp, 57765_CLASS))
                tg3_chk_missed_msi(tp);
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;
 
+       rtnl_lock();
        tg3_full_lock(tp, 0);
 
        if (!netif_running(tp->dev)) {
                tg3_flag_clear(tp, RESET_TASK_PENDING);
                tg3_full_unlock(tp);
+               rtnl_unlock();
                return;
        }
 
@@ -11138,6 +11154,7 @@ out:
                tg3_phy_start(tp);
 
        tg3_flag_clear(tp, RESET_TASK_PENDING);
+       rtnl_unlock();
 }
 
 static int tg3_request_irq(struct tg3 *tp, int irq_num)
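
__releases() and __acquires() are sparse context annotations: they document that the function is entered with tp->lock held, drops it around the synchronize_irq() calls, which may sleep, and re-takes it before returning, and they let sparse check callers for lock imbalance. A minimal annotated sketch:

    static void quiesce_irqs(struct foo *f)
            __releases(f->lock)
            __acquires(f->lock)
    {
            spin_unlock_bh(&f->lock);
            synchronize_irq(f->irq);   /* can sleep; must not hold a spinlock */
            spin_lock_bh(&f->lock);
    }
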
index 55eb7f2..7ef55f5 100644 (file)
@@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
                res = PTR_ERR(lp->pclk);
                goto err_free_dev;
        }
-       clk_enable(lp->pclk);
+       clk_prepare_enable(lp->pclk);
 
        lp->hclk = ERR_PTR(-ENOENT);
        lp->tx_clk = ERR_PTR(-ENOENT);
@@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
 err_out_unregister_netdev:
        unregister_netdev(dev);
 err_disable_clock:
-       clk_disable(lp->pclk);
+       clk_disable_unprepare(lp->pclk);
 err_free_dev:
        free_netdev(dev);
        return res;
@@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev)
        kfree(lp->mii_bus->irq);
        mdiobus_free(lp->mii_bus);
        unregister_netdev(dev);
-       clk_disable(lp->pclk);
+       clk_disable_unprepare(lp->pclk);
        free_netdev(dev);
 
        return 0;
@@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
                netif_stop_queue(net_dev);
                netif_device_detach(net_dev);
 
-               clk_disable(lp->pclk);
+               clk_disable_unprepare(lp->pclk);
        }
        return 0;
 }
@@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev)
        struct macb *lp = netdev_priv(net_dev);
 
        if (netif_running(net_dev)) {
-               clk_enable(lp->pclk);
+               clk_prepare_enable(lp->pclk);
 
                netif_device_attach(net_dev);
                netif_start_queue(net_dev);
index 2215d43..a936ee8 100644 (file)
@@ -2430,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter)
         */
        n10g = 0;
        for_each_port(adapter, pidx)
-               n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+               n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
 
        /*
         * We default to 1 queue per non-10G port and up to # of cores queues
index 21dc9a2..60426cf 100644 (file)
@@ -323,6 +323,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
                return v;
 
        v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+       pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
+                       FW_PORT_CMD_MDIOADDR_G(v) : -1;
        pi->port_type = FW_PORT_CMD_PTYPE_G(v);
        pi->mod_type = FW_PORT_MOD_TYPE_NA;
 
index 705f334..b29e027 100644 (file)
@@ -1616,7 +1616,7 @@ static int enic_open(struct net_device *netdev)
                if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
                        netdev_err(netdev, "Unable to alloc receive buffers\n");
                        err = -ENOMEM;
-                       goto err_out_notify_unset;
+                       goto err_out_free_rq;
                }
        }
 
@@ -1649,7 +1649,9 @@ static int enic_open(struct net_device *netdev)
 
        return 0;
 
-err_out_notify_unset:
+err_out_free_rq:
+       for (i = 0; i < enic->rq_count; i++)
+               vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        enic_dev_notify_unset(enic);
 err_out_free_intr:
        enic_free_intr(enic);
index a379c3e..13d00a3 100644 (file)
@@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget)
                 * break out of while loop if there are no more
                 * packets waiting
                 */
-               if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
-                       napi_complete(napi);
-                       int_enable = dnet_readl(bp, INTR_ENB);
-                       int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
-                       dnet_writel(bp, int_enable, INTR_ENB);
-                       return 0;
-               }
+               if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
+                       break;
 
                cmd_word = dnet_readl(bp, RX_LEN_FIFO);
                pkt_len = cmd_word & 0xFFFF;
@@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget)
                               "size %u.\n", dev->name, pkt_len);
        }
 
-       budget -= npackets;
-
        if (npackets < budget) {
                /* We processed all packets available.  Tell NAPI it can
-                * stop polling then re-enable rx interrupts */
+                * stop polling then re-enable rx interrupts.
+                */
                napi_complete(napi);
                int_enable = dnet_readl(bp, INTR_ENB);
                int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
                dnet_writel(bp, int_enable, INTR_ENB);
-               return 0;
        }
 
-       /* There are still packets waiting */
-       return 1;
+       return npackets;
 }
 
 static irqreturn_t dnet_interrupt(int irq, void *dev_id)
index 469691a..4013292 100644 (file)
@@ -424,6 +424,8 @@ struct bufdesc_ex {
  * (40ns * 6).
  */
 #define FEC_QUIRK_BUG_CAPTURE          (1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO          (1 << 11)
 
 struct fec_enet_priv_tx_q {
        int index;
index 5ebdf8d..bba8777 100644 (file)
@@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = {
                .driver_data = 0,
        }, {
                .name = "imx28-fec",
-               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+                               FEC_QUIRK_SINGLE_MDIO,
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        int err = -ENXIO, i;
 
        /*
-        * The dual fec interfaces are not equivalent with enet-mac.
+        * The i.MX28 dual fec interfaces are not equivalent.
         * Here are the differences:
         *
         *  - fec0 supports MII & RMII modes while fec1 only supports RMII
@@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * mdio interface in board design, and need to be configured by
         * fec0 mii_bus.
         */
-       if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+       if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
                /* fec1 uses fec0 mii_bus */
                if (mii_cnt && fec0_mii_bus) {
                        fep->mii_bus = fec0_mii_bus;
@@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        mii_cnt++;
 
        /* save fec0 mii_bus */
-       if (fep->quirks & FEC_QUIRK_ENET_MAC)
+       if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
                fec0_mii_bus = fep->mii_bus;
 
        return 0;
@@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev)
                pdev->id_entry = of_id->data;
        fep->quirks = pdev->id_entry->driver_data;
 
+       fep->netdev = ndev;
        fep->num_rx_queues = num_rx_qs;
        fep->num_tx_queues = num_tx_qs;
 
index 5b8300a..4d61ef5 100644 (file)
@@ -281,6 +281,17 @@ config I40E_DCB
 
          If unsure, say N.
 
+config I40E_FCOE
+       bool "Fibre Channel over Ethernet (FCoE)"
+       default n
+       depends on I40E && DCB && FCOE
+       ---help---
+         Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
+         in the driver. This will create a new netdev for exclusive FCoE
+         use with XL710 FCoE offloads enabled.
+
+         If unsure, say N.
+
 config I40EVF
        tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
        depends on PCI_MSI
index 781065e..e9c3a87 100644 (file)
@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic)
                mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
        } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
           (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
-               !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+               (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
                /* enable/disable MDI/MDI-X auto-switching. */
                mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
                                nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
index 4b94ddb..c405819 100644 (file)
@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
        i40e_virtchnl_pf.o
 
 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
+i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
index 433a558..cb0de45 100644 (file)
@@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                if (desc_n >= ring->count || desc_n < 0) {
                        dev_info(&pf->pdev->dev,
                                 "descriptor %d not found\n", desc_n);
-                       return;
+                       goto out;
                }
                if (!is_rx_ring) {
                        txd = I40E_TX_DESC(ring, desc_n);
@@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
        } else {
                dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
        }
+
+out:
        kfree(ring);
 }
 
index 045b5c4..ad802dd 100644 (file)
@@ -78,7 +78,7 @@ do {                                                            \
 } while (0)
 
 typedef enum i40e_status_code i40e_status;
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifdef CONFIG_I40E_FCOE
 #define I40E_FCOE
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif
 #endif /* _I40E_OSDEP_H_ */
index 04b4414..cecb340 100644 (file)
@@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
        return le32_to_cpu(*(volatile __le32 *)head);
 }
 
+#define WB_STRIDE 0x3
+
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
  * @tx_ring:  tx ring to clean
@@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
 
+       /* check to see if there are any non-cache aligned descriptors
+        * waiting to be written back, and kick the hardware to force
+        * them to be written back in case of napi polling
+        */
+       if (budget &&
+           !((i & WB_STRIDE) == WB_STRIDE) &&
+           !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+           (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+               tx_ring->arm_wb = true;
+       else
+               tx_ring->arm_wb = false;
+
        if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
                dev_info(tx_ring->dev,
-                        "tx hang detected on queue %d, resetting adapter\n",
+                        "tx hang detected on queue %d, reset requested\n",
                         tx_ring->queue_index);
 
-               tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+               /* do not fire the reset immediately; wait for the stack to
+                * decide we are truly stuck. This also prevents every queue
+                * from simultaneously requesting a reset.
+                */
 
-               /* the adapter is about to reset, no point in enabling stuff */
-               return true;
+               /* the adapter is about to reset, no point in enabling polling */
+               budget = 1;
        }
 
        netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                }
        }
 
-       return budget > 0;
+       return !!budget;
+}
+
+/**
+ * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+       u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
+                 /* allow 00 to be written to the index */;
+
+       wr32(&vsi->back->hw,
+            I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+            val);
 }
 
 /**
@@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
         * so the total length of IPv4 header is IHL*4 bytes
         * The UDP_0 bit *may* be set if the *inner* header is UDP
         */
-       if (ipv4_tunnel &&
-           (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
-           !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+       if (ipv4_tunnel) {
                skb->transport_header = skb->mac_header +
                                        sizeof(struct ethhdr) +
                                        (ip_hdr(skb)->ihl * 4);
@@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                          skb->protocol == htons(ETH_P_8021AD))
                                          ? VLAN_HLEN : 0;
 
-               rx_udp_csum = udp_csum(skb);
-               iph = ip_hdr(skb);
-               csum = csum_tcpudp_magic(
-                               iph->saddr, iph->daddr,
-                               (skb->len - skb_transport_offset(skb)),
-                               IPPROTO_UDP, rx_udp_csum);
+               if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+                   (udp_hdr(skb)->check != 0)) {
+                       rx_udp_csum = udp_csum(skb);
+                       iph = ip_hdr(skb);
+                       csum = csum_tcpudp_magic(
+                                       iph->saddr, iph->daddr,
+                                       (skb->len - skb_transport_offset(skb)),
+                                       IPPROTO_UDP, rx_udp_csum);
 
-               if (udp_hdr(skb)->check != csum)
-                       goto checksum_fail;
+                       if (udp_hdr(skb)->check != csum)
+                               goto checksum_fail;
+
+               } /* else it's GRE and so no outer UDP header */
        }
 
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        struct i40e_vsi *vsi = q_vector->vsi;
        struct i40e_ring *ring;
        bool clean_complete = true;
+       bool arm_wb = false;
        int budget_per_ring;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        /* Since the actual Tx work is minimal, we can give the Tx a larger
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
-       i40e_for_each_ring(ring, q_vector->tx)
+       i40e_for_each_ring(ring, q_vector->tx) {
                clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+               arm_wb |= ring->arm_wb;
+       }
 
        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
@@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
 
        /* If work not completed, return budget and polling will return */
-       if (!clean_complete)
+       if (!clean_complete) {
+               if (arm_wb)
+                       i40e_force_wb(vsi, q_vector);
                return budget;
+       }
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
@@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       if (protocol == htons(ETH_P_IP)) {
-               iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+       if (iph->version == 4) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
-       } else if (skb_is_gso_v6(skb)) {
-
-               ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-                                          : ipv6_hdr(skb);
+       } else if (ipv6h->version == 6) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                ipv6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
-                               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       if (tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
-                       } else {
-                               *cd_tunneling |=
-                                        I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-                       }
                }
 
                /* Now set the ctx descriptor fields */
@@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
+               if (this_ip_hdr->version == 6) {
+                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       tx_flags |= I40E_TX_FLAGS_IPV6;
+               }
        } else {
                network_hdr_len = skb_network_header_len(skb);
                this_ip_hdr = ip_hdr(skb);
@@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
        /* Place RS bit on last descriptor of any packet that spans across the
         * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
         */
-#define WB_STRIDE 0x3
        if (((i & WB_STRIDE) != WB_STRIDE) &&
            (first <= &tx_ring->tx_bi[i]) &&
            (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
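
For reference, WB_STRIDE is 0x3, so (i & WB_STRIDE) == WB_STRIDE fires on descriptor indices 3, 7, 11, ...: once per four 16-byte descriptors, i.e. once per 64-byte cacheline. The RS (report status) bit is set there so completions are written back a cacheline at a time, and the new arm_wb path above kicks the hardware when the ring stops on a non-stride index and descriptors would otherwise sit un-written-back. The arithmetic in isolation:

    #define WB_STRIDE 0x3              /* mask for a stride of 4 */

    /* true for i = 3, 7, 11, ... (last descriptor of each 64B cacheline) */
    bool last_in_cacheline = (i & WB_STRIDE) == WB_STRIDE;
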
index e60d3ac..18b0023 100644 (file)
@@ -241,6 +241,7 @@ struct i40e_ring {
        unsigned long last_rx_timestamp;
 
        bool ring_active;               /* is ring online or not */
+       bool arm_wb;            /* do something to arm write back */
 
        /* stats structs */
        struct i40e_queue_stats stats;
index 051ea94..0f69ef8 100644 (file)
@@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
        u32 swmask = mask;
        u32 fwmask = mask << 16;
        s32 ret_val = 0;
-       s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+       s32 i = 0, timeout = 200;
 
        while (i < timeout) {
                if (igb_get_hw_semaphore(hw)) {
index 943cbd4..03e9eb0 100644 (file)
@@ -1829,7 +1829,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                err = mlx4_dev_cap(dev, &dev_cap);
                if (err) {
                        mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
-                       goto err_stop_fw;
+                       return err;
                }
 
                choose_steering_mode(dev, &dev_cap);
@@ -1860,7 +1860,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                                             &init_hca);
                if ((long long) icm_size < 0) {
                        err = icm_size;
-                       goto err_stop_fw;
+                       return err;
                }
 
                dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
@@ -1874,7 +1874,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
                err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
                if (err)
-                       goto err_stop_fw;
+                       return err;
 
                err = mlx4_INIT_HCA(dev, &init_hca);
                if (err) {
@@ -1886,7 +1886,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                        err = mlx4_query_func(dev, &dev_cap);
                        if (err < 0) {
                                mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
-                               goto err_stop_fw;
+                               goto err_close;
                        } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
                                dev->caps.num_eqs = dev_cap.max_eqs;
                                dev->caps.reserved_eqs = dev_cap.reserved_eqs;
@@ -2006,11 +2006,6 @@ err_free_icm:
        if (!mlx4_is_slave(dev))
                mlx4_free_icms(dev);
 
-err_stop_fw:
-       if (!mlx4_is_slave(dev)) {
-               mlx4_UNMAP_FA(dev);
-               mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-       }
        return err;
 }
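These hunks retire the err_stop_fw label for the paths where this function no longer owns the FW mapping; failures there must return directly (or jump to a later label) so the unwind frees only what the failing phase allocated. A minimal sketch of the ordering rule behind goto-style unwinding, with hypothetical alloc_a/alloc_b/free_a helpers:

int alloc_a(void); int alloc_b(void);   /* hypothetical, may fail */
void free_a(void);                      /* hypothetical cleanup */

static int setup(void)
{
        int err;

        err = alloc_a();
        if (err)
                return err;        /* nothing acquired yet: plain return */

        err = alloc_b();
        if (err)
                goto err_free_a;   /* unwind exactly what this path owns */

        return 0;

err_free_a:
        free_a();
        return err;
}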
 
index d6f5496..7094a9c 100644 (file)
@@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
 void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
        mlx4_mtt_cleanup(dev, &mr->mtt);
+       mr->mtt.order = -1;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
 
@@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
        int err;
 
-       mpt_entry->start       = cpu_to_be64(iova);
-       mpt_entry->length      = cpu_to_be64(size);
-       mpt_entry->entity_size = cpu_to_be32(page_shift);
-
        err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
        if (err)
                return err;
 
+       mpt_entry->start       = cpu_to_be64(mr->iova);
+       mpt_entry->length      = cpu_to_be64(mr->size);
+       mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
        mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
                                           MLX4_MPT_PD_FLAG_EN_INV);
        mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
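Moving the mpt_entry stores below mlx4_mtt_init(), and sourcing them from mr->iova/mr->size/mr->mtt.page_shift, guarantees a failed init leaves the entry untouched and that the published values always match the MTT that was actually created. The same publish-after-success ordering in miniature, with hypothetical names:

struct entry { unsigned long start, length; };

int init_backing(unsigned long size);   /* hypothetical, may fail */

static int rereg(struct entry *e, unsigned long iova, unsigned long size)
{
        int err = init_backing(size);

        if (err)
                return err;    /* e is untouched on failure */

        e->start  = iova;      /* publish only after success */
        e->length = size;
        return 0;
}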
index af09905..71af98b 100644 (file)
@@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
                                      &mgp->cmd_bus, GFP_KERNEL);
-       if (mgp->cmd == NULL)
+       if (!mgp->cmd) {
+               status = -ENOMEM;
                goto abort_with_enabled;
+       }
 
        mgp->board_span = pci_resource_len(pdev, 0);
        mgp->iomem_base = pci_resource_start(pdev, 0);
index c2f09af..4847713 100644 (file)
@@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
 {
        int i = 0;
 
-       while (i < 10) {
-               if (i)
-                       ssleep(1);
-
+       do {
                if (ql_sem_lock(qdev,
                                QL_DRVR_SEM_MASK,
                                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
@@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
                                      "driver lock acquired\n");
                        return 1;
                }
-       }
+               ssleep(1);
+       } while (++i < 10);
 
        netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
        return 0;
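Recasting the loop as do/while puts the first lock attempt before any ssleep(), so the common case acquires the lock with no added latency and the driver sleeps only after a failed try. The shape of the idiom, with hypothetical try_lock/sleep_1s helpers:

#include <stdbool.h>

bool try_lock(void);     /* hypothetical: one acquisition attempt */
void sleep_1s(void);     /* hypothetical stand-in for ssleep(1) */

static bool acquire_with_retry(void)
{
        int i = 0;

        do {
                if (try_lock())
                        return true;   /* fast path: no sleep before success */
                sleep_1s();
        } while (++i < 10);

        return false;                  /* give up after 10 attempts */
}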
index 9929b97..2528c3f 100644 (file)
@@ -2605,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        } else {
                dev_err(&pdev->dev,
                        "%s: failed. Please Reboot\n", __func__);
+               err = -ENODEV;
                goto err_out_free_hw;
        }
 
index c29ba80..37583a9 100644 (file)
@@ -473,6 +473,7 @@ static struct sh_eth_cpu_data r8a777x_data = {
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
+       .fdr_value      = 0x00000f0f,
 
        .apr            = 1,
        .mpr            = 1,
@@ -495,6 +496,7 @@ static struct sh_eth_cpu_data r8a779x_data = {
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
+       .fdr_value      = 0x00000f0f,
 
        .apr            = 1,
        .mpr            = 1,
@@ -536,6 +538,8 @@ static struct sh_eth_cpu_data sh7724_data = {
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
 
+       .trscer_err_mask = DESC_I_RINT8,
+
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
@@ -856,6 +860,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 
        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+       if (!cd->trscer_err_mask)
+               cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
 }
 
 static int sh_eth_check_reset(struct net_device *ndev)
@@ -1294,7 +1301,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        /* Frame recv control (enable multiple-packets per rx irq) */
        sh_eth_write(ndev, RMCR_RNC, RMCR);
 
-       sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
+       sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
 
        if (mdp->cd->bculr)
                sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */
index 22301bf..71f5de1 100644 (file)
@@ -369,6 +369,8 @@ enum DESC_I_BIT {
        DESC_I_RINT1 = 0x0001,
 };
 
+#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
+
 /* RPADIR */
 enum RPADIR_BIT {
        RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
@@ -470,6 +472,9 @@ struct sh_eth_cpu_data {
        unsigned long tx_check;
        unsigned long eesr_err_check;
 
+       /* Error mask */
+       unsigned long trscer_err_mask;
+
        /* hardware features */
        unsigned long irq_flags; /* IRQ configuration flags */
        unsigned no_psr:1;      /* EtherC DO NOT have PSR */
index c560f9a..64d1cef 100644 (file)
@@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 
                        /* Clear all mcast from ALE */
                        cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
-                                                priv->host_port);
+                                                priv->host_port, -1);
 
                        /* Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 {
        struct cpsw_priv *priv = netdev_priv(ndev);
+       int vid;
+
+       if (priv->data.dual_emac)
+               vid = priv->slaves[priv->emac_port].port_vlan;
+       else
+               vid = priv->data.default_vlan;
 
        if (ndev->flags & IFF_PROMISC) {
                /* Enable promiscuous mode */
@@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
        cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
 
        /* Clear all mcast from ALE */
-       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
+                                vid);
 
        if (!netdev_mc_empty(ndev)) {
                struct netdev_hw_addr *ha;
@@ -757,6 +764,14 @@ requeue:
 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
 {
        struct cpsw_priv *priv = dev_id;
+       int value = irq - priv->irqs_table[0];
+
+       /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
+        * is to make sure we will always write the correct value to the EOI
+        * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
+        * for TX Interrupt and 3 for MISC Interrupt.
+        */
+       cpdma_ctlr_eoi(priv->dma, value);
 
        cpsw_intr_disable(priv);
        if (priv->irq_enabled == true) {
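The value computed above leans on the assumption that the four CPSW interrupts are allocated contiguously, so the offset from irqs_table[0] doubles as the EOI code. A hedged sketch of that mapping:

/* EOI codes as described in the comment above */
enum cpsw_eoi { EOI_RX_THRESH = 0, EOI_RX = 1, EOI_TX = 2, EOI_MISC = 3 };

static int irq_to_eoi(int irq, const int irqs_table[4])
{
        int value = irq - irqs_table[0];   /* assumes a contiguous IRQ block */

        return (value >= 0 && value < 4) ? value : -1;  /* -1: not ours */
}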
@@ -786,8 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
        int                     num_tx, num_rx;
 
        num_tx = cpdma_chan_process(priv->txch, 128);
-       if (num_tx)
-               cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
        num_rx = cpdma_chan_process(priv->rxch, budget);
        if (num_rx < budget) {
@@ -795,7 +808,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
 
                napi_complete(napi);
                cpsw_intr_enable(priv);
-               cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
                prim_cpsw = cpsw_get_slave_priv(priv, 0);
                if (prim_cpsw->irq_enabled == false) {
                        prim_cpsw->irq_enabled = true;
@@ -1310,8 +1322,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
        napi_enable(&priv->napi);
        cpdma_ctlr_start(priv->dma);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
        prim_cpsw = cpsw_get_slave_priv(priv, 0);
        if (prim_cpsw->irq_enabled == false) {
@@ -1578,9 +1588,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
        cpdma_chan_start(priv->txch);
        cpdma_ctlr_int_ctrl(priv->dma, true);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
 }
 
 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
@@ -1620,9 +1627,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
        cpsw_interrupt(ndev->irq, priv);
        cpdma_ctlr_int_ctrl(priv->dma, true);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
 }
 #endif
 
index 097ebe7..5246b3a 100644 (file)
@@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
                cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
 }
 
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
 {
        u32 ale_entry[ALE_ENTRY_WORDS];
        int ret, idx;
@@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
                if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
                        continue;
 
+               /* If the vid passed is -1, remove all multicast entries from
+                * the table irrespective of vlan id. If a valid vlan id is
+                * passed, remove only multicast entries added to that vlan id;
+                * if the vlan id doesn't match, move on to the next entry.
+                */
+               if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
+                       continue;
+
                if (cpsw_ale_get_mcast(ale_entry)) {
                        u8 addr[6];
 
index c0d4127..af1e7ec 100644 (file)
@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
 
 int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
 int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
                       int flags, u16 vid);
 int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
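Callers pick between a global and a per-VLAN flush through the new vid argument. Hypothetical call sites mirroring the two uses in cpsw.c above (ale, host_port and vid stand in for the driver's fields):

/* promiscuous path: flush every multicast entry, any VLAN */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << host_port, -1);

/* set_rx_mode path: flush only entries learned on one VLAN */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << host_port, vid);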
index 9c2d91e..dbcbf0c 100644 (file)
@@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op)
        lp->regs = of_iomap(op->dev.of_node, 0);
        if (!lp->regs) {
                dev_err(&op->dev, "could not map temac regs.\n");
+               rc = -ENOMEM;
                goto nodev;
        }
 
@@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op)
        np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
        if (!np) {
                dev_err(&op->dev, "could not find DMA node\n");
+               rc = -ENODEV;
                goto err_iounmap;
        }
 
index c18a0c6..a6d2860 100644 (file)
@@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op)
        lp->regs = of_iomap(op->dev.of_node, 0);
        if (!lp->regs) {
                dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+               ret = -ENOMEM;
                goto nodev;
        }
        /* Setup checksum offload, but default to off if not specified */
@@ -1563,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op)
        np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
        if (!np) {
                dev_err(&op->dev, "could not find DMA node\n");
+               ret = -ENODEV;
                goto err_iounmap;
        }
        lp->dma_regs = of_iomap(np, 0);
index 2485879..9d4ce38 100644 (file)
@@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(dev, "no IRQ found\n");
+               rc = -ENXIO;
                goto error;
        }
 
index 93e2242..f7ff493 100644 (file)
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
 static void team_notify_peers_work(struct work_struct *work)
 {
        struct team *team;
+       int val;
 
        team = container_of(work, struct team, notify_peers.dw.work);
 
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
                schedule_delayed_work(&team->notify_peers.dw, 0);
                return;
        }
+       val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+       if (val < 0) {
+               rtnl_unlock();
+               return;
+       }
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
        rtnl_unlock();
-       if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+       if (val)
                schedule_delayed_work(&team->notify_peers.dw,
                                      msecs_to_jiffies(team->notify_peers.interval));
 }
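atomic_dec_if_positive() refuses to take the counter below zero and reports the post-decrement value, which closes the window where a concurrent re-arm could race the old atomic_dec_and_test() into an underflow. The pattern in isolation, as a sketch against the kernel atomics API:

#include <linux/atomic.h>

/* <0: nothing pending, bail; 0: last run; >0: caller should reschedule */
static int take_pending(atomic_t *count_pending)
{
        int val = atomic_dec_if_positive(count_pending);

        if (val < 0)
                return -1;   /* counter was already 0: skip the work */

        /* ... perform the notification here ... */

        return val;          /* >0 means more runs remain */
}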
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
 static void team_mcast_rejoin_work(struct work_struct *work)
 {
        struct team *team;
+       int val;
 
        team = container_of(work, struct team, mcast_rejoin.dw.work);
 
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
                schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                return;
        }
+       val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+       if (val < 0) {
+               rtnl_unlock();
+               return;
+       }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
        rtnl_unlock();
-       if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+       if (val)
                schedule_delayed_work(&team->mcast_rejoin.dw,
                                      msecs_to_jiffies(team->mcast_rejoin.interval));
 }
index dcb6d33..1e9cdca 100644 (file)
@@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
         awd.done = 0;
 
         urb->context = &awd;
-        status = usb_submit_urb(urb, GFP_NOIO);
+        status = usb_submit_urb(urb, GFP_ATOMIC);
         if (status) {
                 // something went wrong
                 usb_free_urb(urb);
index b8a82b8..602dc66 100644 (file)
@@ -56,6 +56,8 @@ struct qmi_wwan_state {
 /* default ethernet address used by the modem */
 static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
 
+static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};
+
 /* Make up an ethernet header if the packet doesn't have one.
  *
 * A firmware bug common among several devices causes them to send raw
@@ -332,10 +334,12 @@ next_desc:
                usb_driver_release_interface(driver, info->data);
        }
 
-       /* Never use the same address on both ends of the link, even
-        * if the buggy firmware told us to.
+       /* Never use the same address on both ends of the link, even if the
+        * buggy firmware told us to. Likewise, if the device is assigned the
+        * well-known buggy firmware MAC address, replace it with a random one.
         */
-       if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+       if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) ||
+           ether_addr_equal(dev->net->dev_addr, buggy_fw_addr))
                eth_hw_addr_random(dev->net);
 
        /* make MAC addr easily distinguishable from an IP header */
index 2d1c77e..57ec23e 100644 (file)
@@ -1897,6 +1897,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
        netif_wake_queue(netdev);
 }
 
+static netdev_features_t
+rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
+                      netdev_features_t features)
+{
+       u32 mss = skb_shinfo(skb)->gso_size;
+       int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
+       int offset = skb_transport_offset(skb);
+
+       if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+               features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+       else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
+               features &= ~NETIF_F_GSO_MASK;
+
+       return features;
+}
+
 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
                                      struct net_device *netdev)
 {
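ndo_features_check gives a driver a per-skb veto on offloads just before transmit: here checksum and TSO are dropped when the transport header lies beyond what the TX descriptor can encode, and GSO alone is dropped when the frame would overflow the aggregation buffer. A stripped-down sketch of such a hook, with a hypothetical offset limit:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_features_t
sketch_features_check(struct sk_buff *skb, struct net_device *dev,
                      netdev_features_t features)
{
        const int max_offset = 0x7f;   /* hypothetical descriptor limit */

        /* header too deep for the hardware: fall back to software paths */
        if (skb_transport_offset(skb) > max_offset)
                features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

        return features;
}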
@@ -3706,6 +3722,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
        .ndo_set_mac_address    = rtl8152_set_mac_address,
        .ndo_change_mtu         = rtl8152_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
+       .ndo_features_check     = rtl8152_features_check,
 };
 
 static void r8152b_get_version(struct r8152 *tp)
index e5be2d2..a5f9198 100644 (file)
@@ -69,8 +69,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  10
-#define IWL3160_UCODE_API_MAX  10
+#define IWL7260_UCODE_API_MAX  12
+#define IWL3160_UCODE_API_MAX  12
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   10
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
 #define IWL7265D_FW_PRE "iwlwifi-7265D-"
-#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_7000         0
 
index bf0a95c..3668fc5 100644 (file)
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  10
+#define IWL8000_UCODE_API_MAX  12
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   10
index f2a047f..1bbe4fc 100644 (file)
@@ -243,6 +243,9 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *     longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
+ *     regardless of the band or the number of probes. FW will calculate
+ *     the actual dwell time.
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID     = BIT(0),
@@ -253,6 +256,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_LMAC_SCAN             = BIT(6),
        IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF     = BIT(7),
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
+       IWL_UCODE_TLV_API_BASIC_DWELL           = BIT(13),
 };
 
 /**
index 1f2acf4..201846d 100644 (file)
@@ -672,6 +672,7 @@ struct iwl_scan_channel_opt {
  * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
  * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
  *     and DS parameter set IEs into probe requests.
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
  */
 enum iwl_mvm_lmac_scan_flags {
        IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL         = BIT(0),
@@ -681,6 +682,7 @@ enum iwl_mvm_lmac_scan_flags {
        IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS   = BIT(4),
        IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED       = BIT(5),
        IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED     = BIT(6),
+       IWL_MVM_LMAC_SCAN_FLAG_MATCH            = BIT(9),
 };
 
 enum iwl_scan_priority {
index e5294d0..ec9a8e7 100644 (file)
@@ -171,15 +171,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
  * already included in the probe template, so we need to set only
  * req->n_ssids - 1 bits in addition to the first bit.
  */
-static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
+                                   enum ieee80211_band band, int n_ssids)
 {
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+               return 10;
        if (band == IEEE80211_BAND_2GHZ)
                return 20  + 3 * (n_ssids + 1);
        return 10  + 2 * (n_ssids + 1);
 }
 
-static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
+                                    enum ieee80211_band band)
 {
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+               return 110;
        return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
 }
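Worked through, the legacy formulas give, for example, a 2.4 GHz active dwell of 20 + 3 * (2 + 1) = 29 for two SSIDs, against a flat 10 once the firmware advertises IWL_UCODE_TLV_API_BASIC_DWELL and computes the real dwell itself. A sketch of the selection:

#include <stdbool.h>

static unsigned int active_dwell(bool basic_dwell, bool band_2ghz, int n_ssids)
{
        if (basic_dwell)
                return 10;   /* firmware calculates the actual dwell time */
        return band_2ghz ? 20 + 3 * (n_ssids + 1)
                         : 10 + 2 * (n_ssids + 1);
}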
 
@@ -331,7 +337,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                 */
                if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        u32 passive_dwell =
-                               iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ);
+                               iwl_mvm_get_passive_dwell(mvm,
+                                                         IEEE80211_BAND_2GHZ);
                        params->max_out_time = passive_dwell;
                } else {
                        params->passive_fragmented = true;
@@ -348,8 +355,8 @@ not_bound:
                        params->dwell[band].passive = frag_passive_dwell;
                else
                        params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(band);
-               params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+                               iwl_mvm_get_passive_dwell(mvm, band);
+               params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
                                                                      n_ssids);
        }
 }
@@ -1448,6 +1455,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
 
        if (iwl_mvm_scan_pass_all(mvm, req))
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+       else
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
 
        if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
index 4f15d9d..4333306 100644 (file)
@@ -108,8 +108,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
        }
 
-       /* tid_tspec will default to 0 = BE when QOS isn't enabled */
-       ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+       /* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
+       if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
+               ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+       else
+               ac = tid_to_mac80211_ac[0];
+
        tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
                        TX_CMD_FLG_BT_PRIO_POS;
 
index e56e77e..917431e 100644 (file)
@@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
        if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
                return false;
 
-       if (!mvm->cfg->rx_with_siso_diversity)
+       if (mvm->cfg->rx_with_siso_diversity)
                return false;
 
        ieee80211_iterate_active_interfaces_atomic(
index 2f0c4b1..d5aadb0 100644 (file)
@@ -527,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        else if (cfg == &iwl7265_n_cfg)
                cfg_7265d = &iwl7265d_n_cfg;
        if (cfg_7265d &&
-           (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+           (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
                cfg = cfg_7265d;
+               iwl_trans->cfg = cfg_7265d;
+       }
 #endif
 
        pci_set_drvdata(pdev, iwl_trans);
index 846a2e6..c70efb9 100644 (file)
@@ -666,7 +666,8 @@ tx_status_ok:
 }
 
 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
-                                   u8 *entry, int rxring_idx, int desc_idx)
+                                   struct sk_buff *new_skb, u8 *entry,
+                                   int rxring_idx, int desc_idx)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
        u8 tmp_one = 1;
        struct sk_buff *skb;
 
+       if (likely(new_skb)) {
+               skb = new_skb;
+               goto remap;
+       }
        skb = dev_alloc_skb(rtlpci->rxbuffersize);
        if (!skb)
                return 0;
-       rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
 
+remap:
        /* just set skb->cb to mapping addr for pci_unmap_single use */
        *((dma_addr_t *)skb->cb) =
                pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
@@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
        bufferaddress = *((dma_addr_t *)skb->cb);
        if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
                return 0;
+       rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
        if (rtlpriv->use_new_trx_flow) {
                rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
                                            HW_DESC_RX_PREPARE,
@@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                /*rx pkt */
                struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
                                      rtlpci->rx_ring[rxring_idx].idx];
+               struct sk_buff *new_skb;
 
                if (rtlpriv->use_new_trx_flow) {
                        rx_remained_cnt =
@@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
                                 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
 
+               /* get a new skb - if allocation fails, the old one is reused */
+               new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+               if (unlikely(!new_skb)) {
+                       pr_err("Allocation of new skb failed in %s\n",
+                              __func__);
+                       goto no_new;
+               }
                if (rtlpriv->use_new_trx_flow) {
                        buffer_desc =
                          &rtlpci->rx_ring[rxring_idx].buffer_desc
@@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                        schedule_work(&rtlpriv->works.lps_change_work);
                }
 end:
+               skb = new_skb;
+no_new:
                if (rtlpriv->use_new_trx_flow) {
-                       _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+                       _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
                                                 rxring_idx,
-                                              rtlpci->rx_ring[rxring_idx].idx);
+                                                rtlpci->rx_ring[rxring_idx].idx);
                } else {
-                       _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+                       _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+                                                rxring_idx,
                                                 rtlpci->rx_ring[rxring_idx].idx);
-
                        if (rtlpci->rx_ring[rxring_idx].idx ==
                            rtlpci->rxringcount - 1)
                                rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
@@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
                rtlpci->rx_ring[rxring_idx].idx = 0;
                for (i = 0; i < rtlpci->rxringcount; i++) {
                        entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
-                       if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+                       if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
                                                      rxring_idx, i))
                                return -ENOMEM;
                }
@@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
 
                for (i = 0; i < rtlpci->rxringcount; i++) {
                        entry = &rtlpci->rx_ring[rxring_idx].desc[i];
-                       if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+                       if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
                                                      rxring_idx, i))
                                return -ENOMEM;
                }
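Allocating the replacement skb up front and re-posting the old buffer when allocation fails means a memory squeeze costs one dropped packet rather than a permanently shrunken RX ring. The recycle-on-failure shape, with hypothetical helpers:

#include <stddef.h>

void *alloc_buf(void);        /* hypothetical: may return NULL */
void post_rx(void *buf);      /* hypothetical: re-arm one RX descriptor */
void deliver(void *buf);      /* hypothetical: pass buffer up the stack */

static void rx_one(void *old_buf)
{
        void *new_buf = alloc_buf();

        if (!new_buf) {
                post_rx(old_buf);   /* recycle: drop this packet, keep the slot */
                return;
        }
        deliver(old_buf);           /* hand the filled buffer upward */
        post_rx(new_buf);           /* and refill the descriptor */
}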
index efbaf2a..794204e 100644 (file)
@@ -737,6 +737,7 @@ static void connect(struct backend_info *be)
                }
 
                queue->remaining_credit = credit_bytes;
+               queue->credit_usec = credit_usec;
 
                err = connect_rings(be, queue);
                if (err) {
index 22bcb4e..d8c1076 100644 (file)
@@ -88,10 +88,8 @@ struct netfront_cb {
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
 struct netfront_stats {
-       u64                     rx_packets;
-       u64                     tx_packets;
-       u64                     rx_bytes;
-       u64                     tx_bytes;
+       u64                     packets;
+       u64                     bytes;
        struct u64_stats_sync   syncp;
 };
 
@@ -160,7 +158,8 @@ struct netfront_info {
        struct netfront_queue *queues;
 
        /* Statistics */
-       struct netfront_stats __percpu *stats;
+       struct netfront_stats __percpu *rx_stats;
+       struct netfront_stats __percpu *tx_stats;
 
        atomic_t rx_gso_checksum_fixup;
 };
@@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
-       struct netfront_stats *stats = this_cpu_ptr(np->stats);
+       struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
        struct xen_netif_tx_request *tx;
        char *data = skb->data;
        RING_IDX i;
@@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (notify)
                notify_remote_via_irq(queue->tx_irq);
 
-       u64_stats_update_begin(&stats->syncp);
-       stats->tx_bytes += skb->len;
-       stats->tx_packets++;
-       u64_stats_update_end(&stats->syncp);
+       u64_stats_update_begin(&tx_stats->syncp);
+       tx_stats->bytes += skb->len;
+       tx_stats->packets++;
+       u64_stats_update_end(&tx_stats->syncp);
 
        /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
        xennet_tx_buf_gc(queue);
@@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 static int handle_incoming_queue(struct netfront_queue *queue,
                                 struct sk_buff_head *rxq)
 {
-       struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+       struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
        int packets_dropped = 0;
        struct sk_buff *skb;
 
@@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue,
                        continue;
                }
 
-               u64_stats_update_begin(&stats->syncp);
-               stats->rx_packets++;
-               stats->rx_bytes += skb->len;
-               u64_stats_update_end(&stats->syncp);
+               u64_stats_update_begin(&rx_stats->syncp);
+               rx_stats->packets++;
+               rx_stats->bytes += skb->len;
+               u64_stats_update_end(&rx_stats->syncp);
 
                /* Pass it up. */
                napi_gro_receive(&queue->napi, skb);
@@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
        int cpu;
 
        for_each_possible_cpu(cpu) {
-               struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+               struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+               struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;
 
                do {
-                       start = u64_stats_fetch_begin_irq(&stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+                       tx_packets = tx_stats->packets;
+                       tx_bytes = tx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
 
-                       rx_packets = stats->rx_packets;
-                       tx_packets = stats->tx_packets;
-                       rx_bytes = stats->rx_bytes;
-                       tx_bytes = stats->tx_bytes;
-               } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+               do {
+                       start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+                       rx_packets = rx_stats->packets;
+                       rx_bytes = rx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
 
                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
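Splitting rx and tx into separate per-cpu structs also splits the seqcount domains, so each fetch/retry loop above spins only against its own writer. The reader idiom, as a sketch against the u64_stats API:

#include <linux/u64_stats_sync.h>

struct pcpu_stats {
        u64                     packets, bytes;
        struct u64_stats_sync   syncp;
};

static void read_stats(struct pcpu_stats *s, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&s->syncp);
                *packets = s->packets;   /* snapshot both fields together */
                *bytes   = s->bytes;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));
}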
@@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = {
 #endif
 };
 
+static void xennet_free_netdev(struct net_device *netdev)
+{
+       struct netfront_info *np = netdev_priv(netdev);
+
+       free_percpu(np->rx_stats);
+       free_percpu(np->tx_stats);
+       free_netdev(netdev);
+}
+
 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 {
        int err;
@@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        np->queues = NULL;
 
        err = -ENOMEM;
-       np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
-       if (np->stats == NULL)
+       np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+       if (np->rx_stats == NULL)
+               goto exit;
+       np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+       if (np->tx_stats == NULL)
                goto exit;
 
        netdev->netdev_ops      = &xennet_netdev_ops;
@@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        return netdev;
 
  exit:
-       free_netdev(netdev);
+       xennet_free_netdev(netdev);
        return ERR_PTR(err);
 }
 
@@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev,
        return 0;
 
  fail:
-       free_netdev(netdev);
+       xennet_free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);
        return err;
 }
@@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev)
                info->queues = NULL;
        }
 
-       free_percpu(info->stats);
-
-       free_netdev(info->netdev);
+       xennet_free_netdev(info->netdev);
 
        return 0;
 }
index ba74f0a..3c22dbe 100644 (file)
@@ -89,6 +89,7 @@ struct rockchip_iomux {
  * @reg_pull: optional separate register for additional pull settings
  * @clk: clock of the gpio bank
  * @irq: interrupt of the gpio bank
+ * @saved_enables: Saved content of GPIO_INTEN at suspend time.
  * @pin_base: first pin number
  * @nr_pins: number of pins in this bank
  * @name: name of the bank
@@ -107,6 +108,7 @@ struct rockchip_pin_bank {
        struct regmap                   *regmap_pull;
        struct clk                      *clk;
        int                             irq;
+       u32                             saved_enables;
        u32                             pin_base;
        u8                              nr_pins;
        char                            *name;
@@ -1543,6 +1545,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
        return 0;
 }
 
+static void rockchip_irq_suspend(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+
+       bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN);
+       irq_reg_writel(gc, gc->wake_active, GPIO_INTEN);
+}
+
+static void rockchip_irq_resume(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+
+       irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN);
+}
+
+static void rockchip_irq_disable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       u32 val;
+
+       irq_gc_lock(gc);
+
+       val = irq_reg_readl(gc, GPIO_INTEN);
+       val &= ~d->mask;
+       irq_reg_writel(gc, val, GPIO_INTEN);
+
+       irq_gc_unlock(gc);
+}
+
+static void rockchip_irq_enable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       u32 val;
+
+       irq_gc_lock(gc);
+
+       val = irq_reg_readl(gc, GPIO_INTEN);
+       val |= d->mask;
+       irq_reg_writel(gc, val, GPIO_INTEN);
+
+       irq_gc_unlock(gc);
+}
+
 static int rockchip_interrupts_register(struct platform_device *pdev,
                                                struct rockchip_pinctrl *info)
 {
@@ -1581,12 +1628,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
                gc = irq_get_domain_generic_chip(bank->domain, 0);
                gc->reg_base = bank->reg_base;
                gc->private = bank;
-               gc->chip_types[0].regs.mask = GPIO_INTEN;
+               gc->chip_types[0].regs.mask = GPIO_INTMASK;
                gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
                gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
-               gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
-               gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+               gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+               gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+               gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
+               gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
                gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+               gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
+               gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
                gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
                gc->wake_enabled = IRQ_MSK(bank->nr_pins);
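With masking moved to GPIO_INTMASK, GPIO_INTEN becomes a pure enable bitmap, which is what lets suspend save it wholesale and arm only gc->wake_active for the sleep window. The save-and-restrict shape, sketched with raw MMIO accessors:

#include <linux/io.h>

static u32 saved_enables;

static void sketch_suspend(void __iomem *inten, u32 wake_active)
{
        saved_enables = readl(inten);   /* remember the full enable set */
        writel(wake_active, inten);     /* keep only wake sources armed */
}

static void sketch_resume(void __iomem *inten)
{
        writel(saved_enables, inten);   /* restore pre-suspend enables */
}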
 
index 7c9d513..9e5ec00 100644 (file)
@@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
                                   struct seq_file *s, unsigned pin_id)
 {
        unsigned long config;
-       st_pinconf_get(pctldev, pin_id, &config);
 
+       mutex_unlock(&pctldev->mutex);
+       st_pinconf_get(pctldev, pin_id, &config);
+       mutex_lock(&pctldev->mutex);
        seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n"
                "\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
                "de:%ld,rt-clk:%ld,rt-delay:%ld]",
@@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = {
 
 static struct irq_chip st_gpio_irqchip = {
        .name           = "GPIO",
+       .irq_disable    = st_gpio_irq_mask,
        .irq_mask       = st_gpio_irq_mask,
        .irq_unmask     = st_gpio_irq_unmask,
        .irq_set_type   = st_gpio_irq_set_type,
index 91e97ec..4d41bf7 100644 (file)
@@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id)
  */
 static inline int ap_test_config_domain(unsigned int domain)
 {
-       if (!ap_configuration)
-               return 1;
-       return ap_test_config(ap_configuration->aqm, domain);
+       if (!ap_configuration) {          /* QCI not supported */
+               if (domain < 16)
+                       return 1; /* then domains 0...15 are configured */
+               else
+                       return 0;
+       } else {
+               return ap_test_config(ap_configuration->aqm, domain);
+       }
 }
 
 /**
index 12ca291..cce1cbc 100644 (file)
@@ -734,7 +734,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
         * Return target busy if we've received a non-zero retry_delay_timer
         * in a FCP_RSP.
         */
-       if (time_after(jiffies, fcport->retry_delay_timestamp))
+       if (fcport->retry_delay_timestamp == 0) {
+               /* retry delay not set */
+       } else if (time_after(jiffies, fcport->retry_delay_timestamp))
                fcport->retry_delay_timestamp = 0;
        else
                goto qc24_target_busy;
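The extra branch exists because 0 is both the "unset" sentinel and a legitimate jiffies value; only after ruling it out is the wrap-safe time_after() comparison meaningful. Sketched:

#include <linux/jiffies.h>
#include <linux/types.h>

/* true while a firmware-requested retry delay is still in force */
static bool retry_delay_active(unsigned long retry_delay_timestamp)
{
        if (retry_delay_timestamp == 0)
                return false;   /* no delay was ever set */
        if (time_after(jiffies, retry_delay_timestamp))
                return false;   /* delay expired (wrap-safe compare) */
        return true;            /* still inside the delay window */
}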
index 55f6774..aebde32 100644 (file)
@@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                goto reject;
        }
        if (!strncmp("=All", text_ptr, 4)) {
-               cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+               cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
        } else if (!strncmp("=iqn.", text_ptr, 5) ||
                   !strncmp("=eui.", text_ptr, 5)) {
-               cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+               cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
        } else {
                pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
                goto reject;
@@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                return -ENOMEM;
        }
        /*
-        * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+        * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
         * explicit case.
         */
-       if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+       if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
                text_ptr = strchr(text_in, '=');
                if (!text_ptr) {
                        pr_err("Unable to locate '=' string in text_in:"
@@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 
        spin_lock(&tiqn_lock);
        list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
-               if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+               if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
                     strcmp(tiqn->tiqn, text_ptr)) {
                        continue;
                }
@@ -3512,7 +3512,7 @@ eob:
                if (end_of_buf)
                        break;
 
-               if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+               if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
                        break;
        }
        spin_unlock(&tiqn_lock);
index 09a522b..cbcff38 100644 (file)
@@ -135,8 +135,8 @@ enum cmd_flags_table {
        ICF_CONTIG_MEMORY                       = 0x00000020,
        ICF_ATTACHED_TO_RQUEUE                  = 0x00000040,
        ICF_OOO_CMDSN                           = 0x00000080,
-       IFC_SENDTARGETS_ALL                     = 0x00000100,
-       IFC_SENDTARGETS_SINGLE                  = 0x00000200,
+       ICF_SENDTARGETS_ALL                     = 0x00000100,
+       ICF_SENDTARGETS_SINGLE                  = 0x00000200,
 };
 
 /* struct iscsi_cmd->i_state */
index 7653cfb..58f49ff 100644 (file)
@@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 }
 EXPORT_SYMBOL(se_dev_set_queue_depth);
 
-int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
-{
-       int block_size = dev->dev_attrib.block_size;
-
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change SE Device"
-                       " fabric_max_sectors while export_count is %d\n",
-                       dev, dev->export_count);
-               return -EINVAL;
-       }
-       if (!fabric_max_sectors) {
-               pr_err("dev[%p]: Illegal ZERO value for"
-                       " fabric_max_sectors\n", dev);
-               return -EINVAL;
-       }
-       if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
-               pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
-                       " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
-                               DA_STATUS_MAX_SECTORS_MIN);
-               return -EINVAL;
-       }
-       if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-               pr_err("dev[%p]: Passed fabric_max_sectors: %u"
-                       " greater than DA_STATUS_MAX_SECTORS_MAX:"
-                       " %u\n", dev, fabric_max_sectors,
-                       DA_STATUS_MAX_SECTORS_MAX);
-               return -EINVAL;
-       }
-       /*
-        * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-        */
-       if (!block_size) {
-               block_size = 512;
-               pr_warn("Defaulting to 512 for zero block_size\n");
-       }
-       fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
-                                                     block_size);
-
-       dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
-       pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
-                       dev, fabric_max_sectors);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
-
 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 {
        if (dev->export_count) {
@@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
                        dev, dev->export_count);
                return -EINVAL;
        }
-       if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+       if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
-                       " greater than fabric_max_sectors: %u\n", dev,
-                       optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+                       " greater than hw_max_sectors: %u\n", dev,
+                       optimal_sectors, dev->dev_attrib.hw_max_sectors);
                return -EINVAL;
        }
 
@@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        dev->dev_attrib.unmap_granularity_alignment =
                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
-       dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
-       dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
 
        xcopy_lun = &dev->xcopy_lun;
        xcopy_lun->lun_se_dev = dev;
@@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);
+       dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 
        dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
        dev->creation_time = get_jiffies_64();
index c2aea09..d836de2 100644 (file)
@@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        struct fd_prot fd_prot;
        sense_reason_t rc;
        int ret = 0;
-
+       /*
+        * We are currently limited by the number of iovecs (2048) per
+        * single vfs_[writev,readv] call.
+        */
+       if (cmd->data_length > FD_MAX_BYTES) {
+               pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+                      "FD_MAX_BYTES: %u iovec count limitiation\n",
+                       cmd->data_length, FD_MAX_BYTES);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        /*
         * Call vectorized fileio functions to map struct scatterlist
         * physical memory addresses to struct iovec virtual memory.
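If FD_MAX_BYTES is derived the obvious way from that iovec ceiling (an assumption, not stated in the hunk), the arithmetic with 4 KiB pages is 2048 * 4096 = 8 MiB per vectored call:

/* assumed derivation, for illustration only */
#define SKETCH_IOV_MAX       2048
#define SKETCH_PAGE_SIZE     4096
#define SKETCH_FD_MAX_BYTES  (SKETCH_IOV_MAX * SKETCH_PAGE_SIZE)  /* 8388608 */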
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = {
        &fileio_dev_attrib_hw_block_size.attr,
        &fileio_dev_attrib_block_size.attr,
        &fileio_dev_attrib_hw_max_sectors.attr,
-       &fileio_dev_attrib_fabric_max_sectors.attr,
        &fileio_dev_attrib_optimal_sectors.attr,
        &fileio_dev_attrib_hw_queue_depth.attr,
        &fileio_dev_attrib_queue_depth.attr,
index 3efff94..78346b8 100644 (file)
@@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev)
        q = bdev_get_queue(bd);
 
        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
-       dev->dev_attrib.hw_max_sectors = UINT_MAX;
+       dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
        /*
@@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = {
        &iblock_dev_attrib_hw_block_size.attr,
        &iblock_dev_attrib_block_size.attr,
        &iblock_dev_attrib_hw_max_sectors.attr,
-       &iblock_dev_attrib_fabric_max_sectors.attr,
        &iblock_dev_attrib_optimal_sectors.attr,
        &iblock_dev_attrib_hw_queue_depth.attr,
        &iblock_dev_attrib_queue_depth.attr,
index d56f2aa..283cf78 100644 (file)
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
 
                        return 0;
                }
+       } else if (we && registered_nexus) {
+               /*
+                * Reads are allowed for Write Exclusive locks
+                * from all registrants.
+                */
+               if (cmd->data_direction == DMA_FROM_DEVICE) {
+                       pr_debug("Allowing READ CDB: 0x%02x for %s"
+                               " reservation\n", cdb[0],
+                               core_scsi3_pr_dump_type(pr_reg_type));
+
+                       return 0;
+               }
        }
        pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
                " for %s reservation\n", transport_dump_cmd_direction(cmd),
index 60ebd17..98e83ac 100644 (file)
@@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
        &rd_mcp_dev_attrib_hw_block_size.attr,
        &rd_mcp_dev_attrib_block_size.attr,
        &rd_mcp_dev_attrib_hw_max_sectors.attr,
-       &rd_mcp_dev_attrib_fabric_max_sectors.attr,
        &rd_mcp_dev_attrib_optimal_sectors.attr,
        &rd_mcp_dev_attrib_hw_queue_depth.attr,
        &rd_mcp_dev_attrib_queue_depth.attr,
index 11bea19..cd4bed7 100644 (file)
@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 
        if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
                unsigned long long end_lba;
-
-               if (sectors > dev->dev_attrib.fabric_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds fabric_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.fabric_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
-               if (sectors > dev->dev_attrib.hw_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds backend hw_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.hw_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
 check_lba:
                end_lba = dev->transport->get_blocks(dev) + 1;
                if (cmd->t_task_lba + sectors > end_lba) {
index 1307600..4c71657 100644 (file)
@@ -505,7 +505,6 @@ static sense_reason_t
 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
-       u32 max_sectors;
        int have_tp = 0;
        int opt, min;
 
@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
        /*
         * Set MAXIMUM TRANSFER LENGTH
         */
-       max_sectors = min(dev->dev_attrib.fabric_max_sectors,
-                         dev->dev_attrib.hw_max_sectors);
-       put_unaligned_be32(max_sectors, &buf[8]);
+       put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
 
        /*
         * Set OPTIMAL TRANSFER LENGTH
index 8bfa61c..1157b55 100644 (file)
@@ -1118,7 +1118,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
        &tcmu_dev_attrib_hw_block_size.attr,
        &tcmu_dev_attrib_block_size.attr,
        &tcmu_dev_attrib_hw_max_sectors.attr,
-       &tcmu_dev_attrib_fabric_max_sectors.attr,
        &tcmu_dev_attrib_optimal_sectors.attr,
        &tcmu_dev_attrib_hw_queue_depth.attr,
        &tcmu_dev_attrib_queue_depth.attr,
index 231cabc..2c2ec76 100644 (file)
@@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
                        continue;
 
                result = acpi_bus_get_device(trt->source, &adev);
-               if (!result)
-                       acpi_create_platform_device(adev);
-               else
+               if (result)
                        pr_warn("Failed to get source ACPI device\n");
 
                result = acpi_bus_get_device(trt->target, &adev);
-               if (!result)
-                       acpi_create_platform_device(adev);
-               else
+               if (result)
                        pr_warn("Failed to get target ACPI device\n");
        }
 
@@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
 
                if (art->source) {
                        result = acpi_bus_get_device(art->source, &adev);
-                       if (!result)
-                               acpi_create_platform_device(adev);
-                       else
+                       if (result)
                                pr_warn("Failed to get source ACPI device\n");
                }
                if (art->target) {
                        result = acpi_bus_get_device(art->target, &adev);
-                       if (!result)
-                               acpi_create_platform_device(adev);
-                       else
+                       if (result)
                                pr_warn("Failed to get source ACPI device\n");
                }
        }
index 31bb553..0fe5dbb 100644 (file)
@@ -130,6 +130,8 @@ static int proc_thermal_add(struct device *dev,
        int ret;
 
        adev = ACPI_COMPANION(dev);
+       if (!adev)
+               return -ENODEV;
 
        status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
        if (ACPI_FAILURE(status))
index 255201f..7cc0122 100644 (file)
@@ -840,13 +840,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
 
 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-       u8 type;
        struct vfio_pci_device *vdev;
        struct iommu_group *group;
        int ret;
 
-       pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
-       if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
+       if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
                return -EINVAL;
 
        group = iommu_group_get(&pdev->dev);
index 14419a8..d415d69 100644 (file)
@@ -538,7 +538,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
                ++headcount;
                seg += in;
        }
-       heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen);
+       heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
        *iovcount = seg;
        if (unlikely(log))
                *log_num = nlogs;
index 01c01cb..d695b16 100644 (file)
@@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
        return 0;
 }
 
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+       switch (attr) {
+       case VIRTIO_SCSI_S_SIMPLE:
+               return TCM_SIMPLE_TAG;
+       case VIRTIO_SCSI_S_ORDERED:
+               return TCM_ORDERED_TAG;
+       case VIRTIO_SCSI_S_HEAD:
+               return TCM_HEAD_TAG;
+       case VIRTIO_SCSI_S_ACA:
+               return TCM_ACA_TAG;
+       default:
+               break;
+       }
+       return TCM_SIMPLE_TAG;
+}
+
 static void tcm_vhost_submission_work(struct work_struct *work)
 {
        struct tcm_vhost_cmd *cmd =
@@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
-                       cmd->tvc_task_attr, cmd->tvc_data_direction,
-                       TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
-                       NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
+                       vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
+                       cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
+                       sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+                       cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
index 2ef9529..9756f21 100644 (file)
@@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev)
 
        vp_free_vectors(vdev);
        kfree(vp_dev->vqs);
+       vp_dev->vqs = NULL;
 }
 
 static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
@@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
        return 0;
 }
 
-void virtio_pci_release_dev(struct device *_d)
-{
-       /*
-        * No need for a release method as we allocate/free
-        * all devices together with the pci devices.
-        * Provide an empty one to avoid getting a warning from core.
-        */
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int virtio_pci_freeze(struct device *dev)
 {
index adddb64..5a49728 100644 (file)
@@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev);
  * - ignore the affinity request if we're using INTX
  */
 int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
-void virtio_pci_release_dev(struct device *);
 
 int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id);
index 6c76f0f..a5486e6 100644 (file)
@@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
        .set_vq_affinity = vp_set_vq_affinity,
 };
 
+static void virtio_pci_release_dev(struct device *_d)
+{
+       struct virtio_device *vdev = dev_to_virtio(_d);
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+       /* As struct device is a kobject, it's not safe to
+        * free the memory (including the reference counter itself)
+        * until its release callback has run. */
+       kfree(vp_dev);
+}
+
 /* the PCI probing function */
 int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id)
@@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
        pci_iounmap(pci_dev, vp_dev->ioaddr);
        pci_release_regions(pci_dev);
        pci_disable_device(pci_dev);
-       kfree(vp_dev);
 }
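
The two hunks above move the kfree() of the virtio_pci_device out of remove()
and into the struct device release callback. A minimal sketch of the general
rule being applied, with hypothetical names (struct foo/foo_release are
illustrative, not part of this patch):

    struct foo {
            struct device dev;
            /* ... driver state ... */
    };

    static void foo_release(struct device *d)
    {
            struct foo *f = container_of(d, struct foo, dev);

            /* Runs only after the last reference is dropped, so freeing
             * here cannot race with a concurrent get_device() holder. */
            kfree(f);
    }

In remove(), the driver unregisters and drops its own reference but must not
kfree() directly; the device core invokes foo_release() once the embedded
kobject's refcount reaches zero.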
index 2d3e32e..8729cf6 100644 (file)
@@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
 {
        int ret;
        int type;
-       struct btrfs_tree_block_info *info;
        struct btrfs_extent_inline_ref *eiref;
 
        if (*ptr == (unsigned long)-1)
@@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
        }
 
        /* we can treat both ref types equally here */
-       info = (struct btrfs_tree_block_info *)(ei + 1);
        *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
-       *out_level = btrfs_tree_block_level(eb, info);
+
+       if (key->type == BTRFS_EXTENT_ITEM_KEY) {
+               struct btrfs_tree_block_info *info;
+
+               info = (struct btrfs_tree_block_info *)(ei + 1);
+               *out_level = btrfs_tree_block_level(eb, info);
+       } else {
+               ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
+               *out_level = (u8)key->offset;
+       }
 
        if (ret == 1)
                *ptr = (unsigned long)-1;
index 054577b..de4e70f 100644 (file)
@@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
 {
        struct btrfs_delayed_node *delayed_node;
 
+       /*
+        * we don't do delayed inode updates during log recovery because it
+        * leads to enospc problems.  This means we also can't do
+        * delayed inode refs
+        */
+       if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+               return -EAGAIN;
+
        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);
index a80b971..1511658 100644 (file)
@@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
        struct extent_buffer *leaf;
 
        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
-       if (ret < 0)
+       if (ret) {
+               if (ret > 0)
+                       ret = -ENOENT;
                goto fail;
-       BUG_ON(ret); /* Corruption */
+       }
 
        leaf = path->nodes[0];
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);
 fail:
-       if (ret) {
+       if (ret)
                btrfs_abort_transaction(trans, root, ret);
-               return ret;
-       }
-       return 0;
+       return ret;
 
 }
 
index e687bb0..8bf326a 100644 (file)
@@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
 out_fail:
        btrfs_end_transaction(trans, root);
-       if (drop_on_err)
+       if (drop_on_err) {
+               inode_dec_link_count(inode);
                iput(inode);
+       }
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return err;
index f2bb13a..9e1569f 100644 (file)
@@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
                ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
                                             flags, gen, mirror_num,
                                             have_csum ? csum : NULL);
-skip:
                if (ret)
                        return ret;
+skip:
                len -= l;
                logical += l;
                physical += l;
index f5013d9..c81c0e0 100644 (file)
@@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                }
        }
 
-       dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n",
+       dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
             inode, ceph_vinop(inode), len, locked_page);
 
        if (len > 0) {
index e5d3ead..bed4308 100644 (file)
@@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
        /* fallback to generic here if not in extents fmt */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
-               return __generic_block_fiemap(inode, fieinfo, start, len,
-                                             ext4_get_block);
+               return generic_block_fiemap(inode, fieinfo, start, len,
+                       ext4_get_block);
 
        if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
                return -EBADR;
index 513c12c..8131be8 100644 (file)
@@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
  * we determine this extent as a data or a hole according to whether the
  * page cache has data or not.
  */
-static int ext4_find_unwritten_pgoff(struct inode *inode, int whence,
-                                    loff_t endoff, loff_t *offset)
+static int ext4_find_unwritten_pgoff(struct inode *inode,
+                                    int whence,
+                                    struct ext4_map_blocks *map,
+                                    loff_t *offset)
 {
        struct pagevec pvec;
+       unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
+       loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;
 
+       blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
-
+       endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
 
        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;
@@ -403,144 +408,147 @@ out:
 static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
 {
        struct inode *inode = file->f_mapping->host;
-       struct fiemap_extent_info fie;
-       struct fiemap_extent ext[2];
-       loff_t next;
-       int i, ret = 0;
+       struct ext4_map_blocks map;
+       struct extent_status es;
+       ext4_lblk_t start, last, end;
+       loff_t dataoff, isize;
+       int blkbits;
+       int ret = 0;
 
        mutex_lock(&inode->i_mutex);
-       if (offset >= inode->i_size) {
+
+       isize = i_size_read(inode);
+       if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }
-       fie.fi_flags = 0;
-       fie.fi_extents_max = 2;
-       fie.fi_extents_start = (struct fiemap_extent __user *) &ext;
-       while (1) {
-               mm_segment_t old_fs = get_fs();
-
-               fie.fi_extents_mapped = 0;
-               memset(ext, 0, sizeof(*ext) * fie.fi_extents_max);
-
-               set_fs(get_ds());
-               ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
-               set_fs(old_fs);
-               if (ret)
+
+       blkbits = inode->i_sb->s_blocksize_bits;
+       start = offset >> blkbits;
+       last = start;
+       end = isize >> blkbits;
+       dataoff = offset;
+
+       do {
+               map.m_lblk = last;
+               map.m_len = end - last + 1;
+               ret = ext4_map_blocks(NULL, inode, &map, 0);
+               if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+                       if (last != start)
+                               dataoff = (loff_t)last << blkbits;
                        break;
+               }
 
-               /* No extents found, EOF */
-               if (!fie.fi_extents_mapped) {
-                       ret = -ENXIO;
+               /*
+                * If there is a delayed extent at this offset,
+                * it is treated as data.
+                */
+               ext4_es_find_delayed_extent_range(inode, last, last, &es);
+               if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+                       if (last != start)
+                               dataoff = (loff_t)last << blkbits;
                        break;
                }
-               for (i = 0; i < fie.fi_extents_mapped; i++) {
-                       next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
 
-                       if (offset < (loff_t)ext[i].fe_logical)
-                               offset = (loff_t)ext[i].fe_logical;
-                       /*
-                        * If extent is not unwritten, then it contains valid
-                        * data, mapped or delayed.
-                        */
-                       if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN))
-                               goto out;
+               /*
+                * If there is an unwritten extent at this offset,
+                * it is treated as data or a hole according to
+                * whether the page cache has data.
+                */
+               if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+                       int unwritten;
+                       unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
+                                                             &map, &dataoff);
+                       if (unwritten)
+                               break;
+               }
 
-                       /*
-                        * If there is a unwritten extent at this offset,
-                        * it will be as a data or a hole according to page
-                        * cache that has data or not.
-                        */
-                       if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
-                                                     next, &offset))
-                               goto out;
+               last++;
+               dataoff = (loff_t)last << blkbits;
+       } while (last <= end);
 
-                       if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
-                               ret = -ENXIO;
-                               goto out;
-                       }
-                       offset = next;
-               }
-       }
-       if (offset > inode->i_size)
-               offset = inode->i_size;
-out:
        mutex_unlock(&inode->i_mutex);
-       if (ret)
-               return ret;
 
-       return vfs_setpos(file, offset, maxsize);
+       if (dataoff > isize)
+               return -ENXIO;
+
+       return vfs_setpos(file, dataoff, maxsize);
 }
 
 /*
- * ext4_seek_hole() retrieves the offset for SEEK_HOLE
+ * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
  */
 static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
 {
        struct inode *inode = file->f_mapping->host;
-       struct fiemap_extent_info fie;
-       struct fiemap_extent ext[2];
-       loff_t next;
-       int i, ret = 0;
+       struct ext4_map_blocks map;
+       struct extent_status es;
+       ext4_lblk_t start, last, end;
+       loff_t holeoff, isize;
+       int blkbits;
+       int ret = 0;
 
        mutex_lock(&inode->i_mutex);
-       if (offset >= inode->i_size) {
+
+       isize = i_size_read(inode);
+       if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }
 
-       fie.fi_flags = 0;
-       fie.fi_extents_max = 2;
-       fie.fi_extents_start = (struct fiemap_extent __user *)&ext;
-       while (1) {
-               mm_segment_t old_fs = get_fs();
-
-               fie.fi_extents_mapped = 0;
-               memset(ext, 0, sizeof(*ext));
+       blkbits = inode->i_sb->s_blocksize_bits;
+       start = offset >> blkbits;
+       last = start;
+       end = isize >> blkbits;
+       holeoff = offset;
 
-               set_fs(get_ds());
-               ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
-               set_fs(old_fs);
-               if (ret)
-                       break;
+       do {
+               map.m_lblk = last;
+               map.m_len = end - last + 1;
+               ret = ext4_map_blocks(NULL, inode, &map, 0);
+               if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+                       last += ret;
+                       holeoff = (loff_t)last << blkbits;
+                       continue;
+               }
 
-               /* No extents found */
-               if (!fie.fi_extents_mapped)
-                       break;
+               /*
+                * If there is a delayed extent at this offset,
+                * we will skip this extent.
+                */
+               ext4_es_find_delayed_extent_range(inode, last, last, &es);
+               if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+                       last = es.es_lblk + es.es_len;
+                       holeoff = (loff_t)last << blkbits;
+                       continue;
+               }
 
-               for (i = 0; i < fie.fi_extents_mapped; i++) {
-                       next = (loff_t)(ext[i].fe_logical + ext[i].fe_length);
-                       /*
-                        * If extent is not unwritten, then it contains valid
-                        * data, mapped or delayed.
-                        */
-                       if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) {
-                               if (offset < (loff_t)ext[i].fe_logical)
-                                       goto out;
-                               offset = next;
+               /*
+                * If there is an unwritten extent at this offset,
+                * it is treated as data or a hole according to
+                * whether the page cache has data.
+                */
+               if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+                       int unwritten;
+                       unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
+                                                             &map, &holeoff);
+                       if (!unwritten) {
+                               last += ret;
+                               holeoff = (loff_t)last << blkbits;
                                continue;
                        }
-                       /*
-                        * If there is a unwritten extent at this offset,
-                        * it will be as a data or a hole according to page
-                        * cache that has data or not.
-                        */
-                       if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
-                                                     next, &offset))
-                               goto out;
-
-                       offset = next;
-                       if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
-                               goto out;
                }
-       }
-       if (offset > inode->i_size)
-               offset = inode->i_size;
-out:
+
+               /* find a hole */
+               break;
+       } while (last <= end);
+
        mutex_unlock(&inode->i_mutex);
-       if (ret)
-               return ret;
 
-       return vfs_setpos(file, offset, maxsize);
+       if (holeoff > isize)
+               holeoff = isize;
+
+       return vfs_setpos(file, holeoff, maxsize);
 }
 
 /*
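
The rewritten ext4_seek_data()/ext4_seek_hole() above walk the extent tree
with ext4_map_blocks() and the extent-status cache instead of calling
ext4_fiemap() under set_fs(). The externally visible behaviour remains the
standard lseek() contract; an illustrative userspace fragment (fd is assumed
to be an open descriptor on an ext4 file):

    off_t data, hole;

    data = lseek(fd, 0, SEEK_DATA);        /* first data at/after offset 0 */
    if (data == (off_t)-1 && errno == ENXIO) {
            /* no data at or after the offset (e.g. at or past EOF) */
    } else {
            hole = lseek(fd, data, SEEK_HOLE);  /* end of that data run */
    }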
index bf76f40..8a8ec62 100644 (file)
@@ -24,6 +24,18 @@ int ext4_resize_begin(struct super_block *sb)
                return -EPERM;
 
        /*
+        * If we are not using the primary superblock/GDT copy, don't resize,
+        * because the user tools have no way of handling this.  Probably a
+        * bad time to do it anyway.
+        */
+       if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+           le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
+               ext4_warning(sb, "won't resize using backup superblock at %llu",
+                       (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
+               return -EPERM;
+       }
+
+       /*
         * We are not allowed to do online-resizing on a filesystem mounted
         * with error, because it can destroy the filesystem easily.
         */
@@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
                       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
                       gdb_num);
 
-       /*
-        * If we are not using the primary superblock/GDT copy don't resize,
-         * because the user tools have no way of handling this.  Probably a
-         * bad time to do it anyways.
-         */
-       if (EXT4_SB(sb)->s_sbh->b_blocknr !=
-           le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
-               ext4_warning(sb, "won't resize using backup superblock at %llu",
-                       (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
-               return -EPERM;
-       }
-
        gdb_bh = sb_bread(sb, gdblock);
        if (!gdb_bh)
                return -EIO;
index 43c92b1..74c5f53 100644 (file)
@@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
            EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
-               ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+               ext4_warning(sb, "metadata_csum and uninit_bg are "
                             "redundant flags; please run fsck.");
 
        /* Check for a known checksum algorithm */
index 99d440a..ee85cd4 100644 (file)
@@ -740,14 +740,15 @@ static int __init fcntl_init(void)
         * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
         * is defined as O_NONBLOCK on some platforms and not on others.
         */
-       BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+       BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
                O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
-               __FMODE_EXEC    | O_PATH        | __O_TMPFILE
+               __FMODE_EXEC    | O_PATH        | __O_TMPFILE   |
+               __FMODE_NONOTIFY
                ));
 
        fasync_cache = kmem_cache_create("fasync_cache",
index 735b8d3..59e2f90 100644 (file)
@@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp)
                        break;
        }
        trace_generic_delete_lease(inode, fl);
-       if (fl)
+       if (fl && IS_LEASE(fl))
                error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
        spin_unlock(&inode->i_lock);
        locks_dispose_list(&dispose);
index 3550a9c..c06a1ba 100644 (file)
@@ -3897,11 +3897,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
                status = nfs4_setlease(dp);
                goto out;
        }
-       atomic_inc(&fp->fi_delegees);
        if (fp->fi_had_conflict) {
                status = -EAGAIN;
                goto out_unlock;
        }
+       atomic_inc(&fp->fi_delegees);
        hash_delegation_locked(dp, fp);
        status = 0;
 out_unlock:
index c991616..bff8567 100644 (file)
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
        struct fsnotify_event *kevent;
        char __user *start;
        int ret;
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        start = buf;
        group = file->private_data;
 
        pr_debug("%s: group=%p\n", __func__, group);
 
+       add_wait_queue(&group->notification_waitq, &wait);
        while (1) {
-               prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
-
                mutex_lock(&group->notification_mutex);
                kevent = get_one_event(group, count);
                mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 
                        if (start != buf)
                                break;
-                       schedule();
+
+                       wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
                        continue;
                }
 
@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
                buf += ret;
                count -= ret;
        }
+       remove_wait_queue(&group->notification_waitq, &wait);
 
-       finish_wait(&group->notification_waitq, &wait);
        if (start != buf && ret != -EFAULT)
                ret = buf - start;
        return ret;
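
The fanotify conversion above replaces the prepare_to_wait()/schedule()/
finish_wait() sequence with the woken-wait API, which cannot lose a wakeup
that arrives between checking for an event and going to sleep. The generic
shape of the pattern (the queue and condition are placeholders):

    DEFINE_WAIT_FUNC(wait, woken_wake_function);

    add_wait_queue(&waitq, &wait);
    while (!condition()) {
            /* wait_woken() returns immediately if a wakeup already
             * arrived since the last check, so the race is closed. */
            wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
            if (signal_pending(current))
                    break;
    }
    remove_wait_queue(&waitq, &wait);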
index 79b5af5..cecd875 100644 (file)
@@ -2023,11 +2023,8 @@ leave:
        dlm_lockres_drop_inflight_ref(dlm, res);
        spin_unlock(&res->spinlock);
 
-       if (ret < 0) {
+       if (ret < 0)
                mlog_errno(ret);
-               if (newlock)
-                       dlm_lock_put(newlock);
-       }
 
        return ret;
 }
index b931e04..914c121 100644 (file)
@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
                                     struct inode *inode,
                                     const char *symname);
 
+static int ocfs2_double_lock(struct ocfs2_super *osb,
+                            struct buffer_head **bh1,
+                            struct inode *inode1,
+                            struct buffer_head **bh2,
+                            struct inode *inode2,
+                            int rename);
+
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
 /* An orphan dir name is an 8 byte value, printed as a hex string */
 #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
 
@@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry,
 {
        handle_t *handle;
        struct inode *inode = old_dentry->d_inode;
+       struct inode *old_dir = old_dentry->d_parent->d_inode;
        int err;
        struct buffer_head *fe_bh = NULL;
+       struct buffer_head *old_dir_bh = NULL;
        struct buffer_head *parent_fe_bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
@@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry,
 
        dquot_initialize(dir);
 
-       err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
+       err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
+                       &parent_fe_bh, dir, 0);
        if (err < 0) {
                if (err != -ENOENT)
                        mlog_errno(err);
                return err;
        }
 
+       /* make sure both dirs have bhs
+        * get an extra ref on old_dir_bh if old==new */
+       if (!parent_fe_bh) {
+               if (old_dir_bh) {
+                       parent_fe_bh = old_dir_bh;
+                       get_bh(parent_fe_bh);
+               } else {
+                       mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
+                       err = -EIO;
+                       goto out;
+               }
+       }
+
        if (!dir->i_nlink) {
                err = -ENOENT;
                goto out;
        }
 
-       err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+       err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
                        old_dentry->d_name.len, &old_de_ino);
        if (err) {
                err = -ENOENT;
@@ -801,10 +825,11 @@ out_unlock_inode:
        ocfs2_inode_unlock(inode, 1);
 
 out:
-       ocfs2_inode_unlock(dir, 1);
+       ocfs2_double_unlock(old_dir, dir);
 
        brelse(fe_bh);
        brelse(parent_fe_bh);
+       brelse(old_dir_bh);
 
        ocfs2_free_dir_lookup_result(&lookup);
 
@@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
 }
 
 /*
- * The only place this should be used is rename!
+ * The only places this should be used are rename and link!
  * if they have the same id, then the 1st one is the only one locked.
  */
 static int ocfs2_double_lock(struct ocfs2_super *osb,
                             struct buffer_head **bh1,
                             struct inode *inode1,
                             struct buffer_head **bh2,
-                            struct inode *inode2)
+                            struct inode *inode2,
+                            int rename)
 {
        int status;
        int inode1_is_ancestor, inode2_is_ancestor;
@@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
                }
                /* lock id2 */
                status = ocfs2_inode_lock_nested(inode2, bh2, 1,
-                                                OI_LS_RENAME1);
+                               rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
                if (status < 0) {
                        if (status != -ENOENT)
                                mlog_errno(status);
@@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
        }
 
        /* lock id1 */
-       status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
+       status = ocfs2_inode_lock_nested(inode1, bh1, 1,
+                       rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT);
        if (status < 0) {
                /*
                 * An error return must mean that no cluster locks
@@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir,
 
        /* if old and new are the same, this'll just do one lock. */
        status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
-                                  &new_dir_bh, new_dir);
+                                  &new_dir_bh, new_dir, 1);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
index 3ca9b75..b95dc32 100644 (file)
@@ -196,8 +196,8 @@ struct acpi_processor_flags {
 struct acpi_processor {
        acpi_handle handle;
        u32 acpi_id;
-       u32 apic_id;
-       u32 id;
+       u32 phys_id;    /* CPU hardware ID such as APIC ID for x86 */
+       u32 id;         /* CPU logical ID allocated by OS */
        u32 pblk;
        int performance_platform_limit;
        int throttling_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 #endif                         /* CONFIG_CPU_FREQ */
 
 /* in processor_core.c */
-int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int apic_id, u32 acpi_id);
+int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int phys_id, u32 acpi_id);
 int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
 
 /* in processor_pdc.c */
index 0884805..db284bf 100644 (file)
@@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
 {
-       tlb->start = TASK_SIZE;
-       tlb->end = 0;
+       if (tlb->fullmm) {
+               tlb->start = tlb->end = ~0;
+       } else {
+               tlb->start = TASK_SIZE;
+               tlb->end = 0;
+       }
 }
 
 /*
index 856d381..d459cd1 100644 (file)
@@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void);
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
-int acpi_unmap_lsapic(int cpu);
+int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_unmap_cpu(int cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
index 8aded9a..5735e71 100644 (file)
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
        unsigned long           flags;          /* BLK_MQ_F_* flags */
 
        struct request_queue    *queue;
-       unsigned int            queue_num;
        struct blk_flush_queue  *fq;
 
        void                    *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
        unsigned long           dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
        unsigned int            numa_node;
-       unsigned int            cmd_size;       /* per-request extra data */
+       unsigned int            queue_num;
 
        atomic_t                nr_active;
 
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
+int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, int error);
 void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
                void *priv);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
 
 /*
  * Driver command data is immediately after the request. So subtract request
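
blk_mq_freeze_queue_start() and blk_mq_unfreeze_queue() are exposed here so
that drivers can quiesce a queue around a controller reset. A hedged sketch
of the intended pairing (q and the reset step are placeholders, not code
from this patch):

    blk_mq_freeze_queue_start(q);   /* start blocking new requests */
    /* ... drain or fail outstanding requests, reset the hardware ... */
    blk_mq_unfreeze_queue(q);       /* allow request submission again */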
index 445d592..c294e3e 100644 (file)
@@ -190,6 +190,7 @@ enum rq_flag_bits {
        __REQ_PM,               /* runtime pm request */
        __REQ_HASHED,           /* on IO scheduler merge hash */
        __REQ_MQ_INFLIGHT,      /* track inflight for MQ */
+       __REQ_NO_TIMEOUT,       /* requests may never expire */
        __REQ_NR_BITS,          /* stops here */
 };
 
@@ -243,5 +244,6 @@ enum rq_flag_bits {
 #define REQ_PM                 (1ULL << __REQ_PM)
 #define REQ_HASHED             (1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT                (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT         (1ULL << __REQ_NO_TIMEOUT)
 
 #endif /* __LINUX_BLK_TYPES_H */
index 5d86416..61b19c4 100644 (file)
@@ -87,8 +87,8 @@ struct ceph_osd_req_op {
                        struct ceph_osd_data osd_data;
                } extent;
                struct {
-                       __le32 name_len;
-                       __le32 value_len;
+                       u32 name_len;
+                       u32 value_len;
                        __u8 cmp_op;       /* CEPH_OSD_CMPXATTR_OP_* */
                        __u8 cmp_mode;     /* CEPH_OSD_CMPXATTR_MODE_* */
                        struct ceph_osd_data osd_data;
index a1c81f8..33063f8 100644 (file)
@@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si
        }
 }
 
-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 {
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
  * compiler is aware of some particular ordering.  One way to make the
  * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
  *
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and ASSIGN_ONCE()  will fall back to memcpy and print a
+ * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
  * compile-time warning.
  *
  * Their two major use cases are: (1) Mediating communication between
@@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 #define READ_ONCE(x) \
        ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
 
-#define ASSIGN_ONCE(val, x) \
-       ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+#define WRITE_ONCE(x, val) \
+       ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
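
Beyond the rename, note that WRITE_ONCE(x, val) takes the destination first,
matching READ_ONCE(x), where ASSIGN_ONCE(val, x) took the value first. A
minimal illustration of the pair on a lock-free flag (names are illustrative):

    static int shared_flag;

    void signal_done(void)
    {
            WRITE_ONCE(shared_flag, 1);     /* single, non-torn store */
    }

    int poll_done(void)
    {
            /* the compiler may neither cache nor refetch this load */
            return READ_ONCE(shared_flag);
    }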
 
index f90c028..42efe13 100644 (file)
@@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define FMODE_CAN_WRITE         ((__force fmode_t)0x40000)
 
 /* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY         ((__force fmode_t)0x1000000)
+#define FMODE_NONOTIFY         ((__force fmode_t)0x4000000)
 
 /*
  * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
index 290db12..75ae2e2 100644 (file)
  * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
  */
 
+/* Shifted versions of the command enable bits are used if the command
+ * has no arguments (see kdb_check_flags). This allows a command, such as
+ * go, to have different permissions depending upon whether it is called
+ * with an argument.
+ */
+#define KDB_ENABLE_NO_ARGS_SHIFT 10
+
 typedef enum {
-       KDB_REPEAT_NONE = 0,    /* Do not repeat this command */
-       KDB_REPEAT_NO_ARGS,     /* Repeat the command without arguments */
-       KDB_REPEAT_WITH_ARGS,   /* Repeat the command including its arguments */
-} kdb_repeat_t;
+       KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
+       KDB_ENABLE_MEM_READ = (1 << 1),
+       KDB_ENABLE_MEM_WRITE = (1 << 2),
+       KDB_ENABLE_REG_READ = (1 << 3),
+       KDB_ENABLE_REG_WRITE = (1 << 4),
+       KDB_ENABLE_INSPECT = (1 << 5),
+       KDB_ENABLE_FLOW_CTRL = (1 << 6),
+       KDB_ENABLE_SIGNAL = (1 << 7),
+       KDB_ENABLE_REBOOT = (1 << 8),
+       /* User-exposed values stop here; all remaining flags are
+        * used exclusively to describe a command's behaviour.
+        */
+
+       KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
+       KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
+
+       KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
+                                     << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
+                                      << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
+                                     << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
+                                      << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
+                                    << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
+                                      << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
+                                   << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
+                                   << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
+                                        << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
+
+       KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
+       KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
+} kdb_cmdflags_t;
 
 typedef int (*kdb_func_t)(int, const char **);
 
@@ -62,6 +105,7 @@ extern atomic_t kdb_event;
 #define KDB_BADLENGTH  (-19)
 #define KDB_NOBP       (-20)
 #define KDB_BADADDR    (-21)
+#define KDB_NOPERM     (-22)
 
 /*
  * kdb_diemsg
@@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
 
 /* Dynamic kdb shell command registration */
 extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
-                              short, kdb_repeat_t);
+extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
+                             short, kdb_cmdflags_t);
 extern int kdb_unregister(char *);
 #else /* ! CONFIG_KGDB_KDB */
 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
 static inline void kdb_init(int level) {}
 static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
                               char *help, short minlen) { return 0; }
-static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage,
-                                     char *help, short minlen,
-                                     kdb_repeat_t repeat) { return 0; }
+static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
+                                    char *help, short minlen,
+                                    kdb_cmdflags_t flags) { return 0; }
 static inline int kdb_unregister(char *cmd) { return 0; }
 #endif /* CONFIG_KGDB_KDB */
 enum {
index f80d019..80fc92a 100644 (file)
@@ -1952,7 +1952,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
 #if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
-  #define expand_upwards(vma, address) do { } while (0)
+  #define expand_upwards(vma, address) (0)
 #endif
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
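
The !VM_GROWSUP stub changes from a statement to the expression (0) because
callers use the return value; a do { } while (0) body cannot appear in
expression position. A hypothetical caller showing why:

    /* only compiles with the (0) form of the stub */
    if (expand_upwards(vma, address))
            return -ENOMEM;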
index 375af80..f767a0d 100644 (file)
@@ -137,6 +137,7 @@ struct sdhci_host {
 #define SDHCI_SDR104_NEEDS_TUNING (1<<10)      /* SDR104/HS200 needs tuning */
 #define SDHCI_USING_RETUNING_TIMER (1<<11)     /* Host is using a retuning timer for the card */
 #define SDHCI_USE_64_BIT_DMA   (1<<12) /* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING     (1<<13) /* Tuning for HS400 */
 
        unsigned int version;   /* SDHCI spec. version */
 
index 679e6e9..52fd8e8 100644 (file)
@@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     3. Update dev->stats asynchronously and atomically, and define
  *        neither operation.
  *
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
  *     If device support VLAN filtering this function is called when a
  *     VLAN id is registered.
  *
- * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
  *     If device support VLAN filtering this function is called when a
  *     VLAN id is unregistered.
  *
@@ -2085,7 +2085,7 @@ extern rwlock_t                           dev_base_lock;          /* Device list lock */
        list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_in_bond_rcu(bond, slave)       \
                for_each_netdev_rcu(&init_net, slave)   \
-                       if (netdev_master_upper_dev_get_rcu(slave) == bond)
+                       if (netdev_master_upper_dev_get_rcu(slave) == (bond))
 #define net_device_entry(lh)   list_entry(lh, struct net_device, dev_list)
 
 static inline struct net_device *next_net_device(struct net_device *dev)
index 486e84c..4f7a61c 100644 (file)
@@ -79,11 +79,6 @@ struct perf_branch_stack {
        struct perf_branch_entry        entries[0];
 };
 
-struct perf_regs {
-       __u64           abi;
-       struct pt_regs  *regs;
-};
-
 struct task_struct;
 
 /*
@@ -610,7 +605,14 @@ struct perf_sample_data {
                u32     reserved;
        }                               cpu_entry;
        struct perf_callchain_entry     *callchain;
+
+       /*
+        * regs_user may point to task_pt_regs or to regs_user_copy, depending
+        * on arch details.
+        */
        struct perf_regs                regs_user;
+       struct pt_regs                  regs_user_copy;
+
        struct perf_regs                regs_intr;
        u64                             stack_user_size;
 } ____cacheline_aligned;
index 3c73d5f..a5f98d5 100644 (file)
@@ -1,11 +1,19 @@
 #ifndef _LINUX_PERF_REGS_H
 #define _LINUX_PERF_REGS_H
 
+struct perf_regs {
+       __u64           abi;
+       struct pt_regs  *regs;
+};
+
 #ifdef CONFIG_HAVE_PERF_REGS
 #include <asm/perf_regs.h>
 u64 perf_reg_value(struct pt_regs *regs, int idx);
 int perf_reg_validate(u64 mask);
 u64 perf_reg_abi(struct task_struct *task);
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy);
 #else
 static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
 {
@@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task)
 {
        return PERF_SAMPLE_REGS_ABI_NONE;
 }
+
+static inline void perf_get_regs_user(struct perf_regs *regs_user,
+                                     struct pt_regs *regs,
+                                     struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
 #endif /* CONFIG_HAVE_PERF_REGS */
 #endif /* _LINUX_PERF_REGS_H */
index c0c2bce..d9d7e7e 100644 (file)
@@ -37,6 +37,16 @@ struct anon_vma {
        atomic_t refcount;
 
        /*
+        * Count of child anon_vmas and VMAs which point to this anon_vma.
+        *
+        * This counter is used when deciding whether to reuse an anon_vma
+        * instead of forking a new one. See the comments in anon_vma_clone().
+        */
+       unsigned degree;
+
+       struct anon_vma *parent;        /* Parent of this anon_vma */
+
+       /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * rb_root must only be read/written after taking the above lock
index a219be9..0004833 100644 (file)
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
                      struct writeback_control *wbc, writepage_t writepage,
                      void *data);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
-void set_page_dirty_balance(struct page *page);
 void writeback_set_ratelimit(void);
 void tag_pages_for_writeback(struct address_space *mapping,
                             pgoff_t start, pgoff_t end);
index 58d719d..29c7be8 100644 (file)
@@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  *
  * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
  *     driver to indicate that it requires IV generation for this
- *     particular key. Setting this flag does not necessarily mean that SKBs
- *     will have sufficient tailroom for ICV or MIC.
+ *     particular key.
  * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
  *     the driver for a TKIP key if it requires Michael MIC
  *     generation in software.
@@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
  *     if space should be prepared for the IV, but the IV
  *     itself should not be generated. Do not set together with
- *     @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
- *     not necessarily mean that SKBs will have sufficient tailroom for ICV or
- *     MIC.
+ *     @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
  * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
  *     management frames. The flag can help drivers that have a hardware
  *     crypto implementation that doesn't deal with management frames
index 430cfaf..db81c65 100644 (file)
@@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int);
 int    se_dev_set_emulate_rest_reord(struct se_device *dev, int);
 int    se_dev_set_queue_depth(struct se_device *, u32);
 int    se_dev_set_max_sectors(struct se_device *, u32);
-int    se_dev_set_fabric_max_sectors(struct se_device *, u32);
 int    se_dev_set_optimal_sectors(struct se_device *, u32);
 int    se_dev_set_block_size(struct se_device *, u32);
 
index 3247d75..186f7a9 100644 (file)
@@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name
        TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR);           \
        DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors);                 \
        TB_DEV_ATTR_RO(_backend, hw_max_sectors);                       \
-       DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors);                \
-       TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR);   \
        DEF_TB_DEV_ATTRIB(_backend, optimal_sectors);                   \
        TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR);      \
        DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth);                 \
index 397fb63..4a8795a 100644 (file)
@@ -77,8 +77,6 @@
 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
 /* Default max_write_same_len, disabled by default */
 #define DA_MAX_WRITE_SAME_LEN                  0
-/* Default max transfer length */
-#define DA_FABRIC_MAX_SECTORS                  8192
 /* Use a model alias based on the configfs backend device name */
 #define DA_EMULATE_MODEL_ALIAS                 0
 /* Emulation for Direct Page Out */
@@ -694,7 +692,6 @@ struct se_dev_attrib {
        u32             hw_block_size;
        u32             block_size;
        u32             hw_max_sectors;
-       u32             fabric_max_sectors;
        u32             optimal_sectors;
        u32             hw_queue_depth;
        u32             queue_depth;
index 7543b3e..e063eff 100644 (file)
@@ -5,7 +5,7 @@
 
 /*
  * FMODE_EXEC is 0x20
- * FMODE_NONOTIFY is 0x1000000
+ * FMODE_NONOTIFY is 0x4000000
  * These cannot be used by userspace O_* until internal and external open
  * flags are split.
  * -Eric Paris
index 7acef41..af94f31 100644 (file)
@@ -128,27 +128,34 @@ struct kfd_ioctl_get_process_apertures_args {
        uint32_t pad;
 };
 
-#define KFD_IOC_MAGIC 'K'
+#define AMDKFD_IOCTL_BASE 'K'
+#define AMDKFD_IO(nr)                  _IO(AMDKFD_IOCTL_BASE, nr)
+#define AMDKFD_IOR(nr, type)           _IOR(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOW(nr, type)           _IOW(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOWR(nr, type)          _IOWR(AMDKFD_IOCTL_BASE, nr, type)
 
-#define KFD_IOC_GET_VERSION \
-               _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+#define AMDKFD_IOC_GET_VERSION                 \
+               AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
 
-#define KFD_IOC_CREATE_QUEUE \
-               _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+#define AMDKFD_IOC_CREATE_QUEUE                        \
+               AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
 
-#define KFD_IOC_DESTROY_QUEUE \
-       _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+#define AMDKFD_IOC_DESTROY_QUEUE               \
+               AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
 
-#define KFD_IOC_SET_MEMORY_POLICY \
-       _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+#define AMDKFD_IOC_SET_MEMORY_POLICY           \
+               AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
 
-#define KFD_IOC_GET_CLOCK_COUNTERS \
-       _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+#define AMDKFD_IOC_GET_CLOCK_COUNTERS          \
+               AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
 
-#define KFD_IOC_GET_PROCESS_APERTURES \
-       _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+#define AMDKFD_IOC_GET_PROCESS_APERTURES       \
+               AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
 
-#define KFD_IOC_UPDATE_QUEUE \
-       _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+#define AMDKFD_IOC_UPDATE_QUEUE                        \
+               AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
+
+#define AMDKFD_COMMAND_START           0x01
+#define AMDKFD_COMMAND_END             0x08
 
 #endif
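
The renumbering above moves the KFD ioctls onto explicitly numbered _IO*
encodings with a declared command range. An illustrative userspace call of
the first command (error handling trimmed; the /dev/kfd node and the
major_version/minor_version fields follow the uapi header):

    struct kfd_ioctl_get_version_args args = {0};
    int fd = open("/dev/kfd", O_RDWR);

    if (fd >= 0 && ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
            printf("KFD interface %u.%u\n",
                   args.major_version, args.minor_version);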
index 3a6dcaa..f714e86 100644 (file)
@@ -174,6 +174,10 @@ enum ovs_packet_attr {
        OVS_PACKET_ATTR_USERDATA,    /* OVS_ACTION_ATTR_USERSPACE arg. */
        OVS_PACKET_ATTR_EGRESS_TUN_KEY,  /* Nested OVS_TUNNEL_KEY_ATTR_*
                                            attributes. */
+       OVS_PACKET_ATTR_UNUSED1,
+       OVS_PACKET_ATTR_UNUSED2,
+       OVS_PACKET_ATTR_PROBE,      /* Packet operation is a feature probe,
+                                      error logging should be suppressed. */
        __OVS_PACKET_ATTR_MAX
 };
 
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
new file mode 100644 (file)
index 0000000..b47d9d0
--- /dev/null
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * nmi.h
+ *
+ * NMI callback registration and reason codes.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_NMI_H__
+#define __XEN_PUBLIC_NMI_H__
+
+#include <xen/interface/xen.h>
+
+/*
+ * NMI reason codes:
+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
+ */
+ /* I/O-check error reported via ISA port 0x61, bit 6. */
+#define _XEN_NMIREASON_io_error     0
+#define XEN_NMIREASON_io_error      (1UL << _XEN_NMIREASON_io_error)
+ /* PCI SERR reported via ISA port 0x61, bit 7. */
+#define _XEN_NMIREASON_pci_serr     1
+#define XEN_NMIREASON_pci_serr      (1UL << _XEN_NMIREASON_pci_serr)
+ /* Unknown hardware-generated NMI. */
+#define _XEN_NMIREASON_unknown      2
+#define XEN_NMIREASON_unknown       (1UL << _XEN_NMIREASON_unknown)
+
+/*
+ * long nmi_op(unsigned int cmd, void *arg)
+ * NB. All ops return zero on success, else a negative error code.
+ */
+
+/*
+ * Register NMI callback for this (calling) VCPU. Currently this only makes
+ * sense for domain 0, vcpu 0. All other callers will receive EINVAL.
+ * arg == pointer to xennmi_callback structure.
+ */
+#define XENNMI_register_callback   0
+struct xennmi_callback {
+    unsigned long handler_address;
+    unsigned long pad;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback);
+
+/*
+ * Deregister NMI callback for this (calling) VCPU.
+ * arg == NULL.
+ */
+#define XENNMI_unregister_callback 1
+
+#endif /* __XEN_PUBLIC_NMI_H__ */
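
Registration uses the nmi_op hypercall described in the header comment. A
minimal sketch, assuming the usual Linux hypercall wrapper HYPERVISOR_nmi_op
and a hypothetical handler entry point:

    /* Sketch: register an NMI callback for dom0 vcpu0; all names except
     * the XENNMI_* constants and the struct are assumptions. */
    static struct xennmi_callback cb = {
            .handler_address = (unsigned long)my_nmi_entry, /* hypothetical */
            .pad             = 0,
    };

    int rc = HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
    if (rc)
            pr_err("nmi_op register failed: %d\n", rc); /* EINVAL unless dom0 vcpu0 */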
index 1adf62b..07ce18c 100644 (file)
@@ -27,6 +27,9 @@
  * version 2. This program is licensed "as is" without any warranty of any
  * kind, whether express or implied.
  */
+
+#define pr_fmt(fmt) "KGDB: " fmt
+
 #include <linux/pid_namespace.h>
 #include <linux/clocksource.h>
 #include <linux/serial_core.h>
@@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr)
                return err;
        err = kgdb_arch_remove_breakpoint(&tmp);
        if (err)
-               printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
-                  "memory destroyed at: %lx", addr);
+               pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
+                      addr);
        return err;
 }
 
@@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void)
                error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
                if (error) {
                        ret = error;
-                       printk(KERN_INFO "KGDB: BP install failed: %lx",
-                              kgdb_break[i].bpt_addr);
+                       pr_info("BP install failed: %lx\n",
+                               kgdb_break[i].bpt_addr);
                        continue;
                }
 
@@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void)
                        continue;
                error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
                if (error) {
-                       printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
-                              kgdb_break[i].bpt_addr);
+                       pr_info("BP remove failed: %lx\n",
+                               kgdb_break[i].bpt_addr);
                        ret = error;
                }
 
@@ -367,7 +370,7 @@ int dbg_remove_all_break(void)
                        goto setundefined;
                error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
                if (error)
-                       printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+                       pr_err("breakpoint remove failed: %lx\n",
                               kgdb_break[i].bpt_addr);
 setundefined:
                kgdb_break[i].state = BP_UNDEFINED;
@@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait)
        if (print_wait) {
 #ifdef CONFIG_KGDB_KDB
                if (!dbg_kdb_mode)
-                       printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+                       pr_crit("waiting... or $3#33 for KDB\n");
 #else
-               printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+               pr_crit("Waiting for remote debugger\n");
 #endif
        }
        return 1;
@@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
                exception_level = 0;
                kgdb_skipexception(ks->ex_vector, ks->linux_regs);
                dbg_activate_sw_breakpoints();
-               printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
-                       addr);
+               pr_crit("re-enter error: breakpoint removed %lx\n", addr);
                WARN_ON_ONCE(1);
 
                return 1;
@@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
                panic("Recursive entry to debugger");
        }
 
-       printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+       pr_crit("re-enter exception: ALL breakpoints killed\n");
 #ifdef CONFIG_KGDB_KDB
        /* Allow kdb to debug itself one level */
        return 0;
@@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
        int cpu;
        int trace_on = 0;
        int online_cpus = num_online_cpus();
+       u64 time_left;
 
        kgdb_info[ks->cpu].enter_kgdb++;
        kgdb_info[ks->cpu].exception_state |= exception_state;
@@ -595,9 +598,13 @@ return_normal:
        /*
         * Wait for the other CPUs to be notified and be waiting for us:
         */
-       while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
-                               atomic_read(&slaves_in_kgdb)) != online_cpus)
+       time_left = loops_per_jiffy * HZ;
+       while (kgdb_do_roundup && --time_left &&
+              (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
+                  online_cpus)
                cpu_relax();
+       if (!time_left)
+               pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
 
        /*
         * At this point the primary processor is completely
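
The new loop above turns an unbounded spin into a budgeted one, so a wedged
secondary CPU can no longer hang the debug master forever. A standalone C
sketch of the same budget-bounded polling shape; the helper
all_cpus_rounded_up() and the budget value are invented stand-ins for the
kernel's masters/slaves counters and loops_per_jiffy * HZ:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool all_cpus_rounded_up(void)
    {
            return false;           /* stub: a dead CPU never checks in */
    }

    int main(void)
    {
            uint64_t time_left = 1000000;   /* arbitrary poll budget */

            while (!all_cpus_rounded_up() && --time_left)
                    ;               /* cpu_relax() in the kernel loop */
            if (!time_left)
                    fprintf(stderr, "Timed out waiting for secondary CPUs.\n");
            return 0;
    }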
@@ -795,15 +802,15 @@ static struct console kgdbcons = {
 static void sysrq_handle_dbg(int key)
 {
        if (!dbg_io_ops) {
-               printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+               pr_crit("ERROR: No KGDB I/O module available\n");
                return;
        }
        if (!kgdb_connected) {
 #ifdef CONFIG_KGDB_KDB
                if (!dbg_kdb_mode)
-                       printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+                       pr_crit("KGDB or $3#33 for KDB\n");
 #else
-               printk(KERN_CRIT "Entering KGDB\n");
+               pr_crit("Entering KGDB\n");
 #endif
        }
 
@@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void)
 {
        kgdb_break_asap = 0;
 
-       printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+       pr_crit("Waiting for connection from remote gdb...\n");
        kgdb_breakpoint();
 }
 
@@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
        if (dbg_io_ops) {
                spin_unlock(&kgdb_registration_lock);
 
-               printk(KERN_ERR "kgdb: Another I/O driver is already "
-                               "registered with KGDB.\n");
+               pr_err("Another I/O driver is already registered with KGDB\n");
                return -EBUSY;
        }
 
@@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
 
        spin_unlock(&kgdb_registration_lock);
 
-       printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
-              new_dbg_io_ops->name);
+       pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
 
        /* Arm KGDB now. */
        kgdb_register_callbacks();
@@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
 
        spin_unlock(&kgdb_registration_lock);
 
-       printk(KERN_INFO
-               "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+       pr_info("Unregistered I/O driver %s, debugger disabled\n",
                old_dbg_io_ops->name);
 }
 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
index b20d544..e1dbf4a 100644
@@ -531,22 +531,29 @@ void __init kdb_initbptab(void)
        for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
                bp->bp_free = 1;
 
-       kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
-               "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
-               "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("bp", kdb_bp, "[<vaddr>]",
+               "Set/Display breakpoints", 0,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("bl", kdb_bp, "[<vaddr>]",
+               "Display breakpoints", 0,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
        if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
-               kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
-               "[datar [length]|dataw [length]]   Set hw brk", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("bc", kdb_bc, "<bpnum>",
-               "Clear Breakpoint", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("be", kdb_bc, "<bpnum>",
-               "Enable Breakpoint", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("bd", kdb_bc, "<bpnum>",
-               "Disable Breakpoint", 0, KDB_REPEAT_NONE);
-
-       kdb_register_repeat("ss", kdb_ss, "",
-               "Single Step", 1, KDB_REPEAT_NO_ARGS);
+               kdb_register_flags("bph", kdb_bp, "[<vaddr>]",
+               "[datar [length]|dataw [length]]   Set hw brk", 0,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("bc", kdb_bc, "<bpnum>",
+               "Clear Breakpoint", 0,
+               KDB_ENABLE_FLOW_CTRL);
+       kdb_register_flags("be", kdb_bc, "<bpnum>",
+               "Enable Breakpoint", 0,
+               KDB_ENABLE_FLOW_CTRL);
+       kdb_register_flags("bd", kdb_bc, "<bpnum>",
+               "Disable Breakpoint", 0,
+               KDB_ENABLE_FLOW_CTRL);
+
+       kdb_register_flags("ss", kdb_ss, "",
+               "Single Step", 1,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
        /*
         * Architecture dependent initialization.
         */
index 8859ca3..15e1a7a 100644
@@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks)
                ks->pass_exception = 1;
                KDB_FLAG_SET(CATASTROPHIC);
        }
+       /* set CATASTROPHIC if the system contains unresponsive processors */
+       for_each_online_cpu(i)
+               if (!kgdb_info[i].enter_kgdb)
+                       KDB_FLAG_SET(CATASTROPHIC);
        if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
                KDB_STATE_CLEAR(SSBPT);
                KDB_STATE_CLEAR(DOING_SS);
index 379650b..f191bdd 100644
@@ -12,6 +12,7 @@
  */
 
 #include <linux/ctype.h>
+#include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/kmsg_dump.h>
@@ -23,6 +24,7 @@
 #include <linux/vmalloc.h>
 #include <linux/atomic.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/slab.h>
 #include "kdb_private.h"
 
+#undef MODULE_PARAM_PREFIX
+#define        MODULE_PARAM_PREFIX "kdb."
+
+static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
+module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
+
 #define GREP_LEN 256
 char kdb_grep_string[GREP_LEN];
 int kdb_grepping_flag;
@@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = {
        KDBMSG(BADLENGTH, "Invalid length field"),
        KDBMSG(NOBP, "No Breakpoint exists"),
        KDBMSG(BADADDR, "Invalid address"),
+       KDBMSG(NOPERM, "Permission denied"),
 };
 #undef KDBMSG
 
@@ -188,6 +197,26 @@ struct task_struct *kdb_curr_task(int cpu)
 }
 
 /*
+ * Check whether the flags of the current command and the permissions
+ * of the kdb console allow a command to be run.
+ */
+static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+                                  bool no_args)
+{
+       /* the permissions mask comes from userspace so needs slight massaging */
+       permissions &= KDB_ENABLE_MASK;
+       permissions |= KDB_ENABLE_ALWAYS_SAFE;
+
+       /* some commands change group when launched with no arguments */
+       if (no_args)
+               permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
+
+       flags |= KDB_ENABLE_ALL;
+
+       return permissions & flags;
+}
+
+/*
  * kdbgetenv - This function will return the character string value of
  *     an environment variable.
  * Parameters:
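
The mask-and-shift check in kdb_check_flags() above is dense; a userspace
rendering with hypothetical flag values (the real constants live in
include/linux/kdb.h and are not part of this hunk) shows how the no-args
promotion and the always-safe grant interact:

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented bit values, for illustration only. */
    #define KDB_ENABLE_ALL           0x0001
    #define KDB_ENABLE_MEM_READ      0x0002
    #define KDB_ENABLE_FLOW_CTRL     0x0040
    #define KDB_ENABLE_ALWAYS_SAFE   0x0200
    #define KDB_ENABLE_MASK          0x03ff
    #define KDB_ENABLE_NO_ARGS_SHIFT 10

    static bool kdb_check_flags(unsigned int flags, int permissions,
                                bool no_args)
    {
            permissions &= KDB_ENABLE_MASK;
            permissions |= KDB_ENABLE_ALWAYS_SAFE;
            if (no_args)    /* promote each grant to its no-args twin */
                    permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
            flags |= KDB_ENABLE_ALL;
            return permissions & flags;
    }

    int main(void)
    {
            /* cmd_enable = 0x2: mem-read allowed, flow control denied */
            printf("%d\n", kdb_check_flags(KDB_ENABLE_MEM_READ, 0x2, false));
            printf("%d\n", kdb_check_flags(KDB_ENABLE_FLOW_CTRL, 0x2, false));
            return 0;
    }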
@@ -476,6 +505,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
        kdb_symtab_t symtab;
 
        /*
+        * If the enable flags prohibit both arbitrary memory access
+        * and flow control then there are no reasonable grounds to
+        * provide symbol lookup.
+        */
+       if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL,
+                            kdb_cmd_enabled, false))
+               return KDB_NOPERM;
+
+       /*
         * Process arguments which follow the following syntax:
         *
         *  symbol | numeric-address [+/- numeric-offset]
@@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
                if (!s->count)
                        s->usable = 0;
                if (s->usable)
-                       kdb_register(s->name, kdb_exec_defcmd,
-                                    s->usage, s->help, 0);
+                       /* macros are always safe because when executed each
+                        * internal command re-enters kdb_parse() and is
+                        * safety checked individually.
+                        */
+                       kdb_register_flags(s->name, kdb_exec_defcmd, s->usage,
+                                          s->help, 0,
+                                          KDB_ENABLE_ALWAYS_SAFE);
                return 0;
        }
        if (!s->usable)
@@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr)
 
        if (i < kdb_max_commands) {
                int result;
+
+               if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1))
+                       return KDB_NOPERM;
+
                KDB_STATE_SET(CMD);
                result = (*tp->cmd_func)(argc-1, (const char **)argv);
                if (result && ignore_errors && result > KDB_CMD_GO)
                        result = 0;
                KDB_STATE_CLEAR(CMD);
-               switch (tp->cmd_repeat) {
-               case KDB_REPEAT_NONE:
-                       argc = 0;
-                       if (argv[0])
-                               *(argv[0]) = '\0';
-                       break;
-               case KDB_REPEAT_NO_ARGS:
-                       argc = 1;
-                       if (argv[1])
-                               *(argv[1]) = '\0';
-                       break;
-               case KDB_REPEAT_WITH_ARGS:
-                       break;
-               }
+
+               if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS)
+                       return result;
+
+               argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0;
+               if (argv[argc])
+                       *(argv[argc]) = '\0';
                return result;
        }
 
@@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv)
  */
 static int kdb_sr(int argc, const char **argv)
 {
+       bool check_mask =
+           !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false);
+
        if (argc != 1)
                return KDB_ARGCOUNT;
+
        kdb_trap_printk++;
-       __handle_sysrq(*argv[1], false);
+       __handle_sysrq(*argv[1], check_mask);
        kdb_trap_printk--;
 
        return 0;
@@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void)
        for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
                if (!cpu_online(i)) {
                        state = 'F';    /* cpu is offline */
+               } else if (!kgdb_info[i].enter_kgdb) {
+                       state = 'D';    /* cpu is online but unresponsive */
                } else {
                        state = ' ';    /* cpu is responding to kdb */
                        if (kdb_task_state_char(KDB_TSK(i)) == 'I')
@@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
        /*
         * Validate cpunum
         */
-       if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
+       if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
                return KDB_BADCPUNUM;
 
        dbg_switch_cpu = cpunum;
@@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv)
                        return 0;
                if (!kt->cmd_name)
                        continue;
+               if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true))
+                       continue;
                if (strlen(kt->cmd_usage) > 20)
                        space = "\n                                    ";
                kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
@@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv)
 }
 
 /*
- * kdb_register_repeat - This function is used to register a kernel
+ * kdb_register_flags - This function is used to register a kernel
  *     debugger command.
  * Inputs:
  *     cmd     Command name
@@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv)
  *     zero for success, one if a duplicate command.
  */
 #define kdb_command_extend 50  /* arbitrary */
-int kdb_register_repeat(char *cmd,
-                       kdb_func_t func,
-                       char *usage,
-                       char *help,
-                       short minlen,
-                       kdb_repeat_t repeat)
+int kdb_register_flags(char *cmd,
+                      kdb_func_t func,
+                      char *usage,
+                      char *help,
+                      short minlen,
+                      kdb_cmdflags_t flags)
 {
        int i;
        kdbtab_t *kp;
@@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd,
        kp->cmd_func   = func;
        kp->cmd_usage  = usage;
        kp->cmd_help   = help;
-       kp->cmd_flags  = 0;
        kp->cmd_minlen = minlen;
-       kp->cmd_repeat = repeat;
+       kp->cmd_flags  = flags;
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(kdb_register_repeat);
+EXPORT_SYMBOL_GPL(kdb_register_flags);
 
 
 /*
  * kdb_register - Compatibility register function for commands that do
  *     not need to specify a repeat state.  Equivalent to
- *     kdb_register_repeat with KDB_REPEAT_NONE.
+ *     kdb_register_flags with flags set to 0.
  * Inputs:
  *     cmd     Command name
  *     func    Function to execute the command
@@ -2721,8 +2768,7 @@ int kdb_register(char *cmd,
             char *help,
             short minlen)
 {
-       return kdb_register_repeat(cmd, func, usage, help, minlen,
-                                  KDB_REPEAT_NONE);
+       return kdb_register_flags(cmd, func, usage, help, minlen, 0);
 }
 EXPORT_SYMBOL_GPL(kdb_register);
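
With the flags-based API in place, every registration now declares its
permission class up front. A hedged sketch of registering a trivial
command; the command name and handler are invented for illustration, and
KDB_ENABLE_ALWAYS_SAFE is chosen because the handler only prints:

    static int kdb_hello(int argc, const char **argv)
    {
            kdb_printf("hello from kdb\n");
            return 0;
    }

    /* somewhere during initialisation */
    kdb_register_flags("hello", kdb_hello, "",
                       "Print a greeting", 0,
                       KDB_ENABLE_ALWAYS_SAFE);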
 
@@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void)
        for_each_kdbcmd(kp, i)
                kp->cmd_name = NULL;
 
-       kdb_register_repeat("md", kdb_md, "<vaddr>",
+       kdb_register_flags("md", kdb_md, "<vaddr>",
          "Display Memory Contents, also mdWcN, e.g. md8c1", 1,
-                           KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
-         "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
-         "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mds", kdb_md, "<vaddr>",
-         "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
-         "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("go", kdb_go, "[<vaddr>]",
-         "Continue Execution", 1, KDB_REPEAT_NONE);
-       kdb_register_repeat("rd", kdb_rd, "",
-         "Display Registers", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
-         "Modify Registers", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("ef", kdb_ef, "<vaddr>",
-         "Display exception frame", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
-         "Stack traceback", 1, KDB_REPEAT_NONE);
-       kdb_register_repeat("btp", kdb_bt, "<pid>",
-         "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
-         "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("btc", kdb_bt, "",
-         "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>",
+         "Display Raw Memory", 0,
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>",
+         "Display Physical Memory", 0,
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mds", kdb_md, "<vaddr>",
+         "Display Memory Symbolically", 0,
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>",
+         "Modify Memory Contents", 0,
+         KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("go", kdb_go, "[<vaddr>]",
+         "Continue Execution", 1,
+         KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+       kdb_register_flags("rd", kdb_rd, "",
+         "Display Registers", 0,
+         KDB_ENABLE_REG_READ);
+       kdb_register_flags("rm", kdb_rm, "<reg> <contents>",
+         "Modify Registers", 0,
+         KDB_ENABLE_REG_WRITE);
+       kdb_register_flags("ef", kdb_ef, "<vaddr>",
+         "Display exception frame", 0,
+         KDB_ENABLE_MEM_READ);
+       kdb_register_flags("bt", kdb_bt, "[<vaddr>]",
+         "Stack traceback", 1,
+         KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+       kdb_register_flags("btp", kdb_bt, "<pid>",
+         "Display stack for process <pid>", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+         "Backtrace all processes matching state flag", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("btc", kdb_bt, "",
+         "Backtrace current process on each cpu", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("btt", kdb_bt, "<vaddr>",
          "Backtrace process given its struct task address", 0,
-                           KDB_REPEAT_NONE);
-       kdb_register_repeat("env", kdb_env, "",
-         "Show environment variables", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("set", kdb_set, "",
-         "Set environment variables", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("help", kdb_help, "",
-         "Display Help Message", 1, KDB_REPEAT_NONE);
-       kdb_register_repeat("?", kdb_help, "",
-         "Display Help Message", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
-         "Switch to new cpu", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("kgdb", kdb_kgdb, "",
-         "Enter kgdb mode", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
-         "Display active task list", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("pid", kdb_pid, "<pidnum>",
-         "Switch to another task", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("reboot", kdb_reboot, "",
-         "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+         KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+       kdb_register_flags("env", kdb_env, "",
+         "Show environment variables", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("set", kdb_set, "",
+         "Set environment variables", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("help", kdb_help, "",
+         "Display Help Message", 1,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("?", kdb_help, "",
+         "Display Help Message", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("cpu", kdb_cpu, "<cpunum>",
+         "Switch to new cpu", 0,
+         KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+       kdb_register_flags("kgdb", kdb_kgdb, "",
+         "Enter kgdb mode", 0, 0);
+       kdb_register_flags("ps", kdb_ps, "[<flags>|A]",
+         "Display active task list", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("pid", kdb_pid, "<pidnum>",
+         "Switch to another task", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("reboot", kdb_reboot, "",
+         "Reboot the machine immediately", 0,
+         KDB_ENABLE_REBOOT);
 #if defined(CONFIG_MODULES)
-       kdb_register_repeat("lsmod", kdb_lsmod, "",
-         "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("lsmod", kdb_lsmod, "",
+         "List loaded kernel modules", 0,
+         KDB_ENABLE_INSPECT);
 #endif
 #if defined(CONFIG_MAGIC_SYSRQ)
-       kdb_register_repeat("sr", kdb_sr, "<key>",
-         "Magic SysRq key", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("sr", kdb_sr, "<key>",
+         "Magic SysRq key", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
 #endif
 #if defined(CONFIG_PRINTK)
-       kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
-         "Display syslog buffer", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("dmesg", kdb_dmesg, "[lines]",
+         "Display syslog buffer", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
 #endif
        if (arch_kgdb_ops.enable_nmi) {
-               kdb_register_repeat("disable_nmi", kdb_disable_nmi, "",
-                 "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE);
-       }
-       kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
-         "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
-         "Send a signal to a process", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("summary", kdb_summary, "",
-         "Summarize the system", 4, KDB_REPEAT_NONE);
-       kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
-         "Display per_cpu variables", 3, KDB_REPEAT_NONE);
-       kdb_register_repeat("grephelp", kdb_grep_help, "",
-         "Display help on | grep", 0, KDB_REPEAT_NONE);
+               kdb_register_flags("disable_nmi", kdb_disable_nmi, "",
+                 "Disable NMI entry to KDB", 0,
+                 KDB_ENABLE_ALWAYS_SAFE);
+       }
+       kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+         "Define a set of commands, down to endefcmd", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("kill", kdb_kill, "<-signal> <pid>",
+         "Send a signal to a process", 0,
+         KDB_ENABLE_SIGNAL);
+       kdb_register_flags("summary", kdb_summary, "",
+         "Summarize the system", 4,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
+         "Display per_cpu variables", 3,
+         KDB_ENABLE_MEM_READ);
+       kdb_register_flags("grephelp", kdb_grep_help, "",
+         "Display help on | grep", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
 }
 
 /* Execute any commands defined in kdb_cmds.  */
index 7afd3c8..eaacd16 100644
@@ -172,10 +172,9 @@ typedef struct _kdbtab {
        kdb_func_t cmd_func;            /* Function to execute command */
        char    *cmd_usage;             /* Usage String for this command */
        char    *cmd_help;              /* Help message for this command */
-       short    cmd_flags;             /* Parsing flags */
        short    cmd_minlen;            /* Minimum legal # command
                                         * chars required */
-       kdb_repeat_t cmd_repeat;        /* Does command auto repeat on enter? */
+       kdb_cmdflags_t cmd_flags;       /* Command behaviour flags */
 } kdbtab_t;
 
 extern int kdb_bt(int, const char **); /* KDB display back trace */
index 4c1ee7f..882f835 100644
@@ -4461,18 +4461,14 @@ perf_output_sample_regs(struct perf_output_handle *handle,
 }
 
 static void perf_sample_regs_user(struct perf_regs *regs_user,
-                                 struct pt_regs *regs)
+                                 struct pt_regs *regs,
+                                 struct pt_regs *regs_user_copy)
 {
-       if (!user_mode(regs)) {
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-
-       if (regs) {
-               regs_user->abi  = perf_reg_abi(current);
+       if (user_mode(regs)) {
+               regs_user->abi = perf_reg_abi(current);
                regs_user->regs = regs;
+       } else if (current->mm) {
+               perf_get_regs_user(regs_user, regs, regs_user_copy);
        } else {
                regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
                regs_user->regs = NULL;
@@ -4951,7 +4947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
        }
 
        if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
-               perf_sample_regs_user(&data->regs_user, regs);
+               perf_sample_regs_user(&data->regs_user, regs,
+                                     &data->regs_user_copy);
 
        if (sample_type & PERF_SAMPLE_REGS_USER) {
                /* regs dump ABI info */
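
The reworked helper above has three outcomes. A commented restatement of
the branch (a paraphrase of the hunk, not new kernel code) may make the
control flow easier to follow:

    if (user_mode(regs)) {
            /* Interrupted userspace: the trapped regs are the user regs. */
            regs_user->regs = regs;
    } else if (current->mm) {
            /* Kernel-mode sample for a user task: ask the arch to
             * reconstruct the user-visible registers into the
             * caller-supplied regs_user_copy scratch area. */
            perf_get_regs_user(regs_user, regs, regs_user_copy);
    } else {
            /* Kernel thread: there are no user registers to sample. */
            regs_user->regs = NULL;
    }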
index 1ea4369..6806c55 100644
@@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 static int wait_consider_task(struct wait_opts *wo, int ptrace,
                                struct task_struct *p)
 {
+       /*
+        * We can race with wait_task_zombie() from another thread.
+        * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
+        * can't confuse the checks below.
+        */
+       int exit_state = ACCESS_ONCE(p->exit_state);
        int ret;
 
-       if (unlikely(p->exit_state == EXIT_DEAD))
+       if (unlikely(exit_state == EXIT_DEAD))
                return 0;
 
        ret = eligible_child(wo, p);
@@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
                return 0;
        }
 
-       if (unlikely(p->exit_state == EXIT_TRACE)) {
+       if (unlikely(exit_state == EXIT_TRACE)) {
                /*
                 * ptrace == 0 means we are the natural parent. In this case
                 * we should clear notask_error, debugger will notify us.
@@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
        }
 
        /* slay zombie? */
-       if (p->exit_state == EXIT_ZOMBIE) {
+       if (exit_state == EXIT_ZOMBIE) {
                /* we don't reap group leaders with subthreads */
                if (!delay_group_leader(p)) {
                        /*
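
The fix above is the classic read-once pattern: snapshot a field that a
concurrent writer may change, so every later comparison tests the same
value. A userspace analogue using C11 atomics where the kernel uses
ACCESS_ONCE():

    #include <stdatomic.h>
    #include <stdio.h>

    enum { RUNNING, EXIT_ZOMBIE, EXIT_DEAD, EXIT_TRACE };

    static _Atomic int task_exit_state = EXIT_ZOMBIE;

    static int consider(void)
    {
            /* One load; every branch below agrees on what was seen,
             * even if a writer flips the state concurrently. */
            int state = atomic_load(&task_exit_state);

            if (state == EXIT_DEAD)
                    return 0;
            if (state == EXIT_TRACE)
                    return 2;
            return state == EXIT_ZOMBIE ? 1 : -1;
    }

    int main(void)
    {
            printf("%d\n", consider());     /* 1 */
            return 0;
    }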
index 5cf6731..3ef3736 100644
@@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock)
                        DEBUG_LOCKS_WARN_ON(lock->owner != current);
 
                DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-               mutex_clear_owner(lock);
        }
 
        /*
         * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
         * mutexes so that we can do it here after we've verified state.
         */
+       mutex_clear_owner(lock);
        atomic_set(&lock->count, 1);
 }
 
index b5797b7..c0accc0 100644
@@ -7113,9 +7113,6 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       alloc_size += num_possible_cpus() * cpumask_size();
-#endif
        if (alloc_size) {
                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
                ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+       }
 #ifdef CONFIG_CPUMASK_OFFSTACK
-               for_each_possible_cpu(i) {
-                       per_cpu(load_balance_mask, i) = (void *)ptr;
-                       ptr += cpumask_size();
-               }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+       for_each_possible_cpu(i) {
+               per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+                       cpumask_size(), GFP_KERNEL, cpu_to_node(i));
        }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
        init_rt_bandwidth(&def_rt_bandwidth,
                        global_rt_period(), global_rt_runtime());
index e5db8c6..b52092f 100644
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 static
 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 {
-       int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
-       int rorun = dl_se->runtime <= 0;
-
-       if (!rorun && !dmiss)
-               return 0;
-
-       /*
-        * If we are beyond our current deadline and we are still
-        * executing, then we have already used some of the runtime of
-        * the next instance. Thus, if we do not account that, we are
-        * stealing bandwidth from the system at each deadline miss!
-        */
-       if (dmiss) {
-               dl_se->runtime = rorun ? dl_se->runtime : 0;
-               dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
-       }
-
-       return 1;
+       return (dl_se->runtime <= 0);
 }
 
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
-       if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
-               replenish_dl_entity(dl_se, pi_se);
-       else
+       if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
                update_dl_entity(dl_se, pi_se);
+       else if (flags & ENQUEUE_REPLENISH)
+               replenish_dl_entity(dl_se, pi_se);
 
        __enqueue_dl_entity(dl_se);
 }
index df2cdf7..40667cb 100644
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+       /* init_cfs_bandwidth() was not called */
+       if (!cfs_b->throttled_cfs_rq.next)
+               return;
+
        hrtimer_cancel(&cfs_b->period_timer);
        hrtimer_cancel(&cfs_b->slack_timer);
 }
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                 * wl = S * s'_i; see (2)
                 */
                if (W > 0 && w < W)
-                       wl = (w * tg->shares) / W;
+                       wl = (w * (long)tg->shares) / W;
                else
                        wl = tg->shares;
 
index b0b1c44..3ccf5c2 100644
@@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv)
 
 static __init int kdb_ftrace_register(void)
 {
-       kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
-                           "Dump ftrace log", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
+                           "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
        return 0;
 }
 
index 358eb81..c635a10 100644
@@ -73,6 +73,31 @@ config KGDB_KDB
        help
          KDB frontend for kernel
 
+config KDB_DEFAULT_ENABLE
+       hex "KDB: Select kdb command functions to be enabled by default"
+       depends on KGDB_KDB
+       default 0x1
+       help
+         Specifies which kdb commands are enabled by default. This may
+         be set to 1 or 0 to enable all commands or disable almost all
+         commands.
+
+         Alternatively the following bitmask applies:
+
+           0x0002 - allow arbitrary reads from memory and symbol lookup
+           0x0004 - allow arbitrary writes to memory
+           0x0008 - allow current register state to be inspected
+           0x0010 - allow current register state to be modified
+           0x0020 - allow passive inspection (backtrace, process list, lsmod)
+           0x0040 - allow flow control management (breakpoint, single step)
+           0x0080 - enable signalling of processes
+           0x0100 - allow machine to be rebooted
+
+         The config option merely sets the default at boot time. Either
+         issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
+         setting the kdb.cmd_enable=X kernel command line option will
+         override the default settings.
+
 config KDB_KEYBOARD
        bool "KGDB_KDB: keyboard as input device"
        depends on VT && KGDB_KDB
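
A worked example of composing the bitmask documented in the help text
above: allowing memory reads (0x0002), passive inspection (0x0020) and
flow control (0x0040) ORs together to 0x0062, so one can either boot with
kdb.cmd_enable=0x62 or run 'echo 0x62 > /sys/module/kdb/parameters/cmd_enable'
at runtime (both mechanisms are quoted from the help text; the combined
value is simply their bitwise OR).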
index 2404d03..03dd576 100644
@@ -11,6 +11,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 //#define DEBUG
+#include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/assoc_array_priv.h>
index 56badfc..957d3da 100644
@@ -14,7 +14,6 @@ config DEBUG_PAGEALLOC
        depends on !KMEMCHECK
        select PAGE_EXTENSION
        select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
-       select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
        ---help---
          Unmap pages from the kernel linear mapping after free_pages().
          This results in a large slowdown, but helps to find certain types
@@ -27,13 +26,5 @@ config DEBUG_PAGEALLOC
          that would result in incorrect warnings of memory corruption after
          a resume because free pages are not saved to the suspend image.
 
-config WANT_PAGE_DEBUG_FLAGS
-       bool
-
 config PAGE_POISONING
        bool
-       select WANT_PAGE_DEBUG_FLAGS
-
-config PAGE_GUARD
-       bool
-       select WANT_PAGE_DEBUG_FLAGS
index ef91e85..851924f 100644
@@ -3043,18 +3043,6 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
        if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
                mem_cgroup_swap_statistics(from, false);
                mem_cgroup_swap_statistics(to, true);
-               /*
-                * This function is only called from task migration context now.
-                * It postpones page_counter and refcount handling till the end
-                * of task migration(mem_cgroup_clear_mc()) for performance
-                * improvement. But we cannot postpone css_get(to)  because if
-                * the process that has been moved to @to does swap-in, the
-                * refcount of @to might be decreased to 0.
-                *
-                * We are in attach() phase, so the cgroup is guaranteed to be
-                * alive, so we can just call css_get().
-                */
-               css_get(&to->css);
                return 0;
        }
        return -EINVAL;
@@ -4679,6 +4667,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        if (parent_css == NULL) {
                root_mem_cgroup = memcg;
                page_counter_init(&memcg->memory, NULL);
+               memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, NULL);
                page_counter_init(&memcg->kmem, NULL);
        }
@@ -4724,6 +4713,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 
        if (parent->use_hierarchy) {
                page_counter_init(&memcg->memory, &parent->memory);
+               memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, &parent->memsw);
                page_counter_init(&memcg->kmem, &parent->kmem);
 
@@ -4733,6 +4723,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
                 */
        } else {
                page_counter_init(&memcg->memory, NULL);
+               memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, NULL);
                page_counter_init(&memcg->kmem, NULL);
                /*
@@ -4807,7 +4798,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
        mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
        mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
        memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
-       memcg->soft_limit = 0;
+       memcg->soft_limit = PAGE_COUNTER_MAX;
 }
 
 #ifdef CONFIG_MMU
index ca920d1..54f3a9b 100644
@@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
+       if (!tlb->end)
+               return;
+
        tlb_flush(tlb);
        mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
        struct mmu_gather_batch *batch;
 
-       for (batch = &tlb->local; batch; batch = batch->next) {
+       for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
        }
@@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-       if (!tlb->end)
-               return;
-
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
 }
@@ -2137,17 +2137,24 @@ reuse:
                if (!dirty_page)
                        return ret;
 
-               /*
-                * Yes, Virginia, this is actually required to prevent a race
-                * with clear_page_dirty_for_io() from clearing the page dirty
-                * bit after it clear all dirty ptes, but before a racing
-                * do_wp_page installs a dirty pte.
-                *
-                * do_shared_fault is protected similarly.
-                */
                if (!page_mkwrite) {
-                       wait_on_page_locked(dirty_page);
-                       set_page_dirty_balance(dirty_page);
+                       struct address_space *mapping;
+                       int dirtied;
+
+                       lock_page(dirty_page);
+                       dirtied = set_page_dirty(dirty_page);
+                       VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
+                       mapping = dirty_page->mapping;
+                       unlock_page(dirty_page);
+
+                       if (dirtied && mapping) {
+                               /*
+                                * Some device drivers do not set page.mapping
+                                * but still dirty their pages
+                                */
+                               balance_dirty_pages_ratelimited(mapping);
+                       }
+
                        /* file_update_time outside page_lock */
                        if (vma->vm_file)
                                file_update_time(vma->vm_file);
@@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (prev && prev->vm_end == address)
                        return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-               expand_downwards(vma, address - PAGE_SIZE);
+               return expand_downwards(vma, address - PAGE_SIZE);
        }
        if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
                struct vm_area_struct *next = vma->vm_next;
@@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (next && next->vm_start == address + PAGE_SIZE)
                        return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
 
-               expand_upwards(vma, address + PAGE_SIZE);
+               return expand_upwards(vma, address + PAGE_SIZE);
        }
        return 0;
 }
index 7b36aa7..7f684d5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -778,10 +778,12 @@ again:                    remove_next = 1 + (end > next->vm_end);
                if (exporter && exporter->anon_vma && !importer->anon_vma) {
                        int error;
 
+                       importer->anon_vma = exporter->anon_vma;
                        error = anon_vma_clone(importer, exporter);
-                       if (error)
+                       if (error) {
+                               importer->anon_vma = NULL;
                                return error;
-                       importer->anon_vma = exporter->anon_vma;
+                       }
                }
        }
 
@@ -2099,14 +2101,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
-       unsigned long new_start;
+       unsigned long new_start, actual_size;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
                return -ENOMEM;
 
        /* Stack limit test */
-       if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+       actual_size = size;
+       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+               actual_size -= PAGE_SIZE;
+       if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
        /* mlock limit tests */
index d5d81f5..6f43352 100644
@@ -1541,16 +1541,6 @@ pause:
                bdi_start_background_writeback(bdi);
 }
 
-void set_page_dirty_balance(struct page *page)
-{
-       if (set_page_dirty(page)) {
-               struct address_space *mapping = page_mapping(page);
-
-               if (mapping)
-                       balance_dirty_pages_ratelimited(mapping);
-       }
-}
-
 static DEFINE_PER_CPU(int, bdp_ratelimits);
 
 /*
@@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied);
  * page dirty in that case, but not all the buffers.  This is a "bottom-up"
  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
  *
- * Most callers have locked the page, which pins the address_space in memory.
- * But zap_pte_range() does not lock the page, however in that case the
- * mapping is pinned by the vma's ->vm_file reference.
- *
- * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() inside tree_lock.
+ * The caller must ensure this doesn't race with truncation.  Most will simply
+ * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
+ * the pte lock held, which also locks out truncation.
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
-               struct address_space *mapping2;
                unsigned long flags;
 
                if (!mapping)
                        return 1;
 
                spin_lock_irqsave(&mapping->tree_lock, flags);
-               mapping2 = page_mapping(page);
-               if (mapping2) { /* Race with truncate? */
-                       BUG_ON(mapping2 != mapping);
-                       WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-                       account_page_dirtied(page, mapping);
-                       radix_tree_tag_set(&mapping->page_tree,
-                               page_index(page), PAGECACHE_TAG_DIRTY);
-               }
+               BUG_ON(page_mapping(page) != mapping);
+               WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+               account_page_dirtied(page, mapping);
+               radix_tree_tag_set(&mapping->page_tree, page_index(page),
+                                  PAGECACHE_TAG_DIRTY);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
@@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page)
                /*
                 * We carefully synchronise fault handlers against
                 * installing a dirty pte and marking the page dirty
-                * at this point. We do this by having them hold the
-                * page lock at some point after installing their
-                * pte, but before marking the page dirty.
-                * Pages are always locked coming in here, so we get
-                * the desired exclusion. See mm/memory.c:do_wp_page()
-                * for more comments.
+                * at this point.  We do this by having them hold the
+                * page lock while dirtying the page, and pages are
+                * always locked coming in here, so we get the desired
+                * exclusion.
                 */
                if (TestClearPageDirty(page)) {
                        dec_zone_page_state(page, NR_FILE_DIRTY);
index c5bc241..71cd5bd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
        anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
        if (anon_vma) {
                atomic_set(&anon_vma->refcount, 1);
+               anon_vma->degree = 1;   /* Reference for first vma */
+               anon_vma->parent = anon_vma;
                /*
                 * Initialise the anon_vma root to point to itself. If called
                 * from fork, the root will be reset to the parents anon_vma.
@@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        anon_vma_chain_link(vma, avc, anon_vma);
+                       /* vma reference or self-parent link for new root */
+                       anon_vma->degree++;
                        allocated = NULL;
                        avc = NULL;
                }
@@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
 /*
  * Attach the anon_vmas from src to dst.
  * Returns 0 on success, -ENOMEM on failure.
+ *
+ * If dst->anon_vma is NULL this function tries to find and reuse an existing
+ * anon_vma which has no vmas and only one child anon_vma. This prevents
+ * degradation of the anon_vma hierarchy to an endless linear chain in the
+ * case of a constantly forking task. On the other hand, an anon_vma with
+ * more than one child isn't reused even if there was no alive vma, thus the
+ * rmap walker has a good chance of avoiding scanning the whole hierarchy
+ * when it searches where a page is mapped.
  */
 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
@@ -256,7 +268,21 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
                anon_vma = pavc->anon_vma;
                root = lock_anon_vma_root(root, anon_vma);
                anon_vma_chain_link(dst, avc, anon_vma);
+
+               /*
+                * Reuse an existing anon_vma if its degree is lower than
+                * two; that means it has no vma and only one anon_vma child.
+                *
+                * Do not choose the parent anon_vma, otherwise the first child
+                * will always reuse it. Root anon_vma is never reused:
+                * it has self-parent reference and at least one child.
+                */
+               if (!dst->anon_vma && anon_vma != src->anon_vma &&
+                               anon_vma->degree < 2)
+                       dst->anon_vma = anon_vma;
        }
+       if (dst->anon_vma)
+               dst->anon_vma->degree++;
        unlock_anon_vma_root(root);
        return 0;
 
@@ -280,6 +306,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        if (!pvma->anon_vma)
                return 0;
 
+       /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
+       vma->anon_vma = NULL;
+
        /*
         * First, attach the new VMA to the parent VMA's anon_vmas,
         * so rmap can find non-COWed pages in child processes.
@@ -288,6 +317,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        if (error)
                return error;
 
+       /* An existing anon_vma has been reused, all done then. */
+       if (vma->anon_vma)
+               return 0;
+
        /* Then add our own anon_vma. */
        anon_vma = anon_vma_alloc();
        if (!anon_vma)
@@ -301,6 +334,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
         * lock any of the anon_vmas in this anon_vma tree.
         */
        anon_vma->root = pvma->anon_vma->root;
+       anon_vma->parent = pvma->anon_vma;
        /*
         * With refcounts, an anon_vma can stay around longer than the
         * process it belongs to. The root anon_vma needs to be pinned until
@@ -311,6 +345,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        vma->anon_vma = anon_vma;
        anon_vma_lock_write(anon_vma);
        anon_vma_chain_link(vma, avc, anon_vma);
+       anon_vma->parent->degree++;
        anon_vma_unlock_write(anon_vma);
 
        return 0;
@@ -341,12 +376,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
                 * Leave empty anon_vmas on the list - we'll need
                 * to free them outside the lock.
                 */
-               if (RB_EMPTY_ROOT(&anon_vma->rb_root))
+               if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
+                       anon_vma->parent->degree--;
                        continue;
+               }
 
                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
+       if (vma->anon_vma)
+               vma->anon_vma->degree--;
        unlock_anon_vma_root(root);
 
        /*
@@ -357,6 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
                struct anon_vma *anon_vma = avc->anon_vma;
 
+               BUG_ON(anon_vma->degree);
                put_anon_vma(anon_vma);
 
                list_del(&avc->same_vma);
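
A toy model of the degree bookkeeping threaded through these hunks:
degree counts attached vmas plus child anon_vmas, so the "degree < 2"
test in anon_vma_clone() selects an empty anon_vma kept alive by a single
child, which can be adopted without deepening the hierarchy. The structure
and numbers below are illustrative only:

    #include <stdio.h>

    struct toy_anon_vma {
            unsigned int vmas;      /* vmas pointing at this anon_vma */
            unsigned int children;  /* child anon_vmas */
    };

    static unsigned int degree(const struct toy_anon_vma *av)
    {
            return av->vmas + av->children;
    }

    int main(void)
    {
            struct toy_anon_vma dead_branch = { .vmas = 0, .children = 1 };
            struct toy_anon_vma live = { .vmas = 1, .children = 1 };

            printf("%d\n", degree(&dead_branch) < 2);       /* 1: reusable */
            printf("%d\n", degree(&live) < 2);              /* 0: keep it */
            return 0;
    }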
index bd9a72b..ab2505c 100644
@@ -2921,18 +2921,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
                return false;
 
        /*
-        * There is a potential race between when kswapd checks its watermarks
-        * and a process gets throttled. There is also a potential race if
-        * processes get throttled, kswapd wakes, a large process exits therby
-        * balancing the zones that causes kswapd to miss a wakeup. If kswapd
-        * is going to sleep, no process should be sleeping on pfmemalloc_wait
-        * so wake them now if necessary. If necessary, processes will wake
-        * kswapd and get throttled again
+        * The throttled processes are normally woken up in balance_pgdat() as
+        * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+        * race between when kswapd checks the watermarks and a process gets
+        * throttled. There is also a potential race if processes get
+        * throttled, kswapd wakes, a large process exits thereby balancing the
+        * zones, which causes kswapd to exit balance_pgdat() before reaching
+        * the wake up checks. If kswapd is going to sleep, no process should
+        * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+        * the wake up is premature, processes will wake kswapd and get
+        * throttled again. The difference from wake ups in balance_pgdat() is
+        * that here we are under prepare_to_wait().
         */
-       if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
-               wake_up(&pgdat->pfmemalloc_wait);
-               return false;
-       }
+       if (waitqueue_active(&pgdat->pfmemalloc_wait))
+               wake_up_all(&pgdat->pfmemalloc_wait);
 
        return pgdat_balanced(pgdat, order, classzone_idx);
 }
index ab6bb2a..b24e4bb 100644
@@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
                if (orig_initialized)
                        atomic_dec(&bat_priv->mcast.num_disabled);
                orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
-       /* If mcast support is being switched off increase the disabled
-        * mcast node counter.
+       /* If mcast support is being switched off or if this is an initial
+        * OGM without mcast support then increase the disabled mcast
+        * node counter.
         */
        } else if (!orig_mcast_enabled &&
-                  orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) {
+                  (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+                   !orig_initialized)) {
                atomic_inc(&bat_priv->mcast.num_disabled);
                orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
        }
@@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
 {
        struct batadv_priv *bat_priv = orig->bat_priv;
 
-       if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST))
+       if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
+           orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
                atomic_dec(&bat_priv->mcast.num_disabled);
 
        batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
index 8d04d17..fab47f1 100644
@@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
        if (!bat_priv->nc.decoding_hash)
                goto err;
 
-       batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
+       batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
                                   &batadv_nc_decoding_hash_lock_class_key);
 
        INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
index 6a48451..bea8198 100644
@@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 
        batadv_frag_purge_orig(orig_node, NULL);
 
-       batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
-                                 "originator timed out");
-
        if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
                orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
 
@@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
        atomic_set(&orig_node->last_ttvn, 0);
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;
+       orig_node->last_seen = jiffies;
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
 #ifdef CONFIG_BATMAN_ADV_MCAST
@@ -977,6 +975,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
                        if (batadv_purge_orig_node(bat_priv, orig_node)) {
                                batadv_gw_node_delete(bat_priv, orig_node);
                                hlist_del_rcu(&orig_node->hash_entry);
+                               batadv_tt_global_del_orig(orig_node->bat_priv,
+                                                         orig_node, -1,
+                                                         "originator timed out");
                                batadv_orig_node_free_ref(orig_node);
                                continue;
                        }
index 35f76f2..6648f32 100644
@@ -443,11 +443,13 @@ batadv_find_router(struct batadv_priv *bat_priv,
 
        router = batadv_orig_router_get(orig_node, recv_if);
 
+       if (!router)
+               return router;
+
        /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
         * and if activated.
         */
-       if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) ||
-           !router)
+       if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
                return router;
 
        /* bonding: loop through the list of possible routers found
index 1f1de71..e2aa7be 100644
@@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
        dst = NULL;
 
        if (is_broadcast_ether_addr(dest)) {
-               if (p->flags & BR_PROXYARP &&
+               if (IS_ENABLED(CONFIG_INET) &&
+                   p->flags & BR_PROXYARP &&
                    skb->protocol == htons(ETH_P_ARP))
                        br_do_proxy_arp(skb, br, vid);
 
index 1584581..ba6eb17 100644
@@ -676,7 +676,7 @@ static int calcu_signature(struct ceph_x_authorizer *au,
        int ret;
        char tmp_enc[40];
        __le32 tmp[5] = {
-               16u, msg->hdr.crc, msg->footer.front_crc,
+               cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc,
                msg->footer.middle_crc, msg->footer.data_crc,
        };
        ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
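
The cpu_to_le32() fix above only shows up on big-endian hosts, where a raw
16u stored into a little-endian wire structure would go out byte-swapped.
A userspace demonstration with glibc's htole32() standing in for the
kernel's cpu_to_le32():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t wire = htole32(16);
            const unsigned char *b = (const unsigned char *)&wire;

            /* Always "10 00 00 00", whatever the host byte order. */
            printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
            return 0;
    }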
index a83062c..f2148e2 100644
@@ -717,7 +717,7 @@ static int get_poolop_reply_buf(const char *src, size_t src_len,
        if (src_len != sizeof(u32) + dst_len)
                return -EINVAL;
 
-       buf_len = le32_to_cpu(*(u32 *)src);
+       buf_len = le32_to_cpu(*(__le32 *)src);
        if (buf_len != dst_len)
                return -EINVAL;
 
index 8e38f17..8d614c9 100644
@@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
                        case NDTPA_BASE_REACHABLE_TIME:
                                NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
                                              nla_get_msecs(tbp[i]));
+                               /* update reachable_time as well, otherwise, the change will
+                                * only be effective after the next time neigh_periodic_work
+                                * decides to recompute it (can be multiple minutes)
+                                */
+                               p->reachable_time =
+                                       neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
                                break;
                        case NDTPA_GC_STALETIME:
                                NEIGH_VAR_SET(p, GC_STALETIME,
@@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
        return ret;
 }
 
+static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
+                                         void __user *buffer,
+                                         size_t *lenp, loff_t *ppos)
+{
+       struct neigh_parms *p = ctl->extra2;
+       int ret;
+
+       if (strcmp(ctl->procname, "base_reachable_time") == 0)
+               ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+       else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
+               ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+       else
+               ret = -1;
+
+       if (write && ret == 0) {
+               /* update reachable_time as well, otherwise, the change will
+                * only be effective after the next time neigh_periodic_work
+                * decides to recompute it
+                */
+               p->reachable_time =
+                       neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+       }
+       return ret;
+}
+
 #define NEIGH_PARMS_DATA_OFFSET(index) \
        (&((struct neigh_parms *) 0)->data[index])
 
@@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
                /* ReachableTime (in milliseconds) */
                t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+       } else {
+               /* Those handlers will update p->reachable_time after
+                * base_reachable_time(_ms) is set to ensure the new timer starts being
+                * applied after the next neighbour update instead of waiting for
+                * neigh_periodic_work to update its value (can be multiple minutes)
+                * So any handler that replaces them should do this as well
+                */
+               /* ReachableTime */
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
+                       neigh_proc_base_reachable_time;
+               /* ReachableTime (in milliseconds) */
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
+                       neigh_proc_base_reachable_time;
        }
 
        /* Don't export sysctls to unprivileged users */
index ff2d23d..6ecfce6 100644
@@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
 
        memset(&mr, 0, sizeof(mr));
        if (priv->sreg_proto_min) {
-               mr.range[0].min.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               mr.range[0].max.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               mr.range[0].min.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               mr.range[0].max.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 7f18262..65caf8b 100644
@@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
                        break;
 
-               if (tso_segs == 1) {
+               if (tso_segs == 1 || !max_segs) {
                        if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
                                                     (tcp_skb_is_last(sk, skb) ?
                                                      nonagle : TCP_NAGLE_PUSH))))
@@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                }
 
                limit = mss_now;
-               if (tso_segs > 1 && !tcp_urg_mode(tp))
+               if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
                        limit = tcp_mss_split_point(sk, skb, mss_now,
                                                    min_t(unsigned int,
                                                          cwnd_quota,
index 2433a6b..11820b6 100644
@@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
 
        memset(&range, 0, sizeof(range));
        if (priv->sreg_proto_min) {
-               range.min_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               range.max_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               range.min_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               range.max_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 0bb7038..bd4e46e 100644
@@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
        if (!ret) {
                key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
 
-               if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+               if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
                        sdata->crypto_tx_tailroom_needed_cnt--;
 
                WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
@@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
        sta = key->sta;
        sdata = key->sdata;
 
-       if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+       if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+             (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+             (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
                increment_tailroom_need_count(sdata);
 
        ret = drv_set_key(key->local, DISABLE_KEY, sdata,
@@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
        if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
                key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
 
-               if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+               if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
                        increment_tailroom_need_count(key->sdata);
        }
 
index 1d5341f..5d3daae 100644
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct nf_conn *ct;
        struct net *net;
 
+       *diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
         * so turn this into a no-op for IPv6 packets
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 1;
 #endif
 
-       *diff = 0;
-
        /* Only useful for established sessions */
        if (cp->state != IP_VS_TCP_S_ESTABLISHED)
                return 1;
@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct ip_vs_conn *n_cp;
        struct net *net;
 
+       /* no diff required for incoming packets */
+       *diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
         * so turn this into a no-op for IPv6 packets
@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 1;
 #endif
 
-       /* no diff required for incoming packets */
-       *diff = 0;
-
        /* Only useful for established sessions */
        if (cp->state != IP_VS_TCP_S_ESTABLISHED)
                return 1;
index a116748..46d1b26 100644
@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
-       /* We have to check the DYING flag inside the lock to prevent
-          a race against nf_ct_get_next_corpse() possibly called from
-          user context, else we insert an already 'dead' hash, blocking
-          further use of that particular connection -JM */
+       /* We have to check the DYING flag after unlink to prevent
+        * a race against nf_ct_get_next_corpse() possibly called from
+        * user context, else we insert an already 'dead' hash, blocking
+        * further use of that particular connection -JM.
+        */
+       nf_ct_del_from_dying_or_unconfirmed_list(ct);
 
-       if (unlikely(nf_ct_is_dying(ct))) {
-               nf_conntrack_double_unlock(hash, reply_hash);
-               local_bh_enable();
-               return NF_ACCEPT;
-       }
+       if (unlikely(nf_ct_is_dying(ct)))
+               goto out;
 
        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
 
-       nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        return NF_ACCEPT;
 
 out:
+       nf_ct_add_to_dying_list(ct);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
index 129a8da..3b3ddb4 100644
@@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
        struct nft_chain *chain, *nc;
        struct nft_set *set, *ns;
 
-       list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+       list_for_each_entry(chain, &ctx->table->chains, list) {
                ctx->chain = chain;
 
                err = nft_delrule_by_chain(ctx);
                if (err < 0)
                        goto out;
-
-               err = nft_delchain(ctx);
-               if (err < 0)
-                       goto out;
        }
 
        list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
@@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
                        goto out;
        }
 
+       list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+               ctx->chain = chain;
+
+               err = nft_delchain(ctx);
+               if (err < 0)
+                       goto out;
+       }
+
        err = nft_deltable(ctx);
 out:
        return err;
index cde4a67..c421d94 100644
@@ -321,7 +321,8 @@ replay:
                nlh = nlmsg_hdr(skb);
                err = 0;
 
-               if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+               if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
+                   skb->len < nlh->nlmsg_len) {
                        err = -EINVAL;
                        goto ack;
                }
@@ -469,7 +470,7 @@ static int nfnetlink_bind(struct net *net, int group)
        int type;
 
        if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
-               return -EINVAL;
+               return 0;
 
        type = nfnl_group2type[group];
 
index afe2b0b..aff54fb 100644
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
        }
 
        if (priv->sreg_proto_min) {
-               range.min_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               range.max_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               range.min_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               range.max_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
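The same register-load fix appears three times in this pull (nft_redir for IPv4 and IPv6 above, and nft_nat here): casting the 32-bit register word to __be16 keeps its low 16 bits, whereas the value actually lives in the first two bytes of the word, and the two only coincide on little-endian hosts. A userspace sketch of the difference, with plain uint types standing in for the kernel's u32/__be16:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* a 32-bit register word whose first two bytes carry a
	 * network-order port (80 == bytes 0x00 0x50) */
	unsigned char reg[4] = { 0x00, 0x50, 0x00, 0x00 };
	uint32_t word;
	uint16_t cast, deref;

	memcpy(&word, reg, sizeof(word));
	cast = (uint16_t)word;              /* old code: low 16 bits   */
	memcpy(&deref, reg, sizeof(deref)); /* new code: first 2 bytes */

	/* little-endian: both are 0x5000; big-endian: the cast yields
	 * 0 while the two-byte read still yields 0x0050 */
	printf("cast=%#x deref=%#x\n", cast, deref);
	return 0;
}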
index 4e9a5f0..b07349e 100644
@@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        struct vport *input_vport;
        int len;
        int err;
-       bool log = !a[OVS_FLOW_ATTR_PROBE];
+       bool log = !a[OVS_PACKET_ATTR_PROBE];
 
        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -610,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+       [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
 };
 
 static const struct genl_ops dp_packet_genl_ops[] = {
index 70bef2a..da2fae0 100644
@@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 {
        struct flow_stats *stats;
        int node = numa_node_id();
+       int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
 
        stats = rcu_dereference(flow->stats[node]);
 
@@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
                                if (likely(new_stats)) {
                                        new_stats->used = jiffies;
                                        new_stats->packet_count = 1;
-                                       new_stats->byte_count = skb->len;
+                                       new_stats->byte_count = len;
                                        new_stats->tcp_flags = tcp_flags;
                                        spin_lock_init(&new_stats->lock);
 
@@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 
        stats->used = jiffies;
        stats->packet_count++;
-       stats->byte_count += skb->len;
+       stats->byte_count += len;
        stats->tcp_flags |= tcp_flags;
 unlock:
        spin_unlock(&stats->lock);
index 53f3ebb..2034c6d 100644
@@ -480,7 +480,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
        stats = this_cpu_ptr(vport->percpu_stats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
-       stats->rx_bytes += skb->len;
+       stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
        u64_stats_update_end(&stats->syncp);
 
        OVS_CB(skb)->input_vport = vport;
index 6880f34..9cfe2e1 100644
@@ -2517,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        err = -EINVAL;
        if (sock->type == SOCK_DGRAM) {
                offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
-               if (unlikely(offset) < 0)
+               if (unlikely(offset < 0))
                        goto out_free;
        } else {
                if (ll_header_truncated(dev, len))
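The packet_snd fix is a misplaced parenthesis worth dwelling on: the kernel defines unlikely() as __builtin_expect(!!(x), 0), so it normalizes its argument to 0 or 1, and the old `unlikely(offset) < 0` could never be true, silently disabling the error check. A standalone sketch (requires GCC or clang for __builtin_expect):

#include <stdio.h>

/* the kernel's definition, reproduced here for illustration */
#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
	int offset = -1;

	/* !!(offset) is 1, and 1 < 0 is always false, so this error
	 * path can never be taken */
	if (unlikely(offset) < 0)
		printf("unreachable\n");

	/* with the comparison inside the macro it works as intended */
	if (unlikely(offset < 0))
		printf("error path taken\n");
	return 0;
}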
index 1cb6124..4439ac4 100644
@@ -606,7 +606,7 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
        struct kvec *head = buf->head;
        struct kvec *tail = buf->tail;
        int fraglen;
-       int new, old;
+       int new;
 
        if (len > buf->len) {
                WARN_ON_ONCE(1);
@@ -629,8 +629,8 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
        buf->len -= fraglen;
 
        new = buf->page_base + buf->page_len;
-       old = new + fraglen;
-       xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT);
+
+       xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
 
        if (buf->page_len) {
                xdr->p = page_address(*xdr->page_ptr);
index 96ceefe..a9e174f 100644
@@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
        struct sk_buff *skb;
 
        skb_queue_walk(&bcl->outqueue, skb) {
-               if (more(buf_seqno(skb), after))
+               if (more(buf_seqno(skb), after)) {
+                       tipc_link_retransmit(bcl, skb, mod(to - after));
                        break;
+               }
        }
-       tipc_link_retransmit(bcl, skb, mod(to - after));
 }
 
 /**
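The tipc fix moves the retransmit call inside the queue walk: skb_queue_walk() iterates a circular list anchored by a sentinel head, so when the loop finishes without breaking, the cursor points at the sentinel rather than a valid skb, and the old code passed that stale cursor on. A simplified userspace analogue (a NULL-terminated list instead of the kernel's circular one):

#include <stdio.h>

struct node { int seq; struct node *next; };

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *n;

	/* like the fixed code: act on the element while the cursor is
	 * known to point at a real node, then stop */
	for (n = &a; n; n = n->next) {
		if (n->seq > 2) {
			printf("retransmit from seq %d\n", n->seq);
			break;
		}
	}

	/* buggy shape: using n after the loop is only safe if something
	 * matched; in the kernel's circular list the stale cursor is
	 * not even NULL, so it cannot be caught by a NULL check */
	return 0;
}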
index 1bca180..627f8cb 100644
@@ -42,19 +42,19 @@ __clean-files       := $(extra-y) $(extra-m) $(extra-)       \
 
 __clean-files   := $(filter-out $(no-clean-files), $(__clean-files))
 
-# as clean-files is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# clean-files is given relative to the current directory, unless it
+# starts with $(objtree)/ (which means "./", so do not add "./" unless
+# you want to delete a file from the toplevel object directory).
 
 __clean-files   := $(wildcard                                               \
-                   $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \
-                  $(filter /%, $(__clean-files)))
+                  $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \
+                  $(filter $(objtree)/%, $(__clean-files)))
 
-# as clean-dirs is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# same as clean-files
 
 __clean-dirs    := $(wildcard                                               \
-                   $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs)))    \
-                  $(filter /%, $(clean-dirs)))
+                  $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs)))    \
+                  $(filter $(objtree)/%, $(clean-dirs)))
 
 # ==========================================================================
 
index 9609a7f..c795237 100644
@@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                        atomic_dec(&key->user->nikeys);
 
-               key_user_put(key->user);
-
                /* now throw away the key memory */
                if (key->type->destroy)
                        key->type->destroy(key);
 
+               key_user_put(key->user);
+
                kfree(key->description);
 
 #ifdef KEY_DEBUGGING
index 255dabc..2a85e42 100644
@@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
        spin_lock_irq(&efw->lock);
 
        t = (struct snd_efw_transaction *)data;
-       length = min_t(size_t, t->length * sizeof(t->length), length);
+       length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
 
        if (efw->push_ptr < efw->pull_ptr)
                capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
index 5f13d2d..b422e40 100644
@@ -3353,6 +3353,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0067, .name = "MCP67 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x10de0070, .name = "GPU 70 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
+{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
 { .id = 0x11069f81, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3413,6 +3414,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
 MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
+MODULE_ALIAS("snd-hda-codec-id:10de0072");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
 MODULE_ALIAS("snd-hda-codec-id:11069f81");
index 4f6413e..605d140 100644
@@ -568,9 +568,9 @@ static void stac_store_hints(struct hda_codec *codec)
                        spec->gpio_mask;
        }
        if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
-               spec->gpio_mask &= spec->gpio_mask;
-       if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
                spec->gpio_dir &= spec->gpio_mask;
+       if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+               spec->gpio_data &= spec->gpio_mask;
        if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
                spec->eapd_mask &= spec->gpio_mask;
        if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
index 2728447..327f864 100644
@@ -816,7 +816,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
                return -EINVAL;
        }
 
-       if (cdev->n_streams < 2) {
+       if (cdev->n_streams < 1) {
                dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams);
                return -EINVAL;
        }
index 6f80360..0b0112c 100644
@@ -317,7 +317,7 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
         *
         * TODO: Hook into free() and add that check there as well.
         */
-       debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex));
+       debug_check_no_locks_freed(mutex, sizeof(*mutex));
        __del_lock(__get_lock(mutex));
        return ll_pthread_mutex_destroy(mutex);
 }
@@ -341,7 +341,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
 {
        try_init_preload();
 
-       debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock));
+       debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
        __del_lock(__get_lock(rwlock));
        return ll_pthread_rwlock_destroy(rwlock);
 }
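The liblockdep fix corrects two confusions at once: debug_check_no_locks_freed() takes a length as its second argument, not an end pointer, and `mutex + sizeof(*mutex)` would not even compute the end address, because pointer arithmetic scales by the pointee size. A quick demonstration of the scaling:

#include <stdio.h>
#include <pthread.h>

int main(void)
{
	pthread_mutex_t m;

	/* &m + sizeof(m) advances by sizeof(m) whole mutexes, i.e.
	 * sizeof(m) * sizeof(m) bytes, far past the intended end
	 * (formally undefined, shown only to illustrate the scaling) */
	printf("object start:   %p\n", (void *)&m);
	printf("one past end:   %p\n", (void *)((char *)&m + sizeof(m)));
	printf("old arithmetic: %p\n", (void *)(&m + sizeof(m)));
	return 0;
}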
index e7417fe..747f861 100644
@@ -232,7 +232,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
                if (nr_samples > 0) {
                        total_nr_samples += nr_samples;
                        hists__collapse_resort(hists, NULL);
-                       hists__output_resort(hists);
+                       hists__output_resort(hists, NULL);
 
                        if (symbol_conf.event_group &&
                            !perf_evsel__is_group_leader(pos))
index 1ce425d..1fd96c1 100644
@@ -545,6 +545,42 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
        return __hist_entry__cmp_compute(p_left, p_right, c);
 }
 
+static int64_t
+hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
+                   struct hist_entry *right __maybe_unused)
+{
+       return 0;
+}
+
+static int64_t
+hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
+{
+       if (sort_compute)
+               return 0;
+
+       if (left->stat.period == right->stat.period)
+               return 0;
+       return left->stat.period > right->stat.period ? 1 : -1;
+}
+
+static int64_t
+hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+}
+
+static int64_t
+hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+}
+
+static int64_t
+hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+}
+
 static void insert_hist_entry_by_compute(struct rb_root *root,
                                         struct hist_entry *he,
                                         int c)
@@ -605,7 +641,7 @@ static void hists__process(struct hists *hists)
                hists__precompute(hists);
                hists__compute_resort(hists);
        } else {
-               hists__output_resort(hists);
+               hists__output_resort(hists, NULL);
        }
 
        hists__fprintf(hists, true, 0, 0, 0, stdout);
@@ -1038,27 +1074,35 @@ static void data__hpp_register(struct data__file *d, int idx)
        fmt->header = hpp__header;
        fmt->width  = hpp__width;
        fmt->entry  = hpp__entry_global;
+       fmt->cmp    = hist_entry__cmp_nop;
+       fmt->collapse = hist_entry__cmp_nop;
 
        /* TODO more colors */
        switch (idx) {
        case PERF_HPP_DIFF__BASELINE:
                fmt->color = hpp__color_baseline;
+               fmt->sort  = hist_entry__cmp_baseline;
                break;
        case PERF_HPP_DIFF__DELTA:
                fmt->color = hpp__color_delta;
+               fmt->sort  = hist_entry__cmp_delta;
                break;
        case PERF_HPP_DIFF__RATIO:
                fmt->color = hpp__color_ratio;
+               fmt->sort  = hist_entry__cmp_ratio;
                break;
        case PERF_HPP_DIFF__WEIGHTED_DIFF:
                fmt->color = hpp__color_wdiff;
+               fmt->sort  = hist_entry__cmp_wdiff;
                break;
        default:
+               fmt->sort  = hist_entry__cmp_nop;
                break;
        }
 
        init_header(d, dfmt);
        perf_hpp__column_register(fmt);
+       perf_hpp__register_sort_field(fmt);
 }
 
 static void ui_init(void)
index 011195e..198f3c3 100644
@@ -19,7 +19,9 @@
 int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        int i;
-       const struct option list_options[] = {
+       bool raw_dump = false;
+       struct option list_options[] = {
+               OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
                OPT_END()
        };
        const char * const list_usage[] = {
@@ -27,11 +29,18 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                NULL
        };
 
+       set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN);
+
        argc = parse_options(argc, argv, list_options, list_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
 
        setup_pager();
 
+       if (raw_dump) {
+               print_events(NULL, true);
+               return 0;
+       }
+
        if (argc == 0) {
                print_events(NULL, false);
                return 0;
@@ -53,8 +62,6 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                        print_hwcache_events(NULL, false);
                else if (strcmp(argv[i], "pmu") == 0)
                        print_pmu_events(NULL, false);
-               else if (strcmp(argv[i], "--raw-dump") == 0)
-                       print_events(NULL, true);
                else {
                        char *sep = strchr(argv[i], ':'), *s;
                        int sep_idx;
index 3936760..072ae8a 100644
@@ -457,6 +457,19 @@ static void report__collapse_hists(struct report *rep)
        ui_progress__finish();
 }
 
+static void report__output_resort(struct report *rep)
+{
+       struct ui_progress prog;
+       struct perf_evsel *pos;
+
+       ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
+
+       evlist__for_each(rep->session->evlist, pos)
+               hists__output_resort(evsel__hists(pos), &prog);
+
+       ui_progress__finish();
+}
+
 static int __cmd_report(struct report *rep)
 {
        int ret;
@@ -505,13 +518,20 @@ static int __cmd_report(struct report *rep)
        if (session_done())
                return 0;
 
+       /*
+        * recalculate number of entries after collapsing since it
+        * might be changed during the collapse phase.
+        */
+       rep->nr_entries = 0;
+       evlist__for_each(session->evlist, pos)
+               rep->nr_entries += evsel__hists(pos)->nr_entries;
+
        if (rep->nr_entries == 0) {
                ui__error("The %s file has no samples!\n", file->path);
                return 0;
        }
 
-       evlist__for_each(session->evlist, pos)
-               hists__output_resort(evsel__hists(pos));
+       report__output_resort(rep);
 
        return report__browse_hists(rep);
 }
index 0aa7747..961cea1 100644
@@ -285,7 +285,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
        }
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        hists__output_recalc_col_len(hists, top->print_entries - printed);
        putchar('\n');
@@ -554,7 +554,7 @@ static void perf_top__sort_new_samples(void *arg)
        }
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 }
 
 static void *display_thread_tui(void *arg)
index 614d5c4..8d110de 100644
@@ -187,7 +187,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
         * function since TEST_ASSERT_VAL() returns in case of failure.
         */
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("use callchain: %d, cumulate callchain: %d\n",
@@ -454,12 +454,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
         *   30.00%    10.00%     perf  perf           [.] cmd_record
         *   20.00%     0.00%     bash  libc           [.] malloc
         *   10.00%    10.00%     bash  [kernel]       [k] page_fault
-        *   10.00%    10.00%     perf  [kernel]       [k] schedule
-        *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
+        *   10.00%    10.00%     bash  bash           [.] xmalloc
         *   10.00%    10.00%     perf  [kernel]       [k] page_fault
-        *   10.00%    10.00%     perf  libc           [.] free
         *   10.00%    10.00%     perf  libc           [.] malloc
-        *   10.00%    10.00%     bash  bash           [.] xmalloc
+        *   10.00%    10.00%     perf  [kernel]       [k] schedule
+        *   10.00%    10.00%     perf  libc           [.] free
+        *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
         */
        struct result expected[] = {
                { 7000, 2000, "perf", "perf",     "main" },
@@ -468,12 +468,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
                { 3000, 1000, "perf", "perf",     "cmd_record" },
                { 2000,    0, "bash", "libc",     "malloc" },
                { 1000, 1000, "bash", "[kernel]", "page_fault" },
-               { 1000, 1000, "perf", "[kernel]", "schedule" },
-               { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
+               { 1000, 1000, "bash", "bash",     "xmalloc" },
                { 1000, 1000, "perf", "[kernel]", "page_fault" },
+               { 1000, 1000, "perf", "[kernel]", "schedule" },
                { 1000, 1000, "perf", "libc",     "free" },
                { 1000, 1000, "perf", "libc",     "malloc" },
-               { 1000, 1000, "bash", "bash",     "xmalloc" },
+               { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
        };
 
        symbol_conf.use_callchain = false;
@@ -537,10 +537,13 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
         *                  malloc
         *                  main
         *
-        *   10.00%    10.00%     perf  [kernel]       [k] schedule
+        *   10.00%    10.00%     bash  bash           [.] xmalloc
         *              |
-        *              --- schedule
-        *                  run_command
+        *              --- xmalloc
+        *                  malloc
+        *                  xmalloc     <--- NOTE: there's a cycle
+        *                  malloc
+        *                  xmalloc
         *                  main
         *
         *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
@@ -556,6 +559,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
         *                  run_command
         *                  main
         *
+        *   10.00%    10.00%     perf  [kernel]       [k] schedule
+        *              |
+        *              --- schedule
+        *                  run_command
+        *                  main
+        *
         *   10.00%    10.00%     perf  libc           [.] free
         *              |
         *              --- free
@@ -570,15 +579,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
         *                  run_command
         *                  main
         *
-        *   10.00%    10.00%     bash  bash           [.] xmalloc
-        *              |
-        *              --- xmalloc
-        *                  malloc
-        *                  xmalloc     <--- NOTE: there's a cycle
-        *                  malloc
-        *                  xmalloc
-        *                  main
-        *
         */
        struct result expected[] = {
                { 7000, 2000, "perf", "perf",     "main" },
@@ -587,12 +587,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                { 3000, 1000, "perf", "perf",     "cmd_record" },
                { 2000,    0, "bash", "libc",     "malloc" },
                { 1000, 1000, "bash", "[kernel]", "page_fault" },
-               { 1000, 1000, "perf", "[kernel]", "schedule" },
+               { 1000, 1000, "bash", "bash",     "xmalloc" },
                { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
                { 1000, 1000, "perf", "[kernel]", "page_fault" },
+               { 1000, 1000, "perf", "[kernel]", "schedule" },
                { 1000, 1000, "perf", "libc",     "free" },
                { 1000, 1000, "perf", "libc",     "malloc" },
-               { 1000, 1000, "bash", "bash",     "xmalloc" },
        };
        struct callchain_result expected_callchain[] = {
                {
@@ -622,9 +622,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                                { "bash",     "main" }, },
                },
                {
-                       3, {    { "[kernel]", "schedule" },
-                               { "perf",     "run_command" },
-                               { "perf",     "main" }, },
+                       6, {    { "bash",     "xmalloc" },
+                               { "libc",     "malloc" },
+                               { "bash",     "xmalloc" },
+                               { "libc",     "malloc" },
+                               { "bash",     "xmalloc" },
+                               { "bash",     "main" }, },
                },
                {
                        3, {    { "[kernel]", "sys_perf_event_open" },
@@ -638,6 +641,11 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                                { "perf",     "main" }, },
                },
                {
+                       3, {    { "[kernel]", "schedule" },
+                               { "perf",     "run_command" },
+                               { "perf",     "main" }, },
+               },
+               {
                        4, {    { "libc",     "free" },
                                { "perf",     "cmd_record" },
                                { "perf",     "run_command" },
@@ -649,14 +657,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                                { "perf",     "run_command" },
                                { "perf",     "main" }, },
                },
-               {
-                       6, {    { "bash",     "xmalloc" },
-                               { "libc",     "malloc" },
-                               { "bash",     "xmalloc" },
-                               { "libc",     "malloc" },
-                               { "bash",     "xmalloc" },
-                               { "bash",     "main" }, },
-               },
        };
 
        symbol_conf.use_callchain = true;
index 74f257a..59e53db 100644
@@ -138,7 +138,7 @@ int test__hists_filter(void)
                struct hists *hists = evsel__hists(evsel);
 
                hists__collapse_resort(hists, NULL);
-               hists__output_resort(hists);
+               hists__output_resort(hists, NULL);
 
                if (verbose > 2) {
                        pr_info("Normal histogram\n");
index a748f2b..f554761 100644
@@ -152,7 +152,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -252,7 +252,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -306,7 +306,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -384,7 +384,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -487,7 +487,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
index e6bb04b..788506e 100644
@@ -550,7 +550,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
        bool need_percent;
 
        node = rb_first(root);
-       need_percent = !!rb_next(node);
+       need_percent = node && rb_next(node);
 
        while (node) {
                struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
index dc0d095..482adae 100644
@@ -204,6 +204,9 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
                if (ret)
                        return ret;
 
+               if (a->thread != b->thread || !symbol_conf.use_callchain)
+                       return 0;
+
                ret = b->callchain->max_depth - a->callchain->max_depth;
        }
        return ret;
index 2f61256..3c38f25 100644
@@ -1,5 +1,8 @@
 #include <signal.h>
 #include <stdbool.h>
+#ifdef HAVE_BACKTRACE_SUPPORT
+#include <execinfo.h>
+#endif
 
 #include "../../util/cache.h"
 #include "../../util/debug.h"
@@ -88,6 +91,25 @@ int ui__getch(int delay_secs)
        return SLkp_getkey();
 }
 
+#ifdef HAVE_BACKTRACE_SUPPORT
+static void ui__signal_backtrace(int sig)
+{
+       void *stackdump[32];
+       size_t size;
+
+       ui__exit(false);
+       psignal(sig, "perf");
+
+       printf("-------- backtrace --------\n");
+       size = backtrace(stackdump, ARRAY_SIZE(stackdump));
+       backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
+
+       exit(0);
+}
+#else
+# define ui__signal_backtrace  ui__signal
+#endif
+
 static void ui__signal(int sig)
 {
        ui__exit(false);
@@ -122,8 +144,8 @@ int ui__init(void)
        ui_browser__init();
        tui_progress__init();
 
-       signal(SIGSEGV, ui__signal);
-       signal(SIGFPE, ui__signal);
+       signal(SIGSEGV, ui__signal_backtrace);
+       signal(SIGFPE, ui__signal_backtrace);
        signal(SIGINT, ui__signal);
        signal(SIGQUIT, ui__signal);
        signal(SIGTERM, ui__signal);
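The TUI change installs a crash handler that resets the screen and dumps a backtrace before exiting. A self-contained sketch of the same backtrace()/backtrace_symbols_fd() pattern from glibc's <execinfo.h>; build with -g -rdynamic for readable symbols, and note that, as in the patch, the handler calls functions that are not strictly async-signal-safe, which is an accepted trade-off on a crash-only path:

#include <execinfo.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void crash_handler(int sig)
{
	void *stackdump[32];
	int size;

	psignal(sig, "demo");
	printf("-------- backtrace --------\n");
	size = backtrace(stackdump, 32);
	backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
	_exit(1);
}

int main(void)
{
	signal(SIGSEGV, crash_handler);
	raise(SIGSEGV); /* force a crash to exercise the handler */
	return 0;
}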
index 64b377e..14e7a12 100644
@@ -841,3 +841,33 @@ char *callchain_list__sym_name(struct callchain_list *cl,
 
        return bf;
 }
+
+static void free_callchain_node(struct callchain_node *node)
+{
+       struct callchain_list *list, *tmp;
+       struct callchain_node *child;
+       struct rb_node *n;
+
+       list_for_each_entry_safe(list, tmp, &node->val, list) {
+               list_del(&list->list);
+               free(list);
+       }
+
+       n = rb_first(&node->rb_root_in);
+       while (n) {
+               child = container_of(n, struct callchain_node, rb_node_in);
+               n = rb_next(n);
+               rb_erase(&child->rb_node_in, &node->rb_root_in);
+
+               free_callchain_node(child);
+               free(child);
+       }
+}
+
+void free_callchain(struct callchain_root *root)
+{
+       if (!symbol_conf.use_callchain)
+               return;
+
+       free_callchain_node(&root->node);
+}
index dbc08cf..c0ec1ac 100644
@@ -198,4 +198,6 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
 char *callchain_list__sym_name(struct callchain_list *cl,
                               char *bf, size_t bfsize, bool show_dso);
 
+void free_callchain(struct callchain_root *root);
+
 #endif /* __PERF_CALLCHAIN_H */
index 6e88b9e..1823955 100644
@@ -6,6 +6,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "annotate.h"
+#include "ui/progress.h"
 #include <math.h>
 
 static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -303,7 +304,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
        size_t callchain_size = 0;
        struct hist_entry *he;
 
-       if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
+       if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);
 
        he = zalloc(sizeof(*he) + callchain_size);
@@ -736,7 +737,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
        iter->he = he;
        he_cache[iter->curr++] = he;
 
-       callchain_append(he->callchain, &callchain_cursor, sample->period);
+       hist_entry__append_callchain(he, sample);
 
        /*
         * We need to re-initialize the cursor since callchain_append()
@@ -809,7 +810,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
        iter->he = he;
        he_cache[iter->curr++] = he;
 
-       callchain_append(he->callchain, &cursor, sample->period);
+       if (symbol_conf.use_callchain)
+               callchain_append(he->callchain, &cursor, sample->period);
        return 0;
 }
 
@@ -945,6 +947,7 @@ void hist_entry__free(struct hist_entry *he)
        zfree(&he->mem_info);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
+       free_callchain(he->callchain);
        free(he);
 }
 
@@ -987,6 +990,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                else
                        p = &(*p)->rb_right;
        }
+       hists->nr_entries++;
 
        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
@@ -1024,7 +1028,10 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
        if (!sort__need_collapse)
                return;
 
+       hists->nr_entries = 0;
+
        root = hists__get_rotate_entries_in(hists);
+
        next = rb_first(root);
 
        while (next) {
@@ -1119,7 +1126,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
        rb_insert_color(&he->rb_node, entries);
 }
 
-void hists__output_resort(struct hists *hists)
+void hists__output_resort(struct hists *hists, struct ui_progress *prog)
 {
        struct rb_root *root;
        struct rb_node *next;
@@ -1148,6 +1155,9 @@ void hists__output_resort(struct hists *hists)
 
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
+
+               if (prog)
+                       ui_progress__update(prog, 1);
        }
 }
 
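The hists__output_resort() signature change threads an optional struct ui_progress through the resort loop; call sites with nothing to render (perf top, the tests, annotate) simply pass NULL. A minimal sketch of this optional-progress pattern, with made-up names (progress_update, resort) rather than the perf API:

#include <stdio.h>

struct progress { unsigned long cur, total; };

static void progress_update(struct progress *p, unsigned long n)
{
	p->cur += n;
	fprintf(stderr, "\rsorting %lu/%lu", p->cur, p->total);
}

/* NULL means "run silently": callers without a UI pass no progress */
static void resort(const int *entries, int n, struct progress *prog)
{
	for (int i = 0; i < n; i++) {
		/* ... reinsert entries[i] into the output tree ... */
		if (prog)
			progress_update(prog, 1);
	}
}

int main(void)
{
	int entries[] = { 3, 1, 4, 1, 5 };
	struct progress prog = { 0, 5 };

	resort(entries, 5, &prog); /* interactive caller */
	resort(entries, 5, NULL);  /* silent caller      */
	fprintf(stderr, "\n");
	return 0;
}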
index d0ef9a1..46bd503 100644
@@ -121,7 +121,7 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
                              struct hists *hists);
 void hist_entry__free(struct hist_entry *);
 
-void hists__output_resort(struct hists *hists);
+void hists__output_resort(struct hists *hists, struct ui_progress *prog);
 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
 
 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
index 28eb141..7f9b863 100644
@@ -495,9 +495,11 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
        }
 
        if (ntevs == 0) {       /* No error but failed to find probe point. */
-               pr_warning("Probe point '%s' not found.\n",
+               pr_warning("Probe point '%s' not found in debuginfo.\n",
                           synthesize_perf_probe_point(&pev->point));
-               return -ENOENT;
+               if (need_dwarf)
+                       return -ENOENT;
+               return 0;
        }
        /* Error path : ntevs < 0 */
        pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
index c7918f8..b5247d7 100644
@@ -989,8 +989,24 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
        int ret = 0;
 
 #if _ELFUTILS_PREREQ(0, 142)
+       Elf *elf;
+       GElf_Ehdr ehdr;
+       GElf_Shdr shdr;
+
        /* Get the call frame information from this dwarf */
-       pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg));
+       elf = dwarf_getelf(dbg->dbg);
+       if (elf == NULL)
+               return -EINVAL;
+
+       if (gelf_getehdr(elf, &ehdr) == NULL)
+               return -EINVAL;
+
+       if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
+           shdr.sh_type == SHT_PROGBITS) {
+               pf->cfi = dwarf_getcfi_elf(elf);
+       } else {
+               pf->cfi = dwarf_getcfi(dbg->dbg);
+       }
 #endif
 
        off = 0;
index d273624..e238c95 100644
@@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags,
 }
 
 static int check_execveat_invoked_rc(int fd, const char *path, int flags,
-                                    int expected_rc)
+                                    int expected_rc, int expected_rc2)
 {
        int status;
        int rc;
@@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
                        child, status);
                return 1;
        }
-       if (WEXITSTATUS(status) != expected_rc) {
-               printf("[FAIL] (child %d exited with %d not %d)\n",
-                       child, WEXITSTATUS(status), expected_rc);
+       if ((WEXITSTATUS(status) != expected_rc) &&
+           (WEXITSTATUS(status) != expected_rc2)) {
+               printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
+                       child, WEXITSTATUS(status), expected_rc, expected_rc2);
                return 1;
        }
        printf("[OK]\n");
@@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
 
 static int check_execveat(int fd, const char *path, int flags)
 {
-       return check_execveat_invoked_rc(fd, path, flags, 99);
+       return check_execveat_invoked_rc(fd, path, flags, 99, 99);
 }
 
 static char *concat(const char *left, const char *right)
@@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
         * Execute as a long pathname relative to ".".  If this is a script,
         * the interpreter will launch but fail to open the script because its
         * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
+        *
+        * The failure code is usually 127 (POSIX: "If a command is not found,
+        * the exit status shall be 127."), but some systems give 126 (POSIX:
+        * "If the command name is found, but it is not an executable utility,
+        * the exit status shall be 126."), so allow either.
         */
        if (is_script)
-               fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127);
+               fail += check_execveat_invoked_rc(dot_dfd, longpath, 0,
+                                                 127, 126);
        else
                fail += check_execveat(dot_dfd, longpath, 0);
 
index 94dae65..8519e9e 100644
@@ -536,10 +536,9 @@ int main(int argc, char *argv[])
 {
        struct mq_attr attr;
        char *option, *next_option;
-       int i, cpu;
+       int i, cpu, rc;
        struct sigaction sa;
        poptContext popt_context;
-       char rc;
        void *retval;
 
        main_thread = pthread_self();
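Whatever the immediate motivation for the mq_perf_tests type change, storing negative return codes in a plain char is hazardous because char's signedness is implementation-defined: it is unsigned by default on arm and powerpc, where a `rc < 0` check can never fire. A small demonstration:

#include <stdio.h>

static int might_fail(void)
{
	return -1; /* an error code, as most POSIX-style calls return */
}

int main(void)
{
	char rc = might_fail();

	/* on platforms where plain char is unsigned, rc is 255 here
	 * and the error check below silently passes */
	if (rc < 0)
		printf("error detected\n");
	else
		printf("error missed (rc=%d)\n", rc);
	return 0;
}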
index 4c4b1f6..077828c 100644
@@ -7,7 +7,7 @@ BINARIES += transhuge-stress
 
 all: $(BINARIES)
 %: %.c
-       $(CC) $(CFLAGS) -o $@ $^
+       $(CC) $(CFLAGS) -o $@ $^ -lrt
 
 run_tests: all
        @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)