Merge branch 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 3 Feb 2019 17:02:03 +0000 (09:02 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 3 Feb 2019 17:02:03 +0000 (09:02 -0800)
Pull cpu hotplug fixes from Thomas Gleixner:
 "Two fixes for the cpu hotplug machinery:

   - Replace the overly clever 'SMT disabled by BIOS' detection logic as
     it breaks KVM scenarios and prevents speculation control updates
     when the Hyperthreads are brought online late after boot.

   - Remove a redundant invocation of the speculation control update
     function"

* 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpu/hotplug: Fix "SMT disabled by BIOS" detection for KVM
  x86/speculation: Remove redundant arch_smt_update() invocation
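
Context for the first fix: arch_smt_update() is the hook that re-evaluates
speculation mitigations whenever the SMT topology changes, and a boot-time
"SMT disabled by BIOS" latch goes stale when sibling threads are onlined
late, e.g. under KVM. The user-space model below only illustrates that
failure mode; the names and structure are assumptions, not the kernel's
actual code.

#include <stdbool.h>
#include <stdio.h>

static int online_siblings = 1;          /* boot CPU only */
static bool smt_disabled_by_bios;        /* the overly clever latch */

static void arch_smt_update_model(void)
{
	printf("speculation control: SMT %s\n",
	       online_siblings > 1 ? "active" : "inactive");
}

static void cpu_online_model(void)
{
	online_siblings++;
	if (smt_disabled_by_bios)
		return;                  /* stale latch: update skipped */
	arch_smt_update_model();         /* re-evaluate on every change */
}

int main(void)
{
	smt_disabled_by_bios = true;     /* mis-detected at boot */
	cpu_online_model();              /* late sibling, no update: the bug */
	smt_disabled_by_bios = false;    /* with the fixed detection */
	cpu_online_model();              /* the update now runs */
	return 0;
}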

213 files changed:
Documentation/devicetree/bindings/Makefile
Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
Documentation/sysctl/fs.txt
MAINTAINERS
arch/arm/mach-cns3xxx/pcie.c
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/hyp-stub.S
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/mm/flush.c
arch/c6x/include/asm/Kbuild
arch/c6x/include/uapi/asm/Kbuild
arch/h8300/include/asm/Kbuild
arch/h8300/include/uapi/asm/Kbuild
arch/hexagon/include/asm/Kbuild
arch/hexagon/include/uapi/asm/Kbuild
arch/m68k/include/asm/Kbuild
arch/m68k/include/uapi/asm/Kbuild
arch/microblaze/include/asm/Kbuild
arch/microblaze/include/uapi/asm/Kbuild
arch/openrisc/include/asm/Kbuild
arch/openrisc/include/uapi/asm/Kbuild
arch/riscv/Kconfig
arch/riscv/configs/defconfig
arch/riscv/include/asm/page.h
arch/riscv/include/asm/processor.h
arch/riscv/kernel/asm-offsets.c
arch/riscv/kernel/entry.S
arch/riscv/kernel/setup.c
arch/riscv/kernel/smpboot.c
arch/riscv/mm/init.c
arch/unicore32/include/asm/Kbuild
arch/unicore32/include/uapi/asm/Kbuild
arch/x86/include/asm/page_64_types.h
arch/x86/lib/iomem.c
arch/xtensa/Kconfig
arch/xtensa/boot/dts/Makefile
arch/xtensa/configs/audio_kc705_defconfig
arch/xtensa/configs/cadence_csp_defconfig
arch/xtensa/configs/generic_kc705_defconfig
arch/xtensa/configs/nommu_kc705_defconfig
arch/xtensa/configs/smp_lx200_defconfig
arch/xtensa/kernel/head.S
arch/xtensa/kernel/smp.c
arch/xtensa/kernel/time.c
block/blk-core.c
block/blk-flush.c
drivers/base/power/runtime.c
drivers/clk/clk.c
drivers/clk/imx/clk-frac-pll.c
drivers/clk/mmp/clk-of-mmp2.c
drivers/clk/qcom/gcc-sdm845.c
drivers/clk/ti/divider.c
drivers/cpuidle/poll_state.c
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
drivers/firmware/efi/arm-runtime.c
drivers/gpio/gpio-altera-a10sr.c
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpio-pcf857x.c
drivers/gpio/gpio-vf610.c
drivers/gpio/gpiolib.c
drivers/i3c/master.c
drivers/i3c/master/dw-i3c-master.c
drivers/ide/ide-atapi.c
drivers/ide/ide-io.c
drivers/ide/ide-park.c
drivers/ide/ide-probe.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/device.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/uverbs_std_types_device.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/ud.c
drivers/infiniband/hw/hns/hns_roce_srq.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx5/flow.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qib/qib_ud.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/input/serio/olpc_apsp.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/mtk_iommu_v1.c
drivers/irqchip/irq-xtensa-mx.c
drivers/irqchip/irq-xtensa-pic.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
drivers/mfd/Kconfig
drivers/mmc/host/bcm2835.c
drivers/mmc/host/mtk-sd.c
drivers/net/caif/caif_serial.c
drivers/net/dsa/mv88e6xxx/serdes.c
drivers/net/ethernet/alteon/acenic.c
drivers/net/ethernet/altera/altera_msgdma.c
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/apple/bmac.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/i825xx/82596.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_l2.h
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/ti/cpmac.c
drivers/pci/controller/dwc/pci-imx6.c
drivers/pci/controller/dwc/pcie-armada8k.c
drivers/platform/x86/Kconfig
drivers/s390/scsi/zfcp_aux.c
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/53c700.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/scsi_debug.c
drivers/tty/serial/earlycon-riscv-sbi.c
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/vhost/vsock.c
fs/autofs/expire.c
fs/autofs/inode.c
fs/btrfs/ctree.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/cifs/cifsfs.h
fs/cifs/file.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/dcache.c
fs/drop_caches.c
fs/gfs2/rgrp.c
fs/iomap.c
fs/nfs/super.c
fs/nfs/write.c
fs/proc/generic.c
fs/proc/internal.h
fs/proc/proc_net.c
include/dt-bindings/clock/marvell,mmp2.h
include/linux/dcache.h
include/linux/fs.h
include/linux/ide.h
include/linux/memory_hotplug.h
include/linux/pm_runtime.h
include/linux/sched/coredump.h
include/net/tls.h
include/rdma/ib_verbs.h
include/uapi/rdma/hns-abi.h
init/Kconfig
kernel/events/core.c
kernel/exit.c
kernel/sched/psi.c
kernel/workqueue.c
kernel/workqueue_internal.h
lib/test_kmod.c
mm/hugetlb.c
mm/kasan/Makefile
mm/memory-failure.c
mm/memory_hotplug.c
mm/migrate.c
mm/oom_kill.c
net/bridge/netfilter/ebtables.c
net/core/dev.c
net/decnet/dn_dev.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv6/ip6mr.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nfnetlink_osf.c
net/netfilter/nft_compat.c
net/netrom/nr_timer.c
net/rose/rose_route.c
net/tls/tls_sw.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
security/apparmor/domain.c
security/apparmor/lsm.c
sound/core/pcm_lib.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks.c
tools/perf/builtin-script.c
tools/perf/ui/browsers/annotate.c
tools/perf/util/cpumap.c
tools/perf/util/ordered-events.c
tools/perf/util/setup.py
tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
tools/testing/selftests/ir/Makefile
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/xfrm_policy.sh
tools/testing/selftests/proc/.gitignore
tools/testing/selftests/proc/Makefile
tools/testing/selftests/proc/setns-dcache.c [new file with mode: 0644]
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/testing/selftests/timers/Makefile

index 6e5cef0..50daa0b 100644 (file)
@@ -17,7 +17,11 @@ extra-y += $(DT_TMP_SCHEMA)
 quiet_cmd_mk_schema = SCHEMA  $@
       cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^)
 
-DT_DOCS = $(shell cd $(srctree)/$(src) && find * -name '*.yaml')
+DT_DOCS = $(shell \
+       cd $(srctree)/$(src) && \
+       find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
+       )
+
 DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
 
 extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
index 3660341..0e72183 100644 (file)
@@ -4,14 +4,10 @@ Required properties:
 - compatible : "olpc,ap-sp"
 - reg : base address and length of SoC's WTM registers
 - interrupts : SP-AP interrupt
-- clocks : phandle + clock-specifier for the clock that drives the WTM
-- clock-names:  should be "sp"
 
 Example:
        ap-sp@d4290000 {
                compatible = "olpc,ap-sp";
                reg = <0xd4290000 0x1000>;
                interrupts = <40>;
-               clocks = <&soc_clocks MMP2_CLK_SP>;
-               clock-names = "sp";
        }
index 819caf8..58649bd 100644 (file)
@@ -56,26 +56,32 @@ of any kernel data structures.
 
 dentry-state:
 
-From linux/fs/dentry.c:
+From linux/include/linux/dcache.h:
 --------------------------------------------------------------
-struct {
+struct dentry_stat_t dentry_stat {
         int nr_dentry;
         int nr_unused;
         int age_limit;         /* age in seconds */
         int want_pages;        /* pages requested by system */
-        int dummy[2];
-} dentry_stat = {0, 0, 45, 0,};
--------------------------------------------------------------- 
-
-Dentries are dynamically allocated and deallocated, and
-nr_dentry seems to be 0 all the time. Hence it's safe to
-assume that only nr_unused, age_limit and want_pages are
-used. Nr_unused seems to be exactly what its name says.
+        int nr_negative;       /* # of unused negative dentries */
+        int dummy;             /* Reserved for future use */
+};
+--------------------------------------------------------------
+
+Dentries are dynamically allocated and deallocated.
+
+nr_dentry shows the total number of dentries allocated (active
++ unused). nr_unused shows the number of dentries that are not
+actively used, but are saved in the LRU list for future reuse.
+
 Age_limit is the age in seconds after which dcache entries
 can be reclaimed when memory is short and want_pages is
 nonzero when shrink_dcache_pages() has been called and the
 dcache isn't pruned yet.
 
+nr_negative shows the number of unused dentries that are also
+negative dentries which do not map to actual files.
+
 ==============================================================
 
 dquot-max & dquot-nr:
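
As a usage note for the documentation above: the fields are exported in
this order through /proc/sys/fs/dentry-state. A minimal reader, assuming
the six-field layout of the patched struct dentry_stat_t:

#include <stdio.h>

int main(void)
{
	long nr_dentry, nr_unused, age_limit, want_pages, nr_negative, dummy;
	FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld %ld %ld %ld %ld %ld", &nr_dentry, &nr_unused,
		   &age_limit, &want_pages, &nr_negative, &dummy) != 6) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("allocated=%ld unused=%ld negative=%ld\n",
	       nr_dentry, nr_unused, nr_negative);
	return 0;
}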
index 9f64f8d..2d3c191 100644 (file)
@@ -16673,6 +16673,24 @@ T:     git git://linuxtv.org/media_tree.git
 S:     Maintained
 F:     drivers/media/tuners/tuner-xc2028.*
 
+XDP (eXpress Data Path)
+M:     Alexei Starovoitov <ast@kernel.org>
+M:     Daniel Borkmann <daniel@iogearbox.net>
+M:     David S. Miller <davem@davemloft.net>
+M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jesper Dangaard Brouer <hawk@kernel.org>
+M:     John Fastabend <john.fastabend@gmail.com>
+L:     netdev@vger.kernel.org
+L:     xdp-newbies@vger.kernel.org
+S:     Supported
+F:     net/core/xdp.c
+F:     include/net/xdp.h
+F:     kernel/bpf/devmap.c
+F:     kernel/bpf/cpumap.c
+F:     include/trace/events/xdp.h
+K:     xdp
+N:     xdp
+
 XDP SOCKETS (AF_XDP)
 M:     Björn Töpel <bjorn.topel@intel.com>
 M:     Magnus Karlsson <magnus.karlsson@intel.com>
index 318394e..95a11d5 100644 (file)
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
        } else /* remote PCI bus */
                base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
 
-       return base + (where & 0xffc) + (devfn << 12);
+       return base + where + (devfn << 12);
 }
 
 static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
@@ -93,7 +93,7 @@ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
        u32 mask = (0x1ull << (size * 8)) - 1;
        int shift = (where % 4) * 8;
 
-       ret = pci_generic_config_read32(bus, devfn, where, size, val);
+       ret = pci_generic_config_read(bus, devfn, where, size, val);
 
        if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn &&
            (where & 0xffc) == PCI_CLASS_REVISION)
index 29cdc99..9859e11 100644 (file)
@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
                dcache_clean_range(__idmap_text_start, __idmap_text_end);
 
                /* Clean kvm setup code to PoC? */
-               if (el2_reset_needed())
+               if (el2_reset_needed()) {
                        dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+                       dcache_clean_range(__hyp_text_start, __hyp_text_end);
+               }
 
                /* make the crash dump kernel image protected again */
                crash_post_resume();
index e1261fb..17f325b 100644 (file)
@@ -28,6 +28,8 @@
 #include <asm/virt.h>
 
        .text
+       .pushsection    .hyp.text, "ax"
+
        .align 11
 
 ENTRY(__hyp_stub_vectors)
index ba6b417..b09b6f7 100644 (file)
@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
         * we end up running with module randomization disabled.
         */
        module_alloc_base = (u64)_etext - MODULES_VSIZE;
+       __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
 
        /*
         * Try to map the FDT early. If this fails, we simply bail,
index 2a5b338..f17afb9 100644 (file)
@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
            addr < (unsigned long)__entry_text_end) ||
            (addr >= (unsigned long)__idmap_text_start &&
            addr < (unsigned long)__idmap_text_end) ||
+           (addr >= (unsigned long)__hyp_text_start &&
+           addr < (unsigned long)__hyp_text_end) ||
            !!search_exception_tables(addr))
                return true;
 
        if (!is_kernel_in_hyp_mode()) {
-               if ((addr >= (unsigned long)__hyp_text_start &&
-                   addr < (unsigned long)__hyp_text_end) ||
-                   (addr >= (unsigned long)__hyp_idmap_text_start &&
+               if ((addr >= (unsigned long)__hyp_idmap_text_start &&
                    addr < (unsigned long)__hyp_idmap_text_end))
                        return true;
        }
index 30695a8..5c9073b 100644 (file)
@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
                __clean_dcache_area_pou(kaddr, len);
                __flush_icache_all();
        } else {
-               flush_icache_range(addr, addr + len);
+               /*
+                * Don't issue kick_all_cpus_sync() after I-cache invalidation
+                * for user mappings.
+                */
+               __flush_icache_range(addr, addr + len);
        }
 }
 
index 33a2c94..63b4a17 100644 (file)
@@ -30,6 +30,7 @@ generic-y += pgalloc.h
 generic-y += preempt.h
 generic-y += segment.h
 generic-y += serial.h
+generic-y += shmparam.h
 generic-y += tlbflush.h
 generic-y += topology.h
 generic-y += trace_clock.h
index 6c6f630..0febf1a 100644 (file)
@@ -1,5 +1,4 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += kvm_para.h
-generic-y += shmparam.h
 generic-y += ucontext.h
index cd400d3..961c1dc 100644 (file)
@@ -40,6 +40,7 @@ generic-y += preempt.h
 generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += serial.h
+generic-y += shmparam.h
 generic-y += sizes.h
 generic-y += spinlock.h
 generic-y += timex.h
index 6c6f630..0febf1a 100644 (file)
@@ -1,5 +1,4 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += kvm_para.h
-generic-y += shmparam.h
 generic-y += ucontext.h
index 47c4da3..b25fd42 100644 (file)
@@ -30,6 +30,7 @@ generic-y += rwsem.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += serial.h
+generic-y += shmparam.h
 generic-y += sizes.h
 generic-y += topology.h
 generic-y += trace_clock.h
index 61d955c..c1b06dc 100644 (file)
@@ -1,4 +1,3 @@
 include include/uapi/asm-generic/Kbuild.asm
 
-generic-y += shmparam.h
 generic-y += ucontext.h
index 9f1dd26..95f8f63 100644 (file)
@@ -20,6 +20,7 @@ generic-y += mm-arch-hooks.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += sections.h
+generic-y += shmparam.h
 generic-y += spinlock.h
 generic-y += topology.h
 generic-y += trace_clock.h
index b8b3525..960bf1e 100644 (file)
@@ -2,4 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generated-y += unistd_32.h
 generic-y += kvm_para.h
-generic-y += shmparam.h
index 9c7d1d2..791cc8d 100644 (file)
@@ -26,6 +26,7 @@ generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += serial.h
+generic-y += shmparam.h
 generic-y += syscalls.h
 generic-y += topology.h
 generic-y += trace_clock.h
index 28823e3..97823ec 100644 (file)
@@ -2,5 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generated-y += unistd_32.h
 generic-y += kvm_para.h
-generic-y += shmparam.h
 generic-y += ucontext.h
index eb87cd8..1f04844 100644 (file)
@@ -34,6 +34,7 @@ generic-y += qrwlock_types.h
 generic-y += qrwlock.h
 generic-y += sections.h
 generic-y += segment.h
+generic-y += shmparam.h
 generic-y += string.h
 generic-y += switch_to.h
 generic-y += topology.h
index 6c6f630..0febf1a 100644 (file)
@@ -1,5 +1,4 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += kvm_para.h
-generic-y += shmparam.h
 generic-y += ucontext.h
index feeeaa6..515fc3c 100644 (file)
@@ -103,7 +103,7 @@ choice
        prompt "Base ISA"
        default ARCH_RV64I
        help
-         This selects the base ISA that this kernel will traget and must match
+         This selects the base ISA that this kernel will target and must match
          the target platform.
 
 config ARCH_RV32I
index f399659..2fd3461 100644 (file)
@@ -13,8 +13,6 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_SMP=y
-CONFIG_PCI=y
-CONFIG_PCIE_XILINX=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_NET=y
@@ -28,6 +26,10 @@ CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NETLINK_DIAG=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_XILINX=y
 CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
@@ -63,7 +65,6 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_UAS=y
 CONFIG_VIRTIO_MMIO=y
 CONFIG_SIFIVE_PLIC=y
-CONFIG_RAS=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_AUTOFS4_FS=y
@@ -77,5 +78,6 @@ CONFIG_NFS_V4_1=y
 CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
 CONFIG_PRINTK_TIME=y
 # CONFIG_RCU_TRACE is not set
index 06cfbb3..2a546a5 100644 (file)
@@ -80,7 +80,7 @@ typedef struct page *pgtable_t;
 #define __pgd(x)       ((pgd_t) { (x) })
 #define __pgprot(x)    ((pgprot_t) { (x) })
 
-#ifdef CONFIG_64BITS
+#ifdef CONFIG_64BIT
 #define PTE_FMT "%016lx"
 #else
 #define PTE_FMT "%08lx"
index 0531f49..ce70bce 100644 (file)
@@ -22,7 +22,7 @@
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE     PAGE_ALIGN(TASK_SIZE >> 1)
+#define TASK_UNMAPPED_BASE     PAGE_ALIGN(TASK_SIZE / 3)
 
 #define STACK_TOP              TASK_SIZE
 #define STACK_TOP_MAX          STACK_TOP
index 6a92a2f..dac9834 100644 (file)
@@ -39,6 +39,7 @@ void asm_offsets(void)
        OFFSET(TASK_STACK, task_struct, stack);
        OFFSET(TASK_TI, task_struct, thread_info);
        OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
+       OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
        OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
        OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
        OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
index 355166f..fd9b57c 100644 (file)
@@ -144,6 +144,10 @@ _save_context:
        REG_L x2,  PT_SP(sp)
        .endm
 
+#if !IS_ENABLED(CONFIG_PREEMPT)
+.set resume_kernel, restore_all
+#endif
+
 ENTRY(handle_exception)
        SAVE_ALL
 
@@ -228,7 +232,7 @@ ret_from_exception:
        REG_L s0, PT_SSTATUS(sp)
        csrc sstatus, SR_SIE
        andi s0, s0, SR_SPP
-       bnez s0, restore_all
+       bnez s0, resume_kernel
 
 resume_userspace:
        /* Interrupts must be disabled here so flags are checked atomically */
@@ -250,6 +254,18 @@ restore_all:
        RESTORE_ALL
        sret
 
+#if IS_ENABLED(CONFIG_PREEMPT)
+resume_kernel:
+       REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
+       bnez s0, restore_all
+need_resched:
+       REG_L s0, TASK_TI_FLAGS(tp)
+       andi s0, s0, _TIF_NEED_RESCHED
+       beqz s0, restore_all
+       call preempt_schedule_irq
+       j need_resched
+#endif
+
 work_pending:
        /* Enter slow path for supplementary processing */
        la ra, ret_from_exception
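
The new resume_kernel path above (arch/riscv/kernel/entry.S) only preempts
when the preempt count is zero and TIF_NEED_RESCHED is set, re-checking the
flag after every preempt_schedule_irq() call. A user-space sketch of that
control flow; the types, names and flag bit are illustrative stand-ins:

#include <stdio.h>

#define _TIF_NEED_RESCHED (1u << 3)       /* bit position is illustrative */

struct thread_info {
	unsigned int preempt_count;
	unsigned int flags;
};

static void preempt_schedule_irq_stub(struct thread_info *ti)
{
	printf("schedule\n");
	ti->flags &= ~_TIF_NEED_RESCHED;  /* stand-in for the scheduler */
}

static void resume_kernel(struct thread_info *ti)
{
	if (ti->preempt_count)            /* preemption disabled: restore_all */
		return;
	while (ti->flags & _TIF_NEED_RESCHED)
		preempt_schedule_irq_stub(ti);
}

int main(void)
{
	struct thread_info ti = { .preempt_count = 0,
				  .flags = _TIF_NEED_RESCHED };
	resume_kernel(&ti);
	return 0;
}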
index 6e079e9..7756431 100644 (file)
@@ -181,7 +181,7 @@ static void __init setup_bootmem(void)
        BUG_ON(mem_size == 0);
 
        set_max_mapnr(PFN_DOWN(mem_size));
-       max_low_pfn = memblock_end_of_DRAM();
+       max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
 
 #ifdef CONFIG_BLK_DEV_INITRD
        setup_initrd();
index fc185ec..18cda0e 100644 (file)
@@ -57,15 +57,12 @@ void __init setup_smp(void)
 
        while ((dn = of_find_node_by_type(dn, "cpu"))) {
                hart = riscv_of_processor_hartid(dn);
-               if (hart < 0) {
-                       of_node_put(dn);
+               if (hart < 0)
                        continue;
-               }
 
                if (hart == cpuid_to_hartid_map(0)) {
                        BUG_ON(found_boot_cpu);
                        found_boot_cpu = 1;
-                       of_node_put(dn);
                        continue;
                }
 
@@ -73,7 +70,6 @@ void __init setup_smp(void)
                set_cpu_possible(cpuid, true);
                set_cpu_present(cpuid, true);
                cpuid++;
-               of_node_put(dn);
        }
 
        BUG_ON(!found_boot_cpu);
index 1d9bfaf..658ebf6 100644 (file)
@@ -28,7 +28,8 @@ static void __init zone_sizes_init(void)
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
 
 #ifdef CONFIG_ZONE_DMA32
-       max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+       max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
+                       (unsigned long) PFN_PHYS(max_low_pfn)));
 #endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
index 1372553..1d1544b 100644 (file)
@@ -28,6 +28,7 @@ generic-y += preempt.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += serial.h
+generic-y += shmparam.h
 generic-y += sizes.h
 generic-y += syscalls.h
 generic-y += topology.h
index 6c6f630..0febf1a 100644 (file)
@@ -1,5 +1,4 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += kvm_para.h
-generic-y += shmparam.h
 generic-y += ucontext.h
index 8f65728..0ce558a 100644 (file)
@@ -7,7 +7,11 @@
 #endif
 
 #ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_STACK_ORDER 2
+#else
 #define KASAN_STACK_ORDER 1
+#endif
 #else
 #define KASAN_STACK_ORDER 0
 #endif
index 6689467..df50451 100644 (file)
@@ -2,8 +2,11 @@
 #include <linux/module.h>
 #include <linux/io.h>
 
+#define movs(type,to,from) \
+       asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
+
 /* Originally from i386/string.h */
-static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
+static __always_inline void rep_movs(void *to, const void *from, size_t n)
 {
        unsigned long d0, d1, d2;
        asm volatile("rep ; movsl\n\t"
@@ -21,13 +24,37 @@ static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
 
 void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
 {
-       __iomem_memcpy(to, (const void *)from, n);
+       if (unlikely(!n))
+               return;
+
+       /* Align any unaligned source IO */
+       if (unlikely(1 & (unsigned long)from)) {
+               movs("b", to, from);
+               n--;
+       }
+       if (n > 1 && unlikely(2 & (unsigned long)from)) {
+               movs("w", to, from);
+               n-=2;
+       }
+       rep_movs(to, (const void *)from, n);
 }
 EXPORT_SYMBOL(memcpy_fromio);
 
 void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
 {
-       __iomem_memcpy((void *)to, (const void *) from, n);
+       if (unlikely(!n))
+               return;
+
+       /* Align any unaligned destination IO */
+       if (unlikely(1 & (unsigned long)to)) {
+               movs("b", to, from);
+               n--;
+       }
+       if (n > 1 && unlikely(2 & (unsigned long)to)) {
+               movs("w", to, from);
+               n-=2;
+       }
+       rep_movs((void *)to, (const void *) from, n);
 }
 EXPORT_SYMBOL(memcpy_toio);
 
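
The memcpy_fromio()/memcpy_toio() changes above peel off a leading byte
and/or 16-bit word so that the bulk rep-movs copy runs on a 4-byte-aligned
MMIO address. A plain-C model of the same peeling, without the MMIO
semantics (a sketch, not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_aligned(void *to, const void *from, size_t n)
{
	uint8_t *d = to;
	const uint8_t *s = from;

	if (n && ((uintptr_t)s & 1)) {        /* leading byte */
		*d++ = *s++;
		n--;
	}
	if (n > 1 && ((uintptr_t)s & 2)) {    /* leading 16-bit word */
		memcpy(d, s, 2);
		d += 2; s += 2; n -= 2;
	}
	memcpy(d, s, n);                      /* stands in for rep movsl */
}

int main(void)
{
	char src[] = "0123456789abcdef", dst[17] = { 0 };

	copy_aligned(dst, src + 1, 15);       /* deliberately unaligned source */
	printf("%s\n", dst);
	return 0;
}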
index 20a0756..ce91682 100644 (file)
@@ -164,7 +164,7 @@ config XTENSA_FAKE_NMI
          If unsure, say N.
 
 config XTENSA_UNALIGNED_USER
-       bool "Unaligned memory access in use space"
+       bool "Unaligned memory access in user space"
        help
          The Xtensa architecture currently does not handle unaligned
          memory accesses in hardware but through an exception handler.
@@ -451,7 +451,7 @@ config USE_OF
        help
          Include support for flattened device tree machine descriptions.
 
-config BUILTIN_DTB
+config BUILTIN_DTB_SOURCE
        string "DTB to build into the kernel image"
        depends on OF
 
index f8052ba..0b8d00c 100644 (file)
@@ -7,9 +7,9 @@
 #
 #
 
-BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
-ifneq ($(CONFIG_BUILTIN_DTB),"")
-obj-$(CONFIG_OF) += $(BUILTIN_DTB)
+BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
+ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
+obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE)
 endif
 
 # for CONFIG_OF_ALL_DTBS test
index 2bf964d..f378e56 100644 (file)
@@ -34,7 +34,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
 CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB="kc705"
+CONFIG_BUILTIN_DTB_SOURCE="kc705"
 # CONFIG_COMPACTION is not set
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_PM=y
index 3221b70..62f32a9 100644 (file)
@@ -38,7 +38,7 @@ CONFIG_HIGHMEM=y
 # CONFIG_PCI is not set
 CONFIG_XTENSA_PLATFORM_XTFPGA=y
 CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB="csp"
+CONFIG_BUILTIN_DTB_SOURCE="csp"
 # CONFIG_COMPACTION is not set
 CONFIG_XTFPGA_LCD=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
index 985fa85..8bebe07 100644 (file)
@@ -33,7 +33,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
 CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB="kc705"
+CONFIG_BUILTIN_DTB_SOURCE="kc705"
 # CONFIG_COMPACTION is not set
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_NET=y
index f3fc4f9..933ab2a 100644 (file)
@@ -39,7 +39,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
 CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB="kc705_nommu"
+CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu"
 CONFIG_BINFMT_FLAT=y
 CONFIG_NET=y
 CONFIG_PACKET=y
index 11fed6c..e29c5b1 100644 (file)
@@ -33,11 +33,12 @@ CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
 # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
 # CONFIG_PCI is not set
+CONFIG_VECTORS_OFFSET=0x00002000
 CONFIG_XTENSA_PLATFORM_XTFPGA=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
 CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB="lx200mx"
+CONFIG_BUILTIN_DTB_SOURCE="lx200mx"
 # CONFIG_COMPACTION is not set
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_NET=y
index da08e75..7f00971 100644 (file)
@@ -276,12 +276,13 @@ should_never_return:
 
        movi    a2, cpu_start_ccount
 1:
+       memw
        l32i    a3, a2, 0
        beqi    a3, 0, 1b
        movi    a3, 0
        s32i    a3, a2, 0
-       memw
 1:
+       memw
        l32i    a3, a2, 0
        beqi    a3, 0, 1b
        wsr     a3, ccount
@@ -317,11 +318,13 @@ ENTRY(cpu_restart)
        rsr     a0, prid
        neg     a2, a0
        movi    a3, cpu_start_id
+       memw
        s32i    a2, a3, 0
 #if XCHAL_DCACHE_IS_WRITEBACK
        dhwbi   a3, 0
 #endif
 1:
+       memw
        l32i    a2, a3, 0
        dhi     a3, 0
        bne     a2, a0, 1b
index 932d646..be1f280 100644 (file)
@@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        unsigned i;
 
-       for (i = 0; i < max_cpus; ++i)
+       for_each_possible_cpu(i)
                set_cpu_present(i, true);
 }
 
@@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
        pr_info("%s: Core Count = %d\n", __func__, ncpus);
        pr_info("%s: Core Id = %d\n", __func__, core_id);
 
+       if (ncpus > NR_CPUS) {
+               ncpus = NR_CPUS;
+               pr_info("%s: limiting core count by %d\n", __func__, ncpus);
+       }
+
        for (i = 0; i < ncpus; ++i)
                set_cpu_possible(i, true);
 }
@@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
        int i;
 
 #ifdef CONFIG_HOTPLUG_CPU
-       cpu_start_id = cpu;
-       system_flush_invalidate_dcache_range(
-                       (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+       WRITE_ONCE(cpu_start_id, cpu);
+       /* Pairs with the third memw in the cpu_restart */
+       mb();
+       system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
+                                            sizeof(cpu_start_id));
 #endif
        smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
 
@@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
                        ccount = get_ccount();
                while (!ccount);
 
-               cpu_start_ccount = ccount;
+               WRITE_ONCE(cpu_start_ccount, ccount);
 
-               while (time_before(jiffies, timeout)) {
+               do {
+                       /*
+                        * Pairs with the first two memws in the
+                        * .Lboot_secondary.
+                        */
                        mb();
-                       if (!cpu_start_ccount)
-                               break;
-               }
+                       ccount = READ_ONCE(cpu_start_ccount);
+               } while (ccount && time_before(jiffies, timeout));
 
-               if (cpu_start_ccount) {
+               if (ccount) {
                        smp_call_function_single(0, mx_cpu_stop,
-                                       (void *)cpu, 1);
-                       cpu_start_ccount = 0;
+                                                (void *)cpu, 1);
+                       WRITE_ONCE(cpu_start_ccount, 0);
                        return -EIO;
                }
        }
@@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
                        __func__, cpu, idle, start_info.stack);
 
+       init_completion(&cpu_running);
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                wait_for_completion_timeout(&cpu_running,
@@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);
        while (time_before(jiffies, timeout)) {
                system_invalidate_dcache_range((unsigned long)&cpu_start_id,
-                               sizeof(cpu_start_id));
-               if (cpu_start_id == -cpu) {
+                                              sizeof(cpu_start_id));
+               /* Pairs with the second memw in the cpu_restart */
+               mb();
+               if (READ_ONCE(cpu_start_id) == -cpu) {
                        platform_cpu_kill(cpu);
                        return;
                }
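
The xtensa smp.c changes above enforce a publish/acknowledge handshake on
cpu_start_ccount and cpu_start_id: plain loads and stores become
WRITE_ONCE()/READ_ONCE() paired with barriers that match the memw
instructions in head.S. A rough C11-atomics analogue of that pairing,
squeezed into one thread so it runs standalone (a sketch only):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long cpu_start_ccount;

/* Boot CPU side: publish the counter (WRITE_ONCE + mb() in the patch). */
static void publish_ccount(unsigned long ccount)
{
	atomic_store_explicit(&cpu_start_ccount, ccount,
			      memory_order_release);
}

/* Secondary CPU side: consume it and acknowledge by writing zero back,
 * which the boot CPU's READ_ONCE + mb() polling loop then observes. */
static unsigned long consume_ccount(void)
{
	unsigned long c;

	while ((c = atomic_load_explicit(&cpu_start_ccount,
					 memory_order_acquire)) == 0)
		;
	atomic_store_explicit(&cpu_start_ccount, 0, memory_order_release);
	return c;
}

int main(void)
{
	publish_ccount(12345);
	printf("ccount=%lu\n", consume_ccount());
	return 0;
}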
index fd524a5..378186b 100644 (file)
@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
                container_of(evt, struct ccount_timer, evt);
 
        if (timer->irq_enabled) {
-               disable_irq(evt->irq);
+               disable_irq_nosync(evt->irq);
                timer->irq_enabled = 0;
        }
        return 0;
index 3c5f61c..6b78ec5 100644 (file)
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
        kblockd_schedule_work(&q->timeout_work);
 }
 
+static void blk_timeout_work(struct work_struct *work)
+{
+}
+
 /**
  * blk_alloc_queue_node - allocate a request queue
  * @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
                    laptop_mode_timer_fn, 0);
        timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
-       INIT_WORK(&q->timeout_work, NULL);
+       INIT_WORK(&q->timeout_work, blk_timeout_work);
        INIT_LIST_HEAD(&q->icq_list);
 #ifdef CONFIG_BLK_CGROUP
        INIT_LIST_HEAD(&q->blkg_list);
index a3fc719..6e0f2d9 100644 (file)
@@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
-       blk_mq_run_hw_queue(hctx, true);
+       blk_mq_sched_restart(hctx);
 }
 
 /**
index 457be03..0ea2139 100644 (file)
@@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
        int autosuspend_delay;
        u64 last_busy, expires = 0;
-       u64 now = ktime_to_ns(ktime_get());
+       u64 now = ktime_get_mono_fast_ns();
 
        if (!dev->power.use_autosuspend)
                goto out;
@@ -909,7 +909,7 @@ static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
         * If 'expires' is after the current time, we've been called
         * too early.
         */
-       if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
+       if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -928,7 +928,7 @@ static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
 int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
        unsigned long flags;
-       ktime_t expires;
+       u64 expires;
        int retval;
 
        spin_lock_irqsave(&dev->power.lock, flags);
@@ -945,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);
 
-       expires = ktime_add(ktime_get(), ms_to_ktime(delay));
-       dev->power.timer_expires = ktime_to_ns(expires);
+       expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+       dev->power.timer_expires = expires;
        dev->power.timer_autosuspends = 0;
        hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
 
index 6ccdbed..d2477a5 100644 (file)
@@ -1513,9 +1513,19 @@ static int clk_fetch_parent_index(struct clk_core *core,
        if (!parent)
                return -EINVAL;
 
-       for (i = 0; i < core->num_parents; i++)
-               if (clk_core_get_parent_by_index(core, i) == parent)
+       for (i = 0; i < core->num_parents; i++) {
+               if (core->parents[i] == parent)
+                       return i;
+
+               if (core->parents[i])
+                       continue;
+
+               /* Fallback to comparing globally unique names */
+               if (!strcmp(parent->name, core->parent_names[i])) {
+                       core->parents[i] = parent;
                        return i;
+               }
+       }
 
        return -EINVAL;
 }
index 0026c39..76b9eb1 100644 (file)
@@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 {
        struct clk_frac_pll *pll = to_clk_frac_pll(hw);
        u32 val, divfi, divff;
-       u64 temp64 = parent_rate;
+       u64 temp64;
        int ret;
 
        parent_rate *= 8;
        rate *= 2;
        divfi = rate / parent_rate;
-       temp64 *= rate - divfi;
+       temp64 = parent_rate * divfi;
+       temp64 = rate - temp64;
        temp64 *= PLL_FRAC_DENOM;
        do_div(temp64, parent_rate);
        divff = temp64;
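
The corrected clk-frac-pll math above computes the integer divider first
and derives the fractional part from the remainder, instead of multiplying
a stale copy of parent_rate. A worked example with sample rates; the 24 MHz
parent and the PLL_FRAC_DENOM value are assumptions for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PLL_FRAC_DENOM 0x1000000  /* assumed 24-bit fractional denominator */

int main(void)
{
	uint64_t parent_rate = 24000000ULL * 8;       /* patch scales by 8 */
	uint64_t rate = 1443000000ULL * 2;            /* and doubles the target */
	uint64_t divfi = rate / parent_rate;          /* integer part: 15 */
	uint64_t temp64 = rate - parent_rate * divfi; /* remainder: 6 MHz */

	temp64 *= PLL_FRAC_DENOM;
	printf("divfi=%" PRIu64 " divff=%" PRIu64 "\n",
	       divfi, temp64 / parent_rate);          /* divff: 524288 */
	return 0;
}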
index 61fefc0..d083b86 100644 (file)
@@ -53,7 +53,6 @@
 #define APMU_DISP1     0x110
 #define APMU_CCIC0     0x50
 #define APMU_CCIC1     0xf4
-#define APMU_SP                0x68
 #define MPMU_UART_PLL  0x14
 
 struct mmp2_clk_unit {
@@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = {
        .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
 };
 
-static DEFINE_SPINLOCK(sp_lock);
-
 static struct mmp_param_mux_clk apmu_mux_clks[] = {
        {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
        {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
@@ -242,7 +239,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
        {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
        {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
        {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
-       {MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock},
 };
 
 static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
index c782e62..58fa5c2 100644 (file)
@@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = {
        "core_bi_pll_test_se",
 };
 
-static const char * const gcc_parent_names_7[] = {
-       "bi_tcxo",
+static const char * const gcc_parent_names_7_ao[] = {
+       "bi_tcxo_ao",
        "gpll0",
        "gpll0_out_even",
        "core_bi_pll_test_se",
@@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = {
        "core_bi_pll_test_se",
 };
 
+static const char * const gcc_parent_names_8_ao[] = {
+       "bi_tcxo_ao",
+       "gpll0",
+       "core_bi_pll_test_se",
+};
+
 static const struct parent_map gcc_parent_map_10[] = {
        { P_BI_TCXO, 0 },
        { P_GPLL0_OUT_MAIN, 1 },
@@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
        .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gcc_cpuss_ahb_clk_src",
-               .parent_names = gcc_parent_names_7,
+               .parent_names = gcc_parent_names_7_ao,
                .num_parents = 4,
                .ops = &clk_rcg2_ops,
        },
@@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
        .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gcc_cpuss_rbcpr_clk_src",
-               .parent_names = gcc_parent_names_8,
+               .parent_names = gcc_parent_names_8_ao,
                .num_parents = 3,
                .ops = &clk_rcg2_ops,
        },
index 8d77090..0241450 100644 (file)
@@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
        num_dividers = i;
 
        tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
-       if (!tmp)
+       if (!tmp) {
+               *table = ERR_PTR(-ENOMEM);
                return -ENOMEM;
+       }
 
        valid_div = 0;
        *width = 0;
@@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
 {
        struct clk_omap_divider *div;
        struct clk_omap_reg *reg;
+       int ret;
 
        if (!setup)
                return NULL;
@@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
                div->flags |= CLK_DIVIDER_POWER_OF_TWO;
 
        div->table = _get_div_table_from_setup(setup, &div->width);
+       if (IS_ERR(div->table)) {
+               ret = PTR_ERR(div->table);
+               kfree(div);
+               return ERR_PTR(ret);
+       }
+
 
        div->shift = setup->bit_shift;
        div->latch = -EINVAL;
index b17d153..23a1b27 100644 (file)
@@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
        local_irq_enable();
        if (!current_set_polling_and_test()) {
                unsigned int loop_count = 0;
-               u64 limit = TICK_USEC;
+               u64 limit = TICK_NSEC;
                int i;
 
                for (i = 1; i < drv->state_count; i++) {
index fe070d7..4c97478 100644 (file)
@@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
        struct nitrox_device *ndev = cmdq->ndev;
        struct nitrox_softreq *sr;
        int req_completed = 0, err = 0, budget;
+       completion_t callback;
+       void *cb_arg;
 
        /* check all pending requests */
        budget = atomic_read(&cmdq->pending_count);
@@ -564,13 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
                smp_mb__after_atomic();
                /* remove from response list */
                response_list_del(sr, cmdq);
-
                /* ORH error code */
                err = READ_ONCE(*sr->resp.orh) & 0xff;
-
-               if (sr->callback)
-                       sr->callback(sr->cb_arg, err);
+               callback = sr->callback;
+               cb_arg = sr->cb_arg;
                softreq_destroy(sr);
+               if (callback)
+                       callback(cb_arg, err);
 
                req_completed++;
        }
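
The nitrox reordering above snapshots the callback and its argument before
softreq_destroy() frees the request, then invokes the callback afterwards,
so the completion handler never touches freed memory. A self-contained
sketch of the pattern, with free() standing in for softreq_destroy():

#include <stdio.h>
#include <stdlib.h>

typedef void (*completion_t)(void *arg, int err);

struct softreq {
	completion_t callback;
	void *cb_arg;
};

static void complete_and_destroy(struct softreq *sr, int err)
{
	completion_t callback = sr->callback; /* snapshot before freeing */
	void *cb_arg = sr->cb_arg;

	free(sr);                             /* softreq_destroy() stand-in */
	if (callback)
		callback(cb_arg, err);        /* safe: no use after free */
}

static void done(void *arg, int err)
{
	printf("%s: err=%d\n", (const char *)arg, err);
}

int main(void)
{
	struct softreq *sr = malloc(sizeof(*sr));

	if (!sr)
		return 1;
	sr->callback = done;
	sr->cb_arg = "req0";
	complete_and_destroy(sr, 0);
	return 0;
}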
index 23ea1ed..352bd24 100644 (file)
@@ -37,8 +37,9 @@ extern u64 efi_system_table;
 static struct ptdump_info efi_ptdump_info = {
        .mm             = &efi_mm,
        .markers        = (struct addr_marker[]){
-               { 0,            "UEFI runtime start" },
-               { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" }
+               { 0,                            "UEFI runtime start" },
+               { DEFAULT_MAP_WINDOW_64,        "UEFI runtime end" },
+               { -1,                           NULL }
        },
        .base_addr      = 0,
 };
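
The arm-runtime.c fix above appends a { -1, NULL } entry so that whatever
walks efi_ptdump_info.markers can stop at a sentinel instead of running off
the end of the array. A minimal sketch of sentinel-terminated iteration,
with the struct layout inferred from the hunk and a stand-in value for
DEFAULT_MAP_WINDOW_64:

#include <stdio.h>

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

static const struct addr_marker markers[] = {
	{ 0,         "UEFI runtime start" },
	{ 1UL << 48, "UEFI runtime end" },  /* DEFAULT_MAP_WINDOW_64 stand-in */
	{ -1,        NULL }                 /* terminator added by the patch */
};

int main(void)
{
	for (const struct addr_marker *m = markers; m->name; m++)
		printf("%#lx: %s\n", m->start_address, m->name);
	return 0;
}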
index 6b11f13..7f9e030 100644 (file)
@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
 static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
                                            unsigned int nr, int value)
 {
-       if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
+       if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
+               altr_a10sr_gpio_set(gc, nr, value);
                return 0;
+       }
        return -EINVAL;
 }
 
index e0d6a0a..e41223c 100644 (file)
@@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)
 
 static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
 {
-       return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
+       struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
+
+       switch (sprd_eic->type) {
+       case SPRD_EIC_DEBOUNCE:
+               return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
+       case SPRD_EIC_ASYNC:
+               return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
+       case SPRD_EIC_SYNC:
+               return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
+       default:
+               return -ENOTSUPP;
+       }
 }
 
 static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_BOTH:
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
index adf72dd..68a35b6 100644 (file)
@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
  */
 struct pcf857x {
        struct gpio_chip        chip;
+       struct irq_chip         irqchip;
        struct i2c_client       *client;
        struct mutex            lock;           /* protect 'out' */
        unsigned                out;            /* software latch */
@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
        mutex_unlock(&gpio->lock);
 }
 
-static struct irq_chip pcf857x_irq_chip = {
-       .name           = "pcf857x",
-       .irq_enable     = pcf857x_irq_enable,
-       .irq_disable    = pcf857x_irq_disable,
-       .irq_ack        = noop,
-       .irq_mask       = noop,
-       .irq_unmask     = noop,
-       .irq_set_wake   = pcf857x_irq_set_wake,
-       .irq_bus_lock           = pcf857x_irq_bus_lock,
-       .irq_bus_sync_unlock    = pcf857x_irq_bus_sync_unlock,
-};
-
 /*-------------------------------------------------------------------------*/
 
 static int pcf857x_probe(struct i2c_client *client,
@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
 
        /* Enable irqchip if we have an interrupt */
        if (client->irq) {
+               gpio->irqchip.name = "pcf857x",
+               gpio->irqchip.irq_enable = pcf857x_irq_enable,
+               gpio->irqchip.irq_disable = pcf857x_irq_disable,
+               gpio->irqchip.irq_ack = noop,
+               gpio->irqchip.irq_mask = noop,
+               gpio->irqchip.irq_unmask = noop,
+               gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
+               gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
+               gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
                status = gpiochip_irqchip_add_nested(&gpio->chip,
-                                                    &pcf857x_irq_chip,
+                                                    &gpio->irqchip,
                                                     0, handle_level_irq,
                                                     IRQ_TYPE_NONE);
                if (status) {
@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
                if (status)
                        goto fail;
 
-               gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
+               gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
                                            client->irq);
                gpio->irq_parent = client->irq;
        }
index 1b79ebc..541fa6a 100644 (file)
@@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
        struct vf610_gpio_port *port;
        struct resource *iores;
        struct gpio_chip *gc;
+       int i;
        int ret;
 
        port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
        if (ret < 0)
                return ret;
 
+       /* Mask all GPIO interrupts */
+       for (i = 0; i < gc->ngpio; i++)
+               vf610_gpio_writel(0, port->base + PORT_PCR(i));
+
        /* Clear the interrupt status register for all GPIO's */
        vf610_gpio_writel(~0, port->base + PORT_ISFR);
 
index 1651d7f..d1adfdf 100644 (file)
@@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
        /* Do not leak kernel stack to userspace */
        memset(&ge, 0, sizeof(ge));
 
-       ge.timestamp = le->timestamp;
+       /*
+        * We may be running from a nested threaded interrupt in which case
+        * we didn't get the timestamp from lineevent_irq_handler().
+        */
+       if (!le->timestamp)
+               ge.timestamp = ktime_get_real_ns();
+       else
+               ge.timestamp = le->timestamp;
 
        if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
            && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
index c39f89d..2dc628d 100644 (file)
@@ -1828,7 +1828,7 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
 
        ret = i3c_master_retrieve_dev_info(newdev);
        if (ret)
-               goto err_free_dev;
+               goto err_detach_dev;
 
        olddev = i3c_master_search_i3c_dev_duplicate(newdev);
        if (olddev) {
index f8c00b9..bb03079 100644 (file)
@@ -419,12 +419,9 @@ static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
        spin_unlock_irqrestore(&master->xferqueue.lock, flags);
 }
 
-static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
-                                      struct dw_i3c_xfer *xfer)
+static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
+                                             struct dw_i3c_xfer *xfer)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&master->xferqueue.lock, flags);
        if (master->xferqueue.cur == xfer) {
                u32 status;
 
@@ -439,6 +436,15 @@ static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
        } else {
                list_del_init(&xfer->node);
        }
+}
+
+static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
+                                      struct dw_i3c_xfer *xfer)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&master->xferqueue.lock, flags);
+       dw_i3c_master_dequeue_xfer_locked(master, xfer);
        spin_unlock_irqrestore(&master->xferqueue.lock, flags);
 }
 
@@ -494,7 +500,7 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
        complete(&xfer->comp);
 
        if (ret < 0) {
-               dw_i3c_master_dequeue_xfer(master, xfer);
+               dw_i3c_master_dequeue_xfer_locked(master, xfer);
                writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
                       master->regs + DEVICE_CTRL);
        }
index da58020..33a28cd 100644 (file)
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
 
 int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 {
-       struct request *sense_rq = drive->sense_rq;
+       ide_hwif_t *hwif = drive->hwif;
+       struct request *sense_rq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&hwif->lock, flags);
 
        /* deferred failure from ide_prep_sense() */
        if (!drive->sense_rq_armed) {
                printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
                       drive->name);
+               spin_unlock_irqrestore(&hwif->lock, flags);
                return -ENOMEM;
        }
 
+       sense_rq = drive->sense_rq;
        ide_req(sense_rq)->special = special;
        drive->sense_rq_armed = false;
 
        drive->hwif->rq = NULL;
 
        ide_insert_request_head(drive, sense_rq);
+       spin_unlock_irqrestore(&hwif->lock, flags);
        return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
index 8445b48..b137f27 100644 (file)
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
        }
 
        if (!blk_update_request(rq, error, nr_bytes)) {
-               if (rq == drive->sense_rq)
+               if (rq == drive->sense_rq) {
                        drive->sense_rq = NULL;
+                       drive->sense_rq_active = false;
+               }
 
                __blk_mq_end_request(rq, error);
                return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
                blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
 }
 
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
-                         const struct blk_mq_queue_data *bd)
+blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
+                         bool local_requeue)
 {
-       ide_drive_t     *drive = hctx->queue->queuedata;
-       ide_hwif_t      *hwif = drive->hwif;
+       ide_hwif_t *hwif = drive->hwif;
        struct ide_host *host = hwif->host;
-       struct request  *rq = bd->rq;
        ide_startstop_t startstop;
 
        if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ide_lock_host(host, hwif))
                return BLK_STS_DEV_RESOURCE;
 
-       blk_mq_start_request(rq);
-
        spin_lock_irq(&hwif->lock);
 
        if (!ide_lock_port(hwif)) {
@@ -511,18 +506,6 @@ repeat:
                drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 
                /*
-                * we know that the queue isn't empty, but this can happen
-                * if ->prep_rq() decides to kill a request
-                */
-               if (!rq) {
-                       rq = bd->rq;
-                       if (!rq) {
-                               ide_unlock_port(hwif);
-                               goto out;
-                       }
-               }
-
-               /*
                 * Sanity: don't accept a request that isn't a PM request
                 * if we are currently power managed. This is very important as
                 * blk_stop_queue() doesn't prevent the blk_fetch_request()
@@ -560,9 +543,12 @@ repeat:
                }
        } else {
 plug_device:
+               if (local_requeue)
+                       list_add(&rq->queuelist, &drive->rq_list);
                spin_unlock_irq(&hwif->lock);
                ide_unlock_host(host);
-               ide_requeue_and_plug(drive, rq);
+               if (!local_requeue)
+                       ide_requeue_and_plug(drive, rq);
                return BLK_STS_OK;
        }
 
@@ -573,6 +559,26 @@ out:
        return BLK_STS_OK;
 }
 
+/*
+ * Issue a new request to a device.
+ */
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+                         const struct blk_mq_queue_data *bd)
+{
+       ide_drive_t *drive = hctx->queue->queuedata;
+       ide_hwif_t *hwif = drive->hwif;
+
+       spin_lock_irq(&hwif->lock);
+       if (drive->sense_rq_active) {
+               spin_unlock_irq(&hwif->lock);
+               return BLK_STS_DEV_RESOURCE;
+       }
+       spin_unlock_irq(&hwif->lock);
+
+       blk_mq_start_request(bd->rq);
+       return ide_issue_rq(drive, bd->rq, false);
+}
+
 static int drive_is_ready(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
 
 void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
 {
-       ide_hwif_t *hwif = drive->hwif;
-       unsigned long flags;
-
-       spin_lock_irqsave(&hwif->lock, flags);
+       drive->sense_rq_active = true;
        list_add_tail(&rq->queuelist, &drive->rq_list);
-       spin_unlock_irqrestore(&hwif->lock, flags);
-
        kblockd_schedule_work(&drive->rq_work);
 }
 EXPORT_SYMBOL_GPL(ide_insert_request_head);
index 102aa3b..8af7af6 100644 (file)
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
        scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
        scsi_req(rq)->cmd_len = 1;
        ide_req(rq)->type = ATA_PRIV_MISC;
+       spin_lock_irq(&hwif->lock);
        ide_insert_request_head(drive, rq);
+       spin_unlock_irq(&hwif->lock);
 
 out:
        return;
index 63627be..5aeaca2 100644 (file)
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
        ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq;
+       blk_status_t ret;
        LIST_HEAD(list);
 
-       spin_lock_irq(&hwif->lock);
-       if (!list_empty(&drive->rq_list))
-               list_splice_init(&drive->rq_list, &list);
-       spin_unlock_irq(&hwif->lock);
+       blk_mq_quiesce_queue(drive->queue);
 
-       while (!list_empty(&list)) {
-               rq = list_first_entry(&list, struct request, queuelist);
+       ret = BLK_STS_OK;
+       spin_lock_irq(&hwif->lock);
+       while (!list_empty(&drive->rq_list)) {
+               rq = list_first_entry(&drive->rq_list, struct request, queuelist);
                list_del_init(&rq->queuelist);
-               blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
+
+               spin_unlock_irq(&hwif->lock);
+               ret = ide_issue_rq(drive, rq, true);
+               spin_lock_irq(&hwif->lock);
        }
+       spin_unlock_irq(&hwif->lock);
+
+       blk_mq_unquiesce_queue(drive->queue);
+
+       if (ret != BLK_STS_OK)
+               kblockd_schedule_work(&drive->rq_work);
 }
 
 static const u8 ide_hwif_to_major[] =
index 3cd830d..6167343 100644 (file)
@@ -267,7 +267,6 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
 #endif
 
 struct ib_device *ib_device_get_by_index(u32 ifindex);
-void ib_device_put(struct ib_device *device);
 /* RDMA device netlink */
 void nldev_init(void);
 void nldev_exit(void);
index 8872453..238ec42 100644 (file)
@@ -156,19 +156,26 @@ struct ib_device *ib_device_get_by_index(u32 index)
        down_read(&lists_rwsem);
        device = __ib_device_get_by_index(index);
        if (device) {
-               /* Do not return a device if unregistration has started. */
-               if (!refcount_inc_not_zero(&device->refcount))
+               if (!ib_device_try_get(device))
                        device = NULL;
        }
        up_read(&lists_rwsem);
        return device;
 }
 
+/**
+ * ib_device_put - Release IB device reference
+ * @device: device whose reference to be released
+ *
+ * ib_device_put() releases reference to the IB device to allow it to be
+ * unregistered and eventually free.
+ */
 void ib_device_put(struct ib_device *device)
 {
        if (refcount_dec_and_test(&device->refcount))
                complete(&device->unreg_completion);
 }
+EXPORT_SYMBOL(ib_device_put);
 
 static struct ib_device *__ib_device_get_by_name(const char *name)
 {
@@ -303,7 +310,6 @@ struct ib_device *ib_alloc_device(size_t size)
        rwlock_init(&device->client_data_lock);
        INIT_LIST_HEAD(&device->client_data_list);
        INIT_LIST_HEAD(&device->port_list);
-       refcount_set(&device->refcount, 1);
        init_completion(&device->unreg_completion);
 
        return device;
@@ -620,6 +626,7 @@ int ib_register_device(struct ib_device *device, const char *name,
                goto cg_cleanup;
        }
 
+       refcount_set(&device->refcount, 1);
        device->reg_state = IB_DEV_REGISTERED;
 
        list_for_each_entry(client, &client_list, list)
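
Taken together, the device.c hunks change the lifecycle so the refcount
only goes live in ib_register_device() rather than ib_alloc_device(),
lookups take references via ib_device_try_get(), and unregistration
waits on a completion signalled by the final ib_device_put(). The
pattern in miniature, a hedged sketch with hypothetical ex_* names:

    #include <linux/refcount.h>
    #include <linux/completion.h>

    struct ex_obj {
            refcount_t refcount;
            struct completion unreg_done;
    };

    static void ex_register(struct ex_obj *o)
    {
            init_completion(&o->unreg_done);
            refcount_set(&o->refcount, 1);      /* live only once registered */
    }

    static bool ex_try_get(struct ex_obj *o)
    {
            /* fails once unregistration has dropped the count to zero */
            return refcount_inc_not_zero(&o->refcount);
    }

    static void ex_put(struct ex_obj *o)
    {
            if (refcount_dec_and_test(&o->refcount))
                    complete(&o->unreg_done);
    }

    static void ex_unregister(struct ex_obj *o)
    {
            ex_put(o);                           /* drop the initial ref */
            wait_for_completion(&o->unreg_done); /* outlive all readers */
    }
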
index a4ec430..acb882f 100644
@@ -352,6 +352,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
        umem->writable   = 1;
        umem->is_odp = 1;
        odp_data->per_mm = per_mm;
+       umem->owning_mm  = per_mm->mm;
+       mmgrab(umem->owning_mm);
 
        mutex_init(&odp_data->umem_mutex);
        init_completion(&odp_data->notifier_completion);
@@ -384,6 +386,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
 out_page_list:
        vfree(odp_data->page_list);
 out_odp_data:
+       mmdrop(umem->owning_mm);
        kfree(odp_data);
        return ERR_PTR(ret);
 }
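
The umem_odp fix takes mmgrab() on the owning mm when the implicit-ODP
child umem is created, with a matching mmdrop() on the error path. As
background (my gloss, not from the patch text): mmgrab()/mmdrop() pin
only struct mm_struct itself, keeping the pointer valid after process
exit, while mmget()/mmput() pin the whole address space. A one-glance
contrast:

    #include <linux/sched/mm.h>

    static void ex_pin_contrast(struct mm_struct *mm)
    {
            mmgrab(mm);  /* mm_count: the struct stays allocated, no more */
            mmdrop(mm);

            mmget(mm);   /* mm_users: the address space itself stays alive */
            mmput(mm);
    }
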
index 2890a77..5f36683 100644
@@ -204,6 +204,9 @@ void ib_uverbs_release_file(struct kref *ref)
        if (atomic_dec_and_test(&file->device->refcount))
                ib_uverbs_comp_dev(file->device);
 
+       if (file->async_file)
+               kref_put(&file->async_file->ref,
+                        ib_uverbs_release_async_event_file);
        put_device(&file->device->dev);
        kfree(file);
 }
@@ -964,11 +967,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 
                /* Get an arbitrary mm pointer that hasn't been cleaned yet */
                mutex_lock(&ufile->umap_lock);
-               if (!list_empty(&ufile->umaps)) {
-                       mm = list_first_entry(&ufile->umaps,
-                                             struct rdma_umap_priv, list)
-                                    ->vma->vm_mm;
-                       mmget(mm);
+               while (!list_empty(&ufile->umaps)) {
+                       int ret;
+
+                       priv = list_first_entry(&ufile->umaps,
+                                               struct rdma_umap_priv, list);
+                       mm = priv->vma->vm_mm;
+                       ret = mmget_not_zero(mm);
+                       if (!ret) {
+                               list_del_init(&priv->list);
+                               mm = NULL;
+                               continue;
+                       }
+                       break;
                }
                mutex_unlock(&ufile->umap_lock);
                if (!mm)
@@ -1096,10 +1107,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
        list_del_init(&file->list);
        mutex_unlock(&file->device->lists_mutex);
 
-       if (file->async_file)
-               kref_put(&file->async_file->ref,
-                        ib_uverbs_release_async_event_file);
-
        kref_put(&file->ref, ib_uverbs_release_file);
 
        return 0;
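
Two lifetime fixes above: the async-event file reference now drops in
ib_uverbs_release_file(), at the final kref put instead of at close(2)
time, and uverbs_user_mmap_disassociate() walks the umap list with
mmget_not_zero() so entries whose process already exited are unlinked
and skipped rather than handed back with a dead mm. The skip loop as a
sketch, hypothetical ex_* names:

    #include <linux/mm.h>
    #include <linux/sched/mm.h>

    struct ex_umap_priv {
            struct list_head list;
            struct vm_area_struct *vma;
    };

    static struct mm_struct *ex_first_live_mm(struct list_head *umaps,
                                              struct mutex *lock)
    {
            struct ex_umap_priv *priv;
            struct mm_struct *mm = NULL;

            mutex_lock(lock);
            while (!list_empty(umaps)) {
                    priv = list_first_entry(umaps, struct ex_umap_priv, list);
                    mm = priv->vma->vm_mm;
                    if (!mmget_not_zero(mm)) {  /* owner already exited */
                            list_del_init(&priv->list);
                            mm = NULL;
                            continue;
                    }
                    break;                      /* usable reference taken */
            }
            mutex_unlock(lock);
            return mm;                          /* caller must mmput() */
    }
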
index 5030ec4..2a3f2f0 100644
@@ -168,12 +168,18 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr,
 static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
        struct uverbs_attr_bundle *attrs)
 {
-       struct ib_device *ib_dev = attrs->ufile->device->ib_dev;
+       struct ib_device *ib_dev;
        struct ib_port_attr attr = {};
        struct ib_uverbs_query_port_resp_ex resp = {};
+       struct ib_ucontext *ucontext;
        int ret;
        u8 port_num;
 
+       ucontext = ib_uverbs_get_ucontext(attrs);
+       if (IS_ERR(ucontext))
+               return PTR_ERR(ucontext);
+       ib_dev = ucontext->device;
+
        /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
        if (!ib_dev->ops.query_port)
                return -EOPNOTSUPP;
index c22ebc7..f9a7e9d 100644
@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
                vmf = 1;
                break;
        case STATUS:
-               if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
+               if (flags & VM_WRITE) {
                        ret = -EPERM;
                        goto done;
                }
index 88242fe..bf96067 100644
@@ -987,7 +987,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
            opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
                wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
-               tlen -= sizeof(u32);
        } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
                wc.ex.imm_data = 0;
                wc.wc_flags = 0;
index 960b194..12deacf 100644
@@ -210,6 +210,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
                                   struct ib_udata *udata)
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+       struct hns_roce_ib_create_srq_resp resp = {};
        struct hns_roce_srq *srq;
        int srq_desc_size;
        int srq_buf_size;
@@ -378,16 +379,21 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
 
        srq->event = hns_roce_ib_srq_event;
        srq->ibsrq.ext.xrc.srq_num = srq->srqn;
+       resp.srqn = srq->srqn;
 
        if (udata) {
-               if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
+               if (ib_copy_to_udata(udata, &resp,
+                                    min(udata->outlen, sizeof(resp)))) {
                        ret = -EFAULT;
-                       goto err_wrid;
+                       goto err_srqc_alloc;
                }
        }
 
        return &srq->ibsrq;
 
+err_srqc_alloc:
+       hns_roce_srq_free(hr_dev, srq);
+
 err_wrid:
        kvfree(srq->wrid);
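
The hns_roce change wraps the returned SRQ number in a named response
struct and copies back only min(udata->outlen, sizeof(resp)) bytes, the
usual forward-compatibility idiom for uverbs responses: older userspace
with a shorter struct still gets everything it knows about, and the
struct can grow later without breaking it. It also reroutes the copy
failure to a label that frees the SRQ context. The idiom in isolation,
sketched with hypothetical ex_* names:

    #include <linux/kernel.h>
    #include <rdma/ib_verbs.h>

    struct ex_create_srq_resp {
            __u32 srqn;
            __u32 reserved;                     /* room to grow */
    };

    static int ex_copy_resp(struct ib_udata *udata,
                            struct ex_create_srq_resp *resp)
    {
            if (!udata)
                    return 0;                   /* kernel consumer */
            return ib_copy_to_udata(udata, resp,
                                    min(udata->outlen, sizeof(*resp)));
    }
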
 
index 25439da..936ee13 100644
@@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
        sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
        if (sqp->tx_ring[wire_tx_ix].ah)
-               rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
+               mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
        sqp->tx_ring[wire_tx_ix].ah = ah;
        ib_dma_sync_single_for_cpu(&dev->ib_dev,
                                   sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1902,7 +1902,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
                if (wc.status == IB_WC_SUCCESS) {
                        switch (wc.opcode) {
                        case IB_WC_SEND:
-                               rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
+                               mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
                                sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                                        = NULL;
@@ -1931,7 +1931,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
                                 " status = %d, wrid = 0x%llx\n",
                                 ctx->slave, wc.status, wc.wr_id);
                        if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
-                               rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
+                               mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
                                sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                                        = NULL;
index e8a1e44..798591a 100644
@@ -630,8 +630,7 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
                UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
        UAPI_DEF_CHAIN_OBJ_TREE(
                UVERBS_OBJECT_FLOW,
-               &mlx5_ib_fs,
-               UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
+               &mlx5_ib_fs),
        UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
                                &mlx5_ib_flow_actions),
        {},
index 01e0f62..4ee3296 100644
@@ -1595,10 +1595,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
        struct prefetch_mr_work *w =
                container_of(work, struct prefetch_mr_work, work);
 
-       if (w->dev->ib_dev.reg_state == IB_DEV_REGISTERED)
+       if (ib_device_try_get(&w->dev->ib_dev)) {
                mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list,
                                         w->num_sge);
-
+               ib_device_put(&w->dev->ib_dev);
+       }
+       put_device(&w->dev->ib_dev.dev);
        kfree(w);
 }
 
@@ -1617,15 +1619,13 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
                return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list,
                                                num_sge);
 
-       if (dev->ib_dev.reg_state != IB_DEV_REGISTERED)
-               return -ENODEV;
-
        work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
        if (!work)
                return -ENOMEM;
 
        memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
 
+       get_device(&dev->ib_dev.dev);
        work->dev = dev;
        work->pf_flags = pf_flags;
        work->num_sge = num_sge;
index dd2ae64..7db778d 100644
@@ -1912,14 +1912,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                }
 
                if (!check_flags_mask(ucmd.flags,
+                                     MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
+                                     MLX5_QP_FLAG_BFREG_INDEX |
+                                     MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
+                                     MLX5_QP_FLAG_SCATTER_CQE |
                                      MLX5_QP_FLAG_SIGNATURE |
-                                             MLX5_QP_FLAG_SCATTER_CQE |
-                                             MLX5_QP_FLAG_TUNNEL_OFFLOADS |
-                                             MLX5_QP_FLAG_BFREG_INDEX |
-                                             MLX5_QP_FLAG_TYPE_DCT |
-                                             MLX5_QP_FLAG_TYPE_DCI |
-                                             MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
-                                             MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE))
+                                     MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
+                                     MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+                                     MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+                                     MLX5_QP_FLAG_TYPE_DCI |
+                                     MLX5_QP_FLAG_TYPE_DCT))
                        return -EINVAL;
 
                err = get_qp_user_index(to_mucontext(pd->uobject->context),
index 868da0e..445ea19 100644
@@ -512,7 +512,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
            opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
                wc.ex.imm_data = ohdr->u.ud.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
-               tlen -= sizeof(u32);
        } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
                wc.ex.imm_data = 0;
                wc.wc_flags = 0;
index a1bd8cf..c6cc3e4 100644
@@ -2910,6 +2910,8 @@ send:
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
+               if (wqe->length > qp->r_len)
+                       goto inv_err;
                break;
 
        case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -3078,7 +3080,10 @@ op_err:
        goto err;
 
 inv_err:
-       send_status = IB_WC_REM_INV_REQ_ERR;
+       send_status =
+               sqp->ibqp.qp_type == IB_QPT_RC ?
+                       IB_WC_REM_INV_REQ_ERR :
+                       IB_WC_SUCCESS;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;
 
index 1da119d..73e808c 100644
@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
        struct list_head     list;
        struct net_device   *dev;
        struct ipoib_neigh  *neigh;
-       struct ipoib_path   *path;
        struct ipoib_tx_buf *tx_ring;
        unsigned int         tx_head;
        unsigned int         tx_tail;
index 0428e01..aa9dcfc 100644
@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
 
        neigh->cm = tx;
        tx->neigh = neigh;
-       tx->path = path;
        tx->dev = dev;
        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
                                neigh->daddr + QPN_AND_OPTIONS_OFFSET);
                        goto free_neigh;
                }
-               memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
+               memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
 
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);
index bae0822..a7cfab3 100644
@@ -23,7 +23,6 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/clk.h>
 
 /*
  * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller.
@@ -75,7 +74,6 @@ struct olpc_apsp {
        struct serio *kbio;
        struct serio *padio;
        void __iomem *base;
-       struct clk *clk;
        int open_count;
        int irq;
 };
@@ -148,17 +146,11 @@ static int olpc_apsp_open(struct serio *port)
        struct olpc_apsp *priv = port->port_data;
        unsigned int tmp;
        unsigned long l;
-       int error;
 
        if (priv->open_count++ == 0) {
-               error = clk_prepare_enable(priv->clk);
-               if (error)
-                       return error;
-
                l = readl(priv->base + COMMAND_FIFO_STATUS);
                if (!(l & CMD_STS_MASK)) {
                        dev_err(priv->dev, "SP cannot accept commands.\n");
-                       clk_disable_unprepare(priv->clk);
                        return -EIO;
                }
 
@@ -179,8 +171,6 @@ static void olpc_apsp_close(struct serio *port)
                /* Disable interrupt 0 */
                tmp = readl(priv->base + PJ_INTERRUPT_MASK);
                writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK);
-
-               clk_disable_unprepare(priv->clk);
        }
 }
 
@@ -208,10 +198,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
        if (priv->irq < 0)
                return priv->irq;
 
-       priv->clk = devm_clk_get(&pdev->dev, "sp");
-       if (IS_ERR(priv->clk))
-               return PTR_ERR(priv->clk);
-
        /* KEYBOARD */
        kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
        if (!kb_serio)
index 87ba23a..2a7b78b 100644
@@ -1991,16 +1991,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
 
 static void do_detach(struct iommu_dev_data *dev_data)
 {
+       struct protection_domain *domain = dev_data->domain;
        struct amd_iommu *iommu;
        u16 alias;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
        alias = dev_data->alias;
 
-       /* decrease reference counters */
-       dev_data->domain->dev_iommu[iommu->index] -= 1;
-       dev_data->domain->dev_cnt                 -= 1;
-
        /* Update data structures */
        dev_data->domain = NULL;
        list_del(&dev_data->list);
@@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
 
        /* Flush the DTE entry */
        device_flush_dte(dev_data);
+
+       /* Flush IOTLB */
+       domain_flush_tlb_pde(domain);
+
+       /* Wait for the flushes to finish */
+       domain_flush_complete(domain);
+
+       /* decrease reference counters - needs to happen after the flushes */
+       domain->dev_iommu[iommu->index] -= 1;
+       domain->dev_cnt                 -= 1;
 }
 
 /*
@@ -2617,13 +2624,13 @@ out_unmap:
                        bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
                        iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
 
-                       if (--mapped_pages)
+                       if (--mapped_pages == 0)
                                goto out_free_iova;
                }
        }
 
 out_free_iova:
-       free_iova_fast(&dma_dom->iovad, address, npages);
+       free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
 
 out_err:
        return 0;
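
Two independent amd_iommu fixes above. In do_detach() the DTE and the
domain's IOTLB are now flushed, and the flush completed, before the
per-IOMMU and per-domain counters drop, matching the added comment that
the decrement must come after the flushes. In the map_sg() error path,
the old "if (--mapped_pages)" bailed out of the unwind while pages were
still mapped; the corrected "== 0" test undoes every page, and
free_iova_fast() is now given a pfn (address >> PAGE_SHIFT) as its API
expects. The corrected unwind, condensed into a sketch with hypothetical
ex_* names:

    #include <linux/iova.h>

    struct ex_dom {                             /* hypothetical */
            struct iova_domain iovad;
    };

    static void ex_unmap_page(struct ex_dom *dom, unsigned long addr);

    static void ex_unwind_map_sg(struct ex_dom *dom, unsigned long address,
                                 unsigned long npages, unsigned long mapped)
    {
            unsigned long addr = address;

            while (mapped--) {                  /* undo every mapped page */
                    ex_unmap_page(dom, addr);
                    addr += PAGE_SIZE;
            }
            free_iova_fast(&dom->iovad, address >> PAGE_SHIFT, npages);
    }
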
index 2bd9ac2..1457f93 100644
@@ -5294,7 +5294,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
        struct iommu_resv_region *entry, *next;
 
        list_for_each_entry_safe(entry, next, head, list) {
-               if (entry->type == IOMMU_RESV_RESERVED)
+               if (entry->type == IOMMU_RESV_MSI)
                        kfree(entry);
        }
 }
index 730f7da..7e0df67 100644
@@ -441,6 +441,10 @@ static int mtk_iommu_add_device(struct device *dev)
                iommu_spec.args_count = count;
 
                mtk_iommu_create_mapping(dev, &iommu_spec);
+
+               /* dev->iommu_fwspec might have changed */
+               fwspec = dev_iommu_fwspec_get(dev);
+
                of_node_put(iommu_spec.np);
        }
 
index 5385f57..2793333 100644
@@ -71,14 +71,17 @@ static void xtensa_mx_irq_mask(struct irq_data *d)
        unsigned int mask = 1u << d->hwirq;
 
        if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
-                               XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
-               set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
-                                       HW_IRQ_MX_BASE), MIENG);
-       } else {
-               mask = __this_cpu_read(cached_irq_mask) & ~mask;
-               __this_cpu_write(cached_irq_mask, mask);
-               xtensa_set_sr(mask, intenable);
+                   XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+               unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
+
+               if (ext_irq >= HW_IRQ_MX_BASE) {
+                       set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
+                       return;
+               }
        }
+       mask = __this_cpu_read(cached_irq_mask) & ~mask;
+       __this_cpu_write(cached_irq_mask, mask);
+       xtensa_set_sr(mask, intenable);
 }
 
 static void xtensa_mx_irq_unmask(struct irq_data *d)
@@ -86,14 +89,17 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
        unsigned int mask = 1u << d->hwirq;
 
        if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
-                               XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
-               set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
-                                       HW_IRQ_MX_BASE), MIENGSET);
-       } else {
-               mask |= __this_cpu_read(cached_irq_mask);
-               __this_cpu_write(cached_irq_mask, mask);
-               xtensa_set_sr(mask, intenable);
+                   XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+               unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
+
+               if (ext_irq >= HW_IRQ_MX_BASE) {
+                       set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
+                       return;
+               }
        }
+       mask |= __this_cpu_read(cached_irq_mask);
+       __this_cpu_write(cached_irq_mask, mask);
+       xtensa_set_sr(mask, intenable);
 }
 
 static void xtensa_mx_irq_enable(struct irq_data *d)
@@ -113,7 +119,11 @@ static void xtensa_mx_irq_ack(struct irq_data *d)
 
 static int xtensa_mx_irq_retrigger(struct irq_data *d)
 {
-       xtensa_set_sr(1 << d->hwirq, intset);
+       unsigned int mask = 1u << d->hwirq;
+
+       if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
+               return 0;
+       xtensa_set_sr(mask, intset);
        return 1;
 }
 
index c200234..ab12328 100644
@@ -70,7 +70,11 @@ static void xtensa_irq_ack(struct irq_data *d)
 
 static int xtensa_irq_retrigger(struct irq_data *d)
 {
-       xtensa_set_sr(1 << d->hwirq, intset);
+       unsigned int mask = 1u << d->hwirq;
+
+       if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
+               return 0;
+       xtensa_set_sr(mask, intset);
        return 1;
 }
 
index ec3a5ef..cbbe6b6 100644
@@ -1935,12 +1935,14 @@ out:
 }
 
 static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
-                         sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+               struct r5conf *conf,
+               sector_t stripe_sect,
+               int noblock)
 {
        struct stripe_head *sh;
 
-       sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+       sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
        if (!sh)
                return NULL;  /* no more stripe available */
 
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                                stripe_sect);
 
                if (!sh) {
-                       sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+                       sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
                        /*
                         * cannot get stripe from raid5_get_active_stripe
                         * try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                r5c_recovery_replay_stripes(
                                        cached_stripe_list, ctx);
                                sh = r5c_recovery_alloc_stripe(
-                                       conf, stripe_sect);
+                                       conf, stripe_sect, 1);
                        }
                        if (!sh) {
+                               int new_size = conf->min_nr_stripes * 2;
                                pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
                                        mdname(mddev),
-                                       conf->min_nr_stripes * 2);
-                               raid5_set_cache_size(mddev,
-                                                    conf->min_nr_stripes * 2);
-                               sh = r5c_recovery_alloc_stripe(conf,
-                                                              stripe_sect);
+                                       new_size);
+                               ret = raid5_set_cache_size(mddev, new_size);
+                               if (conf->min_nr_stripes <= new_size / 2) {
+                                       pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+                                               mdname(mddev),
+                                               ret,
+                                               new_size,
+                                               conf->min_nr_stripes,
+                                               conf->max_nr_stripes);
+                                       return -ENOMEM;
+                               }
+                               sh = r5c_recovery_alloc_stripe(
+                                       conf, stripe_sect, 0);
                        }
                        if (!sh) {
                                pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
-                                      mdname(mddev));
+                                       mdname(mddev));
                                return -ENOMEM;
                        }
                        list_add_tail(&sh->lru, cached_stripe_list);
index 4990f03..cecea90 100644
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
 int
 raid5_set_cache_size(struct mddev *mddev, int size)
 {
+       int result = 0;
        struct r5conf *conf = mddev->private;
 
        if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 
        mutex_lock(&conf->cache_size_mutex);
        while (size > conf->max_nr_stripes)
-               if (!grow_one_stripe(conf, GFP_KERNEL))
+               if (!grow_one_stripe(conf, GFP_KERNEL)) {
+                       conf->min_nr_stripes = conf->max_nr_stripes;
+                       result = -ENOMEM;
                        break;
+               }
        mutex_unlock(&conf->cache_size_mutex);
 
-       return 0;
+       return result;
 }
 EXPORT_SYMBOL(raid5_set_cache_size);
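
The two md hunks cooperate: r5c_recovery_alloc_stripe() gains a noblock
flag so journal recovery first tries a non-blocking stripe allocation,
replays cached stripes and retries, then grows the stripe cache and
makes one final blocking attempt; raid5_set_cache_size() in turn stops
reporting success on partial growth, clamping min_nr_stripes and
returning -ENOMEM when grow_one_stripe() fails, so recovery can tell a
failed grow apart instead of blocking forever on a stripe that will
never exist. The escalation ladder, condensed as a non-standalone sketch
with hypothetical ex_* helpers:

    sh = ex_alloc_stripe(conf, sect, /* noblock */ 1);
    if (!sh) {
            ex_replay_stripes(cached_list, ctx);    /* free some stripes */
            sh = ex_alloc_stripe(conf, sect, 1);
    }
    if (!sh) {
            int new_size = conf->min_nr_stripes * 2;

            ret = raid5_set_cache_size(mddev, new_size);
            if (conf->min_nr_stripes <= new_size / 2)
                    return -ENOMEM;                 /* cache did not grow */
            sh = ex_alloc_stripe(conf, sect, 0);    /* may block, safely */
    }
    if (!sh)
            return -ENOMEM;
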
 
index f461460..76f9909 100644
@@ -1419,7 +1419,7 @@ config MFD_TPS65217
 
 config MFD_TPS68470
        bool "TI TPS68470 Power Management / LED chips"
-       depends on ACPI && I2C=y
+       depends on ACPI && PCI && I2C=y
        select MFD_CORE
        select REGMAP_I2C
        select I2C_DESIGNWARE_PLATFORM
index 5029352..c9e7aa5 100644
@@ -1431,6 +1431,8 @@ static int bcm2835_probe(struct platform_device *pdev)
 
 err:
        dev_dbg(dev, "%s -> err %d\n", __func__, ret);
+       if (host->dma_chan_rxtx)
+               dma_release_channel(host->dma_chan_rxtx);
        mmc_free_host(mmc);
 
        return ret;
index 8afeaf8..833ef05 100644
@@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
 
        if (timing == MMC_TIMING_MMC_HS400 &&
            host->dev_comp->hs400_tune)
-               sdr_set_field(host->base + PAD_CMD_TUNE,
+               sdr_set_field(host->base + tune_reg,
                              MSDC_PAD_TUNE_CMDRRDLY,
                              host->hs400_cmd_int_delay);
        dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
index a0f954f..44e6c7b 100644
@@ -257,10 +257,7 @@ static int handle_tx(struct ser_device *ser)
                if (skb->len == 0) {
                        struct sk_buff *tmp = skb_dequeue(&ser->head);
                        WARN_ON(tmp != skb);
-                       if (in_interrupt())
-                               dev_kfree_skb_irq(skb);
-                       else
-                               kfree_skb(skb);
+                       dev_consume_skb_any(skb);
                }
        }
        /* Send flow off if queue is empty */
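
The caif change replaces the in_interrupt() branching with
dev_consume_skb_any(), which is safe from any context. The consume
spelling, as opposed to kfree, also matters for diagnostics: drop
tracing such as the kfree_skb tracepoint should fire only for packets
that were actually lost, and that same distinction drives the many
dev_kfree_skb_irq() to dev_consume_skb_irq() conversions in the ethernet
drivers further down. The convention, sketched:

    #include <linux/netdevice.h>

    static void ex_tx_complete(struct sk_buff *skb, bool failed)
    {
            if (failed)
                    dev_kfree_skb_any(skb);     /* counted/traced as a drop */
            else
                    dev_consume_skb_any(skb);   /* normal completion */
    }
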
index 2caa8c8..1bfc5ff 100644
@@ -664,7 +664,7 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
        if (port < 9)
                return 0;
 
-       return mv88e6390_serdes_irq_setup(chip, port);
+       return mv88e6390x_serdes_irq_setup(chip, port);
 }
 
 void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
index 4f11f98..1827ef1 100644
@@ -2059,7 +2059,7 @@ static inline void ace_tx_int(struct net_device *dev,
                if (skb) {
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
-                       dev_kfree_skb_irq(skb);
+                       dev_consume_skb_irq(skb);
                        info->skb = NULL;
                }
 
index 0fb986b..0ae723f 100644
@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
                        & 0xffff;
 
        if (inuse) { /* Tx FIFO is not empty */
-               ready = priv->tx_prod - priv->tx_cons - inuse - 1;
+               ready = max_t(int,
+                             priv->tx_prod - priv->tx_cons - inuse - 1, 0);
        } else {
                /* Check for buffered last packet */
                status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
index a90080f..e548c0a 100644
@@ -666,7 +666,7 @@ static int amd8111e_tx(struct net_device *dev)
                        pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
                                        lp->tx_skbuff[tx_index]->len,
                                        PCI_DMA_TODEVICE);
-                       dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
+                       dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
                        lp->tx_skbuff[tx_index] = NULL;
                        lp->tx_dma_addr[tx_index] = 0;
                }
index 6a8e256..4d3855c 100644
@@ -777,7 +777,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
 
                if (bp->tx_bufs[bp->tx_empty]) {
                        ++dev->stats.tx_packets;
-                       dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
+                       dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
                }
                bp->tx_bufs[bp->tx_empty] = NULL;
                bp->tx_fullup = 0;
index f448089..97ab0dd 100644
@@ -638,7 +638,7 @@ static void b44_tx(struct b44 *bp)
                bytes_compl += skb->len;
                pkts_compl++;
 
-               dev_kfree_skb_irq(skb);
+               dev_consume_skb_irq(skb);
        }
 
        netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
@@ -1012,7 +1012,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
 
                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
-               dev_kfree_skb_any(skb);
+               dev_consume_skb_any(skb);
                skb = bounce_skb;
        }
 
index 3d45f4c..9bbaad9 100644
 #define MACB_CAPS_JUMBO                                0x00000020
 #define MACB_CAPS_GEM_HAS_PTP                  0x00000040
 #define MACB_CAPS_BD_RD_PREFETCH               0x00000080
+#define MACB_CAPS_NEEDS_RSTONUBR               0x00000100
 #define MACB_CAPS_FIFO_MODE                    0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
@@ -1214,6 +1215,8 @@ struct macb {
 
        int     rx_bd_rd_prefetch;
        int     tx_bd_rd_prefetch;
+
+       u32     rx_intr_mask;
 };
 
 #ifdef CONFIG_MACB_USE_HWSTAMP
index 66cc792..2b28826 100644
@@ -56,8 +56,7 @@
 /* level of occupied TX descriptors under which we wake up TX process */
 #define MACB_TX_WAKEUP_THRESH(bp)      (3 * (bp)->tx_ring_size / 4)
 
-#define MACB_RX_INT_FLAGS      (MACB_BIT(RCOMP) | MACB_BIT(RXUBR)      \
-                                | MACB_BIT(ISR_ROVR))
+#define MACB_RX_INT_FLAGS      (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
 #define MACB_TX_ERR_FLAGS      (MACB_BIT(ISR_TUND)                     \
                                        | MACB_BIT(ISR_RLE)             \
                                        | MACB_BIT(TXERR))
@@ -1270,7 +1269,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
                                queue_writel(queue, ISR, MACB_BIT(RCOMP));
                        napi_reschedule(napi);
                } else {
-                       queue_writel(queue, IER, MACB_RX_INT_FLAGS);
+                       queue_writel(queue, IER, bp->rx_intr_mask);
                }
        }
 
@@ -1288,7 +1287,7 @@ static void macb_hresp_error_task(unsigned long data)
        u32 ctrl;
 
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-               queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+               queue_writel(queue, IDR, bp->rx_intr_mask |
                                         MACB_TX_INT_FLAGS |
                                         MACB_BIT(HRESP));
        }
@@ -1318,7 +1317,7 @@ static void macb_hresp_error_task(unsigned long data)
 
                /* Enable interrupts */
                queue_writel(queue, IER,
-                            MACB_RX_INT_FLAGS |
+                            bp->rx_intr_mask |
                             MACB_TX_INT_FLAGS |
                             MACB_BIT(HRESP));
        }
@@ -1372,14 +1371,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                            (unsigned int)(queue - bp->queues),
                            (unsigned long)status);
 
-               if (status & MACB_RX_INT_FLAGS) {
+               if (status & bp->rx_intr_mask) {
                        /* There's no point taking any more interrupts
                         * until we have processed the buffers. The
                         * scheduling call may fail if the poll routine
                         * is already scheduled, so disable interrupts
                         * now.
                         */
-                       queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
+                       queue_writel(queue, IDR, bp->rx_intr_mask);
                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                                queue_writel(queue, ISR, MACB_BIT(RCOMP));
 
@@ -1412,8 +1411,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                /* There is a hardware issue under heavy load where DMA can
                 * stop, this causes endless "used buffer descriptor read"
                 * interrupts but it can be cleared by re-enabling RX. See
-                * the at91 manual, section 41.3.1 or the Zynq manual
-                * section 16.7.4 for details.
+                * the at91rm9200 manual, section 41.3.1 or the Zynq manual
+                * section 16.7.4 for details. RXUBR is only enabled for
+                * these two versions.
                 */
                if (status & MACB_BIT(RXUBR)) {
                        ctrl = macb_readl(bp, NCR);
@@ -2259,7 +2259,7 @@ static void macb_init_hw(struct macb *bp)
 
                /* Enable interrupts */
                queue_writel(queue, IER,
-                            MACB_RX_INT_FLAGS |
+                            bp->rx_intr_mask |
                             MACB_TX_INT_FLAGS |
                             MACB_BIT(HRESP));
        }
@@ -3907,6 +3907,7 @@ static const struct macb_config sama5d4_config = {
 };
 
 static const struct macb_config emac_config = {
+       .caps = MACB_CAPS_NEEDS_RSTONUBR,
        .clk_init = at91ether_clk_init,
        .init = at91ether_init,
 };
@@ -3928,7 +3929,8 @@ static const struct macb_config zynqmp_config = {
 };
 
 static const struct macb_config zynq_config = {
-       .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
+       .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
+               MACB_CAPS_NEEDS_RSTONUBR,
        .dma_burst_length = 16,
        .clk_init = macb_clk_init,
        .init = macb_init,
@@ -4083,6 +4085,10 @@ static int macb_probe(struct platform_device *pdev)
                                                macb_dma_desc_get_size(bp);
        }
 
+       bp->rx_intr_mask = MACB_RX_INT_FLAGS;
+       if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
+               bp->rx_intr_mask |= MACB_BIT(RXUBR);
+
        mac = of_get_mac_address(np);
        if (mac) {
                ether_addr_copy(bp->dev->dev_addr, mac);
index 5b33238..60e7d7a 100644
@@ -2418,6 +2418,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
 out_notify_fail:
        (void)cancel_work_sync(&priv->service_task);
 out_read_prop_fail:
+       /* safe for ACPI FW */
+       of_node_put(to_of_node(priv->fwnode));
        free_netdev(ndev);
        return ret;
 }
@@ -2447,6 +2449,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
        set_bit(NIC_STATE_REMOVING, &priv->state);
        (void)cancel_work_sync(&priv->service_task);
 
+       /* safe for ACPI FW */
+       of_node_put(to_of_node(priv->fwnode));
+
        free_netdev(ndev);
        return 0;
 }
index 8e9b958..ce15d23 100644
@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
  */
 static int hns_nic_nway_reset(struct net_device *netdev)
 {
-       int ret = 0;
        struct phy_device *phy = netdev->phydev;
 
-       if (netif_running(netdev)) {
-               /* if autoneg is disabled, don't restart auto-negotiation */
-               if (phy && phy->autoneg == AUTONEG_ENABLE)
-                       ret = genphy_restart_aneg(phy);
-       }
+       if (!netif_running(netdev))
+               return 0;
 
-       return ret;
+       if (!phy)
+               return -EOPNOTSUPP;
+
+       if (phy->autoneg != AUTONEG_ENABLE)
+               return -EINVAL;
+
+       return genphy_restart_aneg(phy);
 }
 
 static u32
index 017e084..baf5cc2 100644
@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
                }
 
                hns_mdio_cmd_write(mdio_dev, is_c45,
-                                  MDIO_C45_WRITE_ADDR, phy_id, devad);
+                                  MDIO_C45_READ, phy_id, devad);
        }
 
        /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
index d719668..9292975 100644
@@ -1310,7 +1310,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
                                                dev->stats.tx_aborted_errors++;
                                }
 
-                               dev_kfree_skb_irq(skb);
+                               dev_consume_skb_irq(skb);
 
                                tx_cmd->cmd.command = 0; /* Mark free */
                                break;
index 8cfd2ec..01819e5 100644
@@ -950,7 +950,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (params->rx_dim_enabled)
                __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-       if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)
+       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
                __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
 
        return 0;
index 0473621..f2573c2 100644
@@ -1126,9 +1126,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
-       int ret;
+       int ret, pf_num;
+
+       ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
+       if (ret)
+               return ret;
+
+       if (rep->vport == FDB_UPLINK_VPORT)
+               ret = snprintf(buf, len, "p%d", pf_num);
+       else
+               ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
 
-       ret = snprintf(buf, len, "%d", rep->vport - 1);
        if (ret >= len)
                return -EOPNOTSUPP;
 
@@ -1285,6 +1293,18 @@ static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
        return 0;
 }
 
+static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+                                       __be16 vlan_proto)
+{
+       netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
+
+       if (vlan != 0)
+               return -EOPNOTSUPP;
+
+       /* allow setting 0-vid for compatibility with libvirt */
+       return 0;
+}
+
 static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
        .switchdev_port_attr_get        = mlx5e_attr_get,
 };
@@ -1319,6 +1339,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
        .ndo_set_vf_rate         = mlx5e_set_vf_rate,
        .ndo_get_vf_config       = mlx5e_get_vf_config,
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
+       .ndo_set_vf_vlan         = mlx5e_uplink_rep_set_vf_vlan,
 };
 
 bool mlx5e_eswitch_rep(struct net_device *netdev)
index a44ea7b..5b492b6 100644
@@ -1134,13 +1134,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
        int err = 0;
        u8 *smac_v;
 
-       if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
-               mlx5_core_warn(esw->dev,
-                              "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
-                              vport->vport);
-               return -EPERM;
-       }
-
        esw_vport_cleanup_ingress_rules(esw, vport);
 
        if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1728,7 +1721,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
        int vport_num;
        int err;
 
-       if (!MLX5_ESWITCH_MANAGER(dev))
+       if (!MLX5_VPORT_MANAGER(dev))
                return 0;
 
        esw_info(dev,
@@ -1797,7 +1790,7 @@ abort:
 
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 {
-       if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
+       if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
                return;
 
        esw_info(esw->dev, "cleanup\n");
@@ -1827,13 +1820,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
        mutex_lock(&esw->state_lock);
        evport = &esw->vports[vport];
 
-       if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
+       if (evport->info.spoofchk && !is_valid_ether_addr(mac))
                mlx5_core_warn(esw->dev,
-                              "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+                              "Set invalid MAC while spoofchk is on, vport(%d)\n",
                               vport);
-               err = -EPERM;
-               goto unlock;
-       }
 
        err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
        if (err) {
@@ -1979,6 +1969,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
        evport = &esw->vports[vport];
        pschk = evport->info.spoofchk;
        evport->info.spoofchk = spoofchk;
+       if (pschk && !is_valid_ether_addr(evport->info.mac))
+               mlx5_core_warn(esw->dev,
+                              "Spoofchk in set while MAC is invalid, vport(%d)\n",
+                              evport->vport);
        if (evport->enabled && esw->mode == SRIOV_LEGACY)
                err = esw_vport_ingress_config(esw, evport);
        if (err)
index 3a6baed..2d22338 100644
@@ -616,6 +616,27 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
        }
 }
 
+int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num)
+{
+       struct mlx5_lag *ldev;
+       int n;
+
+       ldev = mlx5_lag_dev_get(dev);
+       if (!ldev) {
+               mlx5_core_warn(dev, "no lag device, can't get pf num\n");
+               return -EINVAL;
+       }
+
+       for (n = 0; n < MLX5_MAX_PORTS; n++)
+               if (ldev->pf[n].dev == dev) {
+                       *pf_num = n;
+                       return 0;
+               }
+
+       mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n");
+       return -EINVAL;
+}
+
 /* Must be called with intf_mutex held */
 void mlx5_lag_remove(struct mlx5_core_dev *dev)
 {
index c68dcea..5300b0b 100644
@@ -187,6 +187,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
                    MLX5_CAP_GEN(dev, lag_master);
 }
 
+int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num);
+
 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
 void mlx5_lag_update(struct mlx5_core_dev *dev);
 
index 388f205..370ca94 100644
@@ -44,14 +44,15 @@ static struct mlx5_core_rsc_common *
 mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
 {
        struct mlx5_core_rsc_common *common;
+       unsigned long flags;
 
-       spin_lock(&table->lock);
+       spin_lock_irqsave(&table->lock, flags);
 
        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);
 
-       spin_unlock(&table->lock);
+       spin_unlock_irqrestore(&table->lock, flags);
 
        return common;
 }
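
The mlx5 fix switches mlx5_get_rsc() to spin_lock_irqsave(): once a lock
may be taken from interrupt context, every acquisition must run with
local interrupts disabled, or an interrupt arriving while the lock is
held deadlocks on that CPU; the _irqsave form also restores the previous
interrupt state, so the lookup stays safe from any calling context. The
pattern stated minimally, hypothetical ex_* names:

    #include <linux/spinlock.h>
    #include <linux/radix-tree.h>

    struct ex_table {
            spinlock_t lock;                    /* also taken in IRQ context */
            struct radix_tree_root tree;
    };

    static void *ex_lookup(struct ex_table *t, unsigned long key)
    {
            unsigned long flags;
            void *obj;

            spin_lock_irqsave(&t->lock, flags); /* safe from any context */
            obj = radix_tree_lookup(&t->tree, key);
            spin_unlock_irqrestore(&t->lock, flags);
            return obj;
    }
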
index 8f65514..2ecaaaa 100644
@@ -795,19 +795,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
 
 /* get pq index according to PQ_FLAGS */
 static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
-                                          u32 pq_flags)
+                                          unsigned long pq_flags)
 {
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 
        /* Can't have multiple flags set here */
-       if (bitmap_weight((unsigned long *)&pq_flags,
+       if (bitmap_weight(&pq_flags,
                          sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
-               DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+               DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
                goto err;
        }
 
        if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
-               DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
+               DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
                goto err;
        }
 
index 67c02ea..e68ca83 100644
@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
                          (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
 
+               SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+                         (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+                          !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
                SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & QED_ACCEPT_BCAST));
 
@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
                return rc;
        }
 
+       if (p_params->update_ctl_frame_check) {
+               p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
+               p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
+       }
+
        /* Update mcast bins for VFs, PF doesn't use this functionality */
        qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
 
@@ -2688,7 +2697,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
        if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
                accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
                                                 QED_ACCEPT_MCAST_UNMATCHED;
-               accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+               accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+                                                QED_ACCEPT_MCAST_UNMATCHED;
        } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
                accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
                accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
index 8d80f10..7127d5a 100644
@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
        struct qed_rss_params           *rss_params;
        struct qed_filter_accept_flags  accept_flags;
        struct qed_sge_tpa_params       *sge_tpa_params;
+       u8                              update_ctl_frame_check;
+       u8                              mac_chk_en;
+       u8                              ethtype_chk_en;
 };
 
 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
index d9237c6..b5f419b 100644
@@ -2451,19 +2451,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 {
        struct qed_ll2_tx_pkt_info pkt;
        const skb_frag_t *frag;
+       u8 flags = 0, nr_frags;
        int rc = -EINVAL, i;
        dma_addr_t mapping;
        u16 vlan = 0;
-       u8 flags = 0;
 
        if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
                DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
                return -EINVAL;
        }
 
-       if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+       /* Cache number of fragments from SKB since SKB may be freed by
+        * the completion routine after calling qed_ll2_prepare_tx_packet()
+        */
+       nr_frags = skb_shinfo(skb)->nr_frags;
+
+       if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
                DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
-                      1 + skb_shinfo(skb)->nr_frags);
+                      1 + nr_frags);
                return -EINVAL;
        }
 
@@ -2485,7 +2490,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
        }
 
        memset(&pkt, 0, sizeof(pkt));
-       pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
+       pkt.num_of_bds = 1 + nr_frags;
        pkt.vlan = vlan;
        pkt.bd_flags = flags;
        pkt.tx_dest = QED_LL2_TX_DEST_NW;
@@ -2496,12 +2501,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
            test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
                pkt.remove_stag = true;
 
+       /* qed_ll2_prepare_tx_packet() may actually send the packet if
+        * there are no fragments in the skb and subsequently the completion
+        * routine may run and free the SKB, so no dereferencing the SKB
+        * beyond this point unless skb has any fragments.
+        */
        rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
                                       &pkt, 1);
        if (rc)
                goto err;
 
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+       for (i = 0; i < nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
 
                mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
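
The qed_ll2 comments above spell out the hazard: once
qed_ll2_prepare_tx_packet() hands the packet over, the completion
routine may free the skb, so any field needed afterwards (here nr_frags)
must be snapshotted first. The general shape, as a sketch with
hypothetical ex_* names:

    #include <linux/netdevice.h>

    struct ex_dev {                             /* hypothetical */
            struct net_device_stats stats;
    };

    static int ex_hw_queue(struct ex_dev *dev, struct sk_buff *skb);

    static netdev_tx_t ex_start_xmit(struct ex_dev *dev, struct sk_buff *skb)
    {
            unsigned int len = skb->len;        /* snapshot before hand-off */

            if (ex_hw_queue(dev, skb))          /* completion may free skb */
                    return NETDEV_TX_BUSY;      /* skb still ours on error */

            dev->stats.tx_bytes += len;         /* no skb dereference here */
            return NETDEV_TX_OK;
    }
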
index ca6290f..71a7af1 100644
@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
        params.vport_id = vf->vport_id;
        params.max_buffers_per_cqe = start->max_buffers_per_cqe;
        params.mtu = vf->mtu;
-       params.check_mac = true;
+
+       /* Non trusted VFs should enable control frame filtering */
+       params.check_mac = !vf->p_vf_info.is_trusted_configured;
 
        rc = qed_sp_eth_vport_start(p_hwfn, &params);
        if (rc) {
@@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
                params.opaque_fid = vf->opaque_fid;
                params.vport_id = vf->vport_id;
 
+               params.update_ctl_frame_check = 1;
+               params.mac_chk_en = !vf_info->is_trusted_configured;
+
                if (vf_info->rx_accept_mode & mask) {
                        flags->update_rx_mode_config = 1;
                        flags->rx_accept_filter = vf_info->rx_accept_mode;
@@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
                }
 
                if (flags->update_rx_mode_config ||
-                   flags->update_tx_mode_config)
+                   flags->update_tx_mode_config ||
+                   params.update_ctl_frame_check)
                        qed_sp_vport_update(hwfn, &params,
                                            QED_SPQ_MODE_EBLOCK, NULL);
        }
index b6cccf4..5dda547 100644
@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
        struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
        struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
        struct vf_pf_resc_request *p_resc;
+       u8 retry_cnt = VF_ACQUIRE_THRESH;
        bool resources_acquired = false;
        struct vfpf_acquire_tlv *req;
        int rc = 0, attempts = 0;
@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
 
                /* send acquire request */
                rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+               /* Re-try acquire in case of vf-pf hw channel timeout */
+               if (retry_cnt && rc == -EBUSY) {
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "VF retrying to acquire due to VPC timeout\n");
+                       retry_cnt--;
+                       continue;
+               }
+
                if (rc)
                        goto exit;
 
index 44f6e48..4f910c4 100644
@@ -691,7 +691,7 @@ static void cp_tx (struct cp_private *cp)
                        }
                        bytes_compl += skb->len;
                        pkts_compl++;
-                       dev_kfree_skb_irq(skb);
+                       dev_consume_skb_irq(skb);
                }
 
                cp->tx_skb[tx_tail] = NULL;
index 7b92336..3b174ea 100644
@@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
        }
 
        ret = phy_power_on(bsp_priv, true);
-       if (ret)
+       if (ret) {
+               gmac_clk_enable(bsp_priv, false);
                return ret;
+       }
 
        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);
index 810dfc7..e2d47b2 100644
@@ -608,7 +608,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
                        netdev_dbg(dev, "sent 0x%p, len=%d\n",
                                   desc->skb, desc->skb->len);
 
-               dev_kfree_skb_irq(desc->skb);
+               dev_consume_skb_irq(desc->skb);
                desc->skb = NULL;
                if (__netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
index 52e47da..80f8430 100644
@@ -310,6 +310,9 @@ static int imx6_pcie_attach_pd(struct device *dev)
        imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
        if (IS_ERR(imx6_pcie->pd_pcie))
                return PTR_ERR(imx6_pcie->pd_pcie);
+       /* Do nothing when power domain missing */
+       if (!imx6_pcie->pd_pcie)
+               return 0;
        link = device_link_add(dev, imx6_pcie->pd_pcie,
                        DL_FLAG_STATELESS |
                        DL_FLAG_PM_RUNTIME |
@@ -323,13 +326,13 @@ static int imx6_pcie_attach_pd(struct device *dev)
        if (IS_ERR(imx6_pcie->pd_pcie_phy))
                return PTR_ERR(imx6_pcie->pd_pcie_phy);
 
-       device_link_add(dev, imx6_pcie->pd_pcie_phy,
+       link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
                        DL_FLAG_STATELESS |
                        DL_FLAG_PM_RUNTIME |
                        DL_FLAG_RPM_ACTIVE);
-       if (IS_ERR(link)) {
-               dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link));
-               return PTR_ERR(link);
+       if (!link) {
+               dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+               return -EINVAL;
        }
 
        return 0;
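
The imx6 hunks fix two return-convention mixups in one probe path:
dev_pm_domain_attach_by_name() may return NULL when the named power
domain simply is not described, which is not an error, and
device_link_add() reports failure as NULL rather than an ERR_PTR, so the
old IS_ERR()/PTR_ERR() check could never trip. The corrected conventions
side by side, as a fragment-style sketch:

    pd = dev_pm_domain_attach_by_name(dev, "pcie");
    if (IS_ERR(pd))
            return PTR_ERR(pd);                 /* a real attach error */
    if (!pd)
            return 0;                           /* no such domain: fine */

    link = device_link_add(dev, pd, DL_FLAG_STATELESS |
                           DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
    if (!link)                                  /* NULL means failure here */
            return -EINVAL;
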
index b171b6b..0c389a3 100644
@@ -22,7 +22,6 @@
 #include <linux/resource.h>
 #include <linux/of_pci.h>
 #include <linux/of_irq.h>
-#include <linux/gpio/consumer.h>
 
 #include "pcie-designware.h"
 
@@ -30,7 +29,6 @@ struct armada8k_pcie {
        struct dw_pcie *pci;
        struct clk *clk;
        struct clk *clk_reg;
-       struct gpio_desc *reset_gpio;
 };
 
 #define PCIE_VENDOR_REGS_OFFSET                0x8000
@@ -139,12 +137,6 @@ static int armada8k_pcie_host_init(struct pcie_port *pp)
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
 
-       if (pcie->reset_gpio) {
-               /* assert and then deassert the reset signal */
-               gpiod_set_value_cansleep(pcie->reset_gpio, 1);
-               msleep(100);
-               gpiod_set_value_cansleep(pcie->reset_gpio, 0);
-       }
        dw_pcie_setup_rc(pp);
        armada8k_pcie_establish_link(pcie);
 
@@ -257,14 +249,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
                goto fail_clkreg;
        }
 
-       /* Get reset gpio signal and hold asserted (logically high) */
-       pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
-                                                  GPIOD_OUT_HIGH);
-       if (IS_ERR(pcie->reset_gpio)) {
-               ret = PTR_ERR(pcie->reset_gpio);
-               goto fail_clkreg;
-       }
-
        platform_set_drvdata(pdev, pcie);
 
        ret = armada8k_add_pcie_port(pcie, pdev);
index 5e2109c..b5e9db8 100644
@@ -905,6 +905,7 @@ config TOSHIBA_WMI
 config ACPI_CMPC
        tristate "CMPC Laptop Extras"
        depends on ACPI && INPUT
+       depends on BACKLIGHT_LCD_SUPPORT
        depends on RFKILL || RFKILL=n
        select BACKLIGHT_CLASS_DEVICE
        help
@@ -1128,6 +1129,7 @@ config INTEL_OAKTRAIL
 config SAMSUNG_Q10
        tristate "Samsung Q10 Extras"
        depends on ACPI
+       depends on BACKLIGHT_LCD_SUPPORT
        select BACKLIGHT_CLASS_DEVICE
        ---help---
          This driver provides support for backlight control on Samsung Q10
index 9cf30d1..e390f8c 100644 (file)
@@ -403,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
                goto failed;
 
        /* report size limit per scatter-gather segment */
-       adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
        adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
 
        adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
index 00acc71..f4f6a07 100644 (file)
@@ -428,6 +428,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
        .max_sectors             = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
                                     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
                                   /* GCD, adjusted later */
+       /* report size limit per scatter-gather segment */
+       .max_segment_size        = ZFCP_QDIO_SBALE_LEN,
        .dma_boundary            = ZFCP_QDIO_SBALE_LEN - 1,
        .shost_attrs             = zfcp_sysfs_shost_attrs,
        .sdev_attrs              = zfcp_sysfs_sdev_attrs,
index 128d658..16957d7 100644 (file)
@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
        if(tpnt->sdev_attrs == NULL)
                tpnt->sdev_attrs = NCR_700_dev_attrs;
 
-       memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
+       memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
                                 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
        if(memory == NULL) {
                printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
index 350257c..bc9f2a2 100644 (file)
@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
                return NULL;
        }
 
+       cmgr->hba = hba;
        cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
                                  GFP_KERNEL);
        if (!cmgr->free_list) {
@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
                goto mem_err;
        }
 
-       cmgr->hba = hba;
        cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
 
        for (i = 0; i < arr_sz; i++)  {
@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
 
        /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
        mem_size = num_ios * sizeof(struct io_bdt *);
-       cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+       cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
        if (!cmgr->io_bdt_pool) {
                printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
                goto mem_err;
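Both bnx2fc changes harden the error path: cmgr->hba is assigned before the first failure can reach cleanup code that reads it, and io_bdt_pool switches to kzalloc() so teardown can kfree() every slot without tracking how many were actually filled. A sketch of that idiom, with hypothetical names:

    #include <linux/slab.h>

    struct foo { int payload; };

    static int example_alloc_table(struct foo ***table_ret, int n)
    {
            struct foo **table;
            int i;

            /* kzalloc: unfilled slots stay NULL, so blanket cleanup is safe */
            table = kzalloc(n * sizeof(*table), GFP_KERNEL);
            if (!table)
                    return -ENOMEM;

            for (i = 0; i < n; i++) {
                    table[i] = kzalloc(sizeof(*table[i]), GFP_KERNEL);
                    if (!table[i])
                            goto err;
            }
            *table_ret = table;
            return 0;
    err:
            for (i = 0; i < n; i++)
                    kfree(table[i]);        /* kfree(NULL) is a no-op */
            kfree(table);
            return -ENOMEM;
    }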
index be83590..ff943f4 100644 (file)
@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
            fc_frame_payload_op(fp) != ELS_LS_ACC) {
                FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
                fc_lport_error(lport, fp);
-               goto err;
+               goto out;
        }
 
        flp = fc_frame_payload_get(fp, sizeof(*flp));
        if (!flp) {
                FC_LPORT_DBG(lport, "FLOGI bad response\n");
                fc_lport_error(lport, fp);
-               goto err;
+               goto out;
        }
 
        mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
                FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
                             "lport->mfs:%hu\n", mfs, lport->mfs);
                fc_lport_error(lport, fp);
-               goto err;
+               goto out;
        }
 
        if (mfs <= lport->mfs) {
index 661512b..e27f4df 100644 (file)
@@ -62,7 +62,7 @@
 
 /* make sure inq_product_rev string corresponds to this version */
 #define SDEBUG_VERSION "0188"  /* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20180128";
+static const char *sdebug_version_date = "20190125";
 
 #define MY_NAME "scsi_debug"
 
@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
                (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
 }
 
-static void *fake_store(unsigned long long lba)
+static void *lba2fake_store(unsigned long long lba)
 {
        lba = do_div(lba, sdebug_store_sectors);
 
@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
        return ret;
 }
 
-/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into fake_store(lba,num) and return true. If comparison fails then
+/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
+ * arr into lba2fake_store(lba,num) and return true. If comparison fails then
  * return false. */
 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
 {
@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
                if (sdt->app_tag == cpu_to_be16(0xffff))
                        continue;
 
-               ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+               ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
                if (ret) {
                        dif_errors++;
                        return ret;
@@ -3261,10 +3261,12 @@ err_out:
 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
                           u32 ei_lba, bool unmap, bool ndob)
 {
+       int ret;
        unsigned long iflags;
        unsigned long long i;
-       int ret;
-       u64 lba_off;
+       u32 lb_size = sdebug_sector_size;
+       u64 block, lbaa;
+       u8 *fs1p;
 
        ret = check_device_access_params(scp, lba, num);
        if (ret)
@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
                unmap_region(lba, num);
                goto out;
        }
-
-       lba_off = lba * sdebug_sector_size;
+       lbaa = lba;
+       block = do_div(lbaa, sdebug_store_sectors);
        /* if ndob then zero 1 logical block, else fetch 1 logical block */
+       fs1p = fake_storep + (block * lb_size);
        if (ndob) {
-               memset(fake_storep + lba_off, 0, sdebug_sector_size);
+               memset(fs1p, 0, lb_size);
                ret = 0;
        } else
-               ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
-                                         sdebug_sector_size);
+               ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
 
        if (-1 == ret) {
                write_unlock_irqrestore(&atomic_rw, iflags);
                return DID_ERROR << 16;
-       } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
+       } else if (sdebug_verbose && !ndob && (ret < lb_size))
                sdev_printk(KERN_INFO, scp->device,
                            "%s: %s: lb size=%u, IO sent=%d bytes\n",
-                           my_name, "write same",
-                           sdebug_sector_size, ret);
+                           my_name, "write same", lb_size, ret);
 
        /* Copy first sector to remaining blocks */
-       for (i = 1 ; i < num ; i++)
-               memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
-                      fake_storep + lba_off,
-                      sdebug_sector_size);
-
+       for (i = 1 ; i < num ; i++) {
+               lbaa = lba + i;
+               block = do_div(lbaa, sdebug_store_sectors);
+               memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+       }
        if (scsi_debug_lbp())
                map_region(lba, num);
 out:
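The rewritten resp_write_same() maps every LBA into the bounded fake store with do_div() instead of indexing fake_storep by the raw LBA, which could run past the store once lba + num exceeds sdebug_store_sectors (memmove() additionally tolerates the overlap that wrapping can create). do_div() has an easy-to-misread contract, sketched here:

    #include <linux/types.h>
    #include <asm/div64.h>

    /*
     * do_div(n, base) is a macro: it divides the u64 'n' in place
     * (n becomes the quotient) and *returns* the remainder.
     */
    static u32 example_wrap(u64 lba, u32 store_sectors)
    {
            u64 lbaa = lba;         /* will be clobbered by do_div() */
            u32 block = do_div(lbaa, store_sectors);

            /* here: lbaa == lba / store_sectors, block == lba % store_sectors */
            return block;
    }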
index e1a551a..ce81523 100644 (file)
 #include <linux/serial_core.h>
 #include <asm/sbi.h>
 
-static void sbi_console_write(struct console *con,
-                             const char *s, unsigned int n)
+static void sbi_putc(struct uart_port *port, int c)
 {
-       int i;
+       sbi_console_putchar(c);
+}
 
-       for (i = 0; i < n; ++i)
-               sbi_console_putchar(s[i]);
+static void sbi_console_write(struct console *con,
+                             const char *s, unsigned n)
+{
+       struct earlycon_device *dev = con->data;
+       uart_console_write(&dev->port, s, n, sbi_putc);
 }
 
 static int __init early_sbi_setup(struct earlycon_device *device,
index bca86bf..df51a35 100644 (file)
@@ -1337,7 +1337,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
                n->vqs[i].rx_ring = NULL;
                vhost_net_buf_init(&n->vqs[i].rxq);
        }
-       vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
+       vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+                      UIO_MAXIOV + VHOST_NET_BATCH);
 
        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
index 344684f..23593cb 100644 (file)
@@ -1627,7 +1627,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
                vqs[i] = &vs->vqs[i].vq;
                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
-       vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+       vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
 
        vhost_scsi_init_inflight(vs, NULL);
 
index 15a216c..24a129f 100644 (file)
@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
                vq->indirect = kmalloc_array(UIO_MAXIOV,
                                             sizeof(*vq->indirect),
                                             GFP_KERNEL);
-               vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
+               vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
                                        GFP_KERNEL);
-               vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
+               vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
                                          GFP_KERNEL);
                if (!vq->indirect || !vq->log || !vq->heads)
                        goto err_nomem;
@@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 }
 
 void vhost_dev_init(struct vhost_dev *dev,
-                   struct vhost_virtqueue **vqs, int nvqs)
+                   struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
 {
        struct vhost_virtqueue *vq;
        int i;
@@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
        dev->iotlb = NULL;
        dev->mm = NULL;
        dev->worker = NULL;
+       dev->iov_limit = iov_limit;
        init_llist_head(&dev->work_list);
        init_waitqueue_head(&dev->wait);
        INIT_LIST_HEAD(&dev->read_list);
index 1b675da..9490e7d 100644 (file)
@@ -170,9 +170,11 @@ struct vhost_dev {
        struct list_head read_list;
        struct list_head pending_list;
        wait_queue_head_t wait;
+       int iov_limit;
 };
 
-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+                   int nvqs, int iov_limit);
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
index 3fbc068..bb5fc0e 100644 (file)
@@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
 
-       vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+       vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
 
        file->private_data = vsock;
        spin_lock_init(&vsock->send_pkt_list_lock);
index d441244..28d9c2b 100644 (file)
@@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb,
        pkt.len = dentry->d_name.len;
        memcpy(pkt.name, dentry->d_name.name, pkt.len);
        pkt.name[pkt.len] = '\0';
-       dput(dentry);
 
        if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
                ret = -EFAULT;
@@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb,
        complete_all(&ino->expire_complete);
        spin_unlock(&sbi->fs_lock);
 
+       dput(dentry);
+
        return ret;
 }
 
index 0e8ea2d..078992e 100644 (file)
@@ -266,8 +266,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
        }
        root_inode = autofs_get_inode(s, S_IFDIR | 0755);
        root = d_make_root(root_inode);
-       if (!root)
+       if (!root) {
+               ret = -ENOMEM;
                goto fail_ino;
+       }
        pipe = NULL;
 
        root->d_fsdata = ino;
index f64aad6..5a6c39b 100644 (file)
@@ -968,6 +968,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+static struct extent_buffer *alloc_tree_block_no_bg_flush(
+                                         struct btrfs_trans_handle *trans,
+                                         struct btrfs_root *root,
+                                         u64 parent_start,
+                                         const struct btrfs_disk_key *disk_key,
+                                         int level,
+                                         u64 hint,
+                                         u64 empty_size)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct extent_buffer *ret;
+
+       /*
+        * If we are COWing a node/leaf from the extent, chunk, device or free
+        * space trees, make sure that we do not finish block group creation of
+        * pending block groups. We do this to avoid a deadlock.
+        * COWing can result in allocation of a new chunk, and flushing pending
+        * block groups (btrfs_create_pending_block_groups()) can be triggered
+        * when finishing allocation of a new chunk. Creation of a pending block
+        * group modifies the extent, chunk, device and free space trees,
+        * therefore we could deadlock with ourselves since we are holding a
+        * lock on an extent buffer that btrfs_create_pending_block_groups() may
+        * try to COW later.
+        * For similar reasons, we also need to delay flushing pending block
+        * groups when splitting a leaf or node, from one of those trees, since
+        * we are holding a write lock on it and its parent or when inserting a
+        * new root node for one of those trees.
+        */
+       if (root == fs_info->extent_root ||
+           root == fs_info->chunk_root ||
+           root == fs_info->dev_root ||
+           root == fs_info->free_space_root)
+               trans->can_flush_pending_bgs = false;
+
+       ret = btrfs_alloc_tree_block(trans, root, parent_start,
+                                    root->root_key.objectid, disk_key, level,
+                                    hint, empty_size);
+       trans->can_flush_pending_bgs = true;
+
+       return ret;
+}
+
 /*
  * does the dirty work in cow of a single block.  The parent block (if
  * supplied) is updated to point to the new cow copy.  The new buffer is marked
@@ -1015,28 +1057,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
        if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
                parent_start = parent->start;
 
-       /*
-        * If we are COWing a node/leaf from the extent, chunk, device or free
-        * space trees, make sure that we do not finish block group creation of
-        * pending block groups. We do this to avoid a deadlock.
-        * COWing can result in allocation of a new chunk, and flushing pending
-        * block groups (btrfs_create_pending_block_groups()) can be triggered
-        * when finishing allocation of a new chunk. Creation of a pending block
-        * group modifies the extent, chunk, device and free space trees,
-        * therefore we could deadlock with ourselves since we are holding a
-        * lock on an extent buffer that btrfs_create_pending_block_groups() may
-        * try to COW later.
-        */
-       if (root == fs_info->extent_root ||
-           root == fs_info->chunk_root ||
-           root == fs_info->dev_root ||
-           root == fs_info->free_space_root)
-               trans->can_flush_pending_bgs = false;
-
-       cow = btrfs_alloc_tree_block(trans, root, parent_start,
-                       root->root_key.objectid, &disk_key, level,
-                       search_start, empty_size);
-       trans->can_flush_pending_bgs = true;
+       cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
+                                          level, search_start, empty_size);
        if (IS_ERR(cow))
                return PTR_ERR(cow);
 
@@ -3345,8 +3367,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
        else
                btrfs_node_key(lower, &lower_key, 0);
 
-       c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-                                  &lower_key, level, root->node->start, 0);
+       c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
+                                        root->node->start, 0);
        if (IS_ERR(c))
                return PTR_ERR(c);
 
@@ -3475,8 +3497,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
        mid = (c_nritems + 1) / 2;
        btrfs_node_key(c, &disk_key, mid);
 
-       split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-                       &disk_key, level, c->start, 0);
+       split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
+                                            c->start, 0);
        if (IS_ERR(split))
                return PTR_ERR(split);
 
@@ -4260,8 +4282,8 @@ again:
        else
                btrfs_item_key(l, &disk_key, mid);
 
-       right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-                       &disk_key, 0, l->start, 0);
+       right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
+                                            l->start, 0);
        if (IS_ERR(right))
                return PTR_ERR(right);
 
index c5586ff..0a3f122 100644 (file)
@@ -1621,6 +1621,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
                                flags | SB_RDONLY, device_name, data);
                        if (IS_ERR(mnt_root)) {
                                root = ERR_CAST(mnt_root);
+                               kfree(subvol_name);
                                goto out;
                        }
 
@@ -1630,12 +1631,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
                        if (error < 0) {
                                root = ERR_PTR(error);
                                mntput(mnt_root);
+                               kfree(subvol_name);
                                goto out;
                        }
                }
        }
        if (IS_ERR(mnt_root)) {
                root = ERR_CAST(mnt_root);
+               kfree(subvol_name);
                goto out;
        }
 
index 127fa15..4ec2b66 100644 (file)
@@ -850,14 +850,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
        btrfs_trans_release_chunk_metadata(trans);
 
-       if (lock && should_end_transaction(trans) &&
-           READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
-               spin_lock(&info->trans_lock);
-               if (cur_trans->state == TRANS_STATE_RUNNING)
-                       cur_trans->state = TRANS_STATE_BLOCKED;
-               spin_unlock(&info->trans_lock);
-       }
-
        if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
                if (throttle)
                        return btrfs_commit_transaction(trans);
@@ -1879,6 +1871,21 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
 }
 
+/*
+ * Release reserved delayed ref space of all pending block groups of the
+ * transaction and remove them from the list
+ */
+static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
+{
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+       struct btrfs_block_group_cache *block_group, *tmp;
+
+       list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
+               btrfs_delayed_refs_rsv_release(fs_info, 1);
+               list_del_init(&block_group->bg_list);
+       }
+}
+
 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
        /*
@@ -2270,6 +2277,7 @@ scrub_continue:
        btrfs_scrub_continue(fs_info);
 cleanup_transaction:
        btrfs_trans_release_metadata(trans);
+       btrfs_cleanup_pending_block_groups(trans);
        btrfs_trans_release_chunk_metadata(trans);
        trans->block_rsv = NULL;
        btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
index 3e4f8f8..1556192 100644 (file)
@@ -957,11 +957,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
                else
                        fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
 
-               fs_devices->fsid_change = fsid_change_in_progress;
-
                if (IS_ERR(fs_devices))
                        return ERR_CAST(fs_devices);
 
+               fs_devices->fsid_change = fsid_change_in_progress;
+
                mutex_lock(&fs_devices->device_list_mutex);
                list_add(&fs_devices->fs_list, &fs_uuids);
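The reorder fixes a dereference-before-check: alloc_fs_devices() returns an ERR_PTR on failure, and writing fs_devices->fsid_change through that encoded errno is an invalid access. A minimal sketch of the required ordering, with a hypothetical allocator:

    #include <linux/err.h>
    #include <linux/types.h>

    struct thing { bool flag; };

    static struct thing *example_alloc(void);   /* hypothetical: ERR_PTR on failure */

    static struct thing *example_get(void)
    {
            struct thing *t = example_alloc();

            if (IS_ERR(t))          /* check before the first dereference */
                    return t;       /* propagate the encoded errno */

            t->flag = true;
            return t;
    }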
 
index d1f9c2f..7652551 100644 (file)
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.16"
+#define CIFS_VERSION   "2.17"
 #endif                         /* _CIFSFS_H */
index 2c7689f..659ce1b 100644 (file)
@@ -2696,6 +2696,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
 
                        rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
                        if (rc) {
+                               kvfree(wdata->pages);
                                kfree(wdata);
                                add_credits_and_wake_if(server, credits, 0);
                                break;
@@ -2707,6 +2708,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
                        if (rc) {
                                for (i = 0; i < nr_pages; i++)
                                        put_page(wdata->pages[i]);
+                               kvfree(wdata->pages);
                                kfree(wdata);
                                add_credits_and_wake_if(server, credits, 0);
                                break;
@@ -3386,8 +3388,12 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
                        }
 
                        rc = cifs_read_allocate_pages(rdata, npages);
-                       if (rc)
-                               goto error;
+                       if (rc) {
+                               kvfree(rdata->pages);
+                               kfree(rdata);
+                               add_credits_and_wake_if(server, credits, 0);
+                               break;
+                       }
 
                        rdata->tailsz = PAGE_SIZE;
                }
@@ -3407,7 +3413,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
                if (!rdata->cfile->invalidHandle ||
                    !(rc = cifs_reopen_file(rdata->cfile, true)))
                        rc = server->ops->async_readv(rdata);
-error:
                if (rc) {
                        add_credits_and_wake_if(server, rdata->credits, 0);
                        kref_put(&rdata->refcount,
index 153238f..6f96e22 100644 (file)
@@ -866,7 +866,9 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
                                      FILE_READ_EA,
                                      FILE_FULL_EA_INFORMATION,
                                      SMB2_O_INFO_FILE,
-                                     SMB2_MAX_EA_BUF,
+                                     CIFSMaxBufSize -
+                                     MAX_SMB2_CREATE_RESPONSE_SIZE -
+                                     MAX_SMB2_CLOSE_RESPONSE_SIZE,
                                      &rsp_iov, &buftype, cifs_sb);
        if (rc) {
                /*
index 2ff209e..77b3aaa 100644 (file)
@@ -3241,8 +3241,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
                rdata->mr = NULL;
        }
 #endif
-       if (rdata->result)
+       if (rdata->result && rdata->result != -ENODATA) {
                cifs_stats_fail_inc(tcon, SMB2_READ_HE);
+               trace_smb3_read_err(0 /* xid */,
+                                   rdata->cfile->fid.persistent_fid,
+                                   tcon->tid, tcon->ses->Suid, rdata->offset,
+                                   rdata->bytes, rdata->result);
+       } else
+               trace_smb3_read_done(0 /* xid */,
+                                    rdata->cfile->fid.persistent_fid,
+                                    tcon->tid, tcon->ses->Suid,
+                                    rdata->offset, rdata->got_bytes);
 
        queue_work(cifsiod_wq, &rdata->work);
        DeleteMidQEntry(mid);
@@ -3317,13 +3326,11 @@ smb2_async_readv(struct cifs_readdata *rdata)
        if (rc) {
                kref_put(&rdata->refcount, cifs_readdata_release);
                cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
-               trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid,
-                                  io_parms.tcon->tid, io_parms.tcon->ses->Suid,
-                                  io_parms.offset, io_parms.length);
-       } else
-               trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid,
-                                  io_parms.tcon->tid, io_parms.tcon->ses->Suid,
-                                  io_parms.offset, io_parms.length);
+               trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
+                                   io_parms.tcon->tid,
+                                   io_parms.tcon->ses->Suid,
+                                   io_parms.offset, io_parms.length, rc);
+       }
 
        cifs_small_buf_release(buf);
        return rc;
@@ -3367,10 +3374,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
                if (rc != -ENODATA) {
                        cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
                        cifs_dbg(VFS, "Send error in read = %d\n", rc);
+                       trace_smb3_read_err(xid, req->PersistentFileId,
+                                           io_parms->tcon->tid, ses->Suid,
+                                           io_parms->offset, io_parms->length,
+                                           rc);
                }
-               trace_smb3_read_err(rc, xid, req->PersistentFileId,
-                                   io_parms->tcon->tid, ses->Suid,
-                                   io_parms->offset, io_parms->length);
                free_rsp_buf(resp_buftype, rsp_iov.iov_base);
                return rc == -ENODATA ? 0 : rc;
        } else
@@ -3459,8 +3467,17 @@ smb2_writev_callback(struct mid_q_entry *mid)
                wdata->mr = NULL;
        }
 #endif
-       if (wdata->result)
+       if (wdata->result) {
                cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
+               trace_smb3_write_err(0 /* no xid */,
+                                    wdata->cfile->fid.persistent_fid,
+                                    tcon->tid, tcon->ses->Suid, wdata->offset,
+                                    wdata->bytes, wdata->result);
+       } else
+               trace_smb3_write_done(0 /* no xid */,
+                                     wdata->cfile->fid.persistent_fid,
+                                     tcon->tid, tcon->ses->Suid,
+                                     wdata->offset, wdata->bytes);
 
        queue_work(cifsiod_wq, &wdata->work);
        DeleteMidQEntry(mid);
@@ -3602,10 +3619,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
                                     wdata->bytes, rc);
                kref_put(&wdata->refcount, release);
                cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
-       } else
-               trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
-                                    tcon->tid, tcon->ses->Suid, wdata->offset,
-                                    wdata->bytes);
+       }
 
 async_writev_out:
        cifs_small_buf_release(req);
@@ -3831,8 +3845,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
                    rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
                        srch_inf->endOfSearch = true;
                        rc = 0;
-               }
-               cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+               } else
+                       cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
                goto qdir_exit;
        }
 
@@ -4427,8 +4441,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
        rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
-       please_key_low = (__u64 *)req->LeaseKey;
-       please_key_high = (__u64 *)(req->LeaseKey+8);
+       please_key_low = (__u64 *)lease_key;
+       please_key_high = (__u64 *)(lease_key+8);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
                trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
index 7a2d0a2..538e229 100644 (file)
@@ -84,8 +84,9 @@
 
 #define NUMBER_OF_SMB2_COMMANDS        0x0013
 
-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
-#define MAX_SMB2_HDR_SIZE 0x00b0
+/* 52 transform hdr + 64 hdr + 88 create rsp */
+#define SMB2_TRANSFORM_HEADER_SIZE 52
+#define MAX_SMB2_HDR_SIZE 204
 
 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
 #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
@@ -648,6 +649,13 @@ struct smb2_create_req {
        __u8   Buffer[0];
 } __packed;
 
+/*
+ * Maximum size of a SMB2_CREATE response is 64 (smb2 header) +
+ * 88 (fixed part of create response) + 520 (path) + 150 (contexts) +
+ * 2 bytes of padding.
+ */
+#define MAX_SMB2_CREATE_RESPONSE_SIZE 824
+
 struct smb2_create_rsp {
        struct smb2_sync_hdr sync_hdr;
        __le16 StructureSize;   /* Must be 89 */
@@ -996,6 +1004,11 @@ struct smb2_close_req {
        __u64  VolatileFileId; /* opaque endianness */
 } __packed;
 
+/*
+ * Maximum size of a SMB2_CLOSE response is 64 (smb2 header) + 60 (data)
+ */
+#define MAX_SMB2_CLOSE_RESPONSE_SIZE 124
+
 struct smb2_close_rsp {
        struct smb2_sync_hdr sync_hdr;
        __le16 StructureSize; /* 60 */
@@ -1398,8 +1411,6 @@ struct smb2_file_link_info { /* encoding of request for level 11 */
        char   FileName[0];     /* Name to be assigned to new link */
 } __packed; /* level 11 Set */
 
-#define SMB2_MAX_EA_BUF 65536
-
 struct smb2_file_full_ea_info { /* encoding of response for level 15 */
        __le32 next_entry_offset;
        __u8   flags;
index 2593153..aac41ad 100644 (file)
@@ -119,6 +119,7 @@ struct dentry_stat_t dentry_stat = {
 
 static DEFINE_PER_CPU(long, nr_dentry);
 static DEFINE_PER_CPU(long, nr_dentry_unused);
+static DEFINE_PER_CPU(long, nr_dentry_negative);
 
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 
@@ -152,11 +153,22 @@ static long get_nr_dentry_unused(void)
        return sum < 0 ? 0 : sum;
 }
 
+static long get_nr_dentry_negative(void)
+{
+       int i;
+       long sum = 0;
+
+       for_each_possible_cpu(i)
+               sum += per_cpu(nr_dentry_negative, i);
+       return sum < 0 ? 0 : sum;
+}
+
 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
                   size_t *lenp, loff_t *ppos)
 {
        dentry_stat.nr_dentry = get_nr_dentry();
        dentry_stat.nr_unused = get_nr_dentry_unused();
+       dentry_stat.nr_negative = get_nr_dentry_negative();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
 #endif
@@ -317,6 +329,8 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
        flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
        WRITE_ONCE(dentry->d_flags, flags);
        dentry->d_inode = NULL;
+       if (dentry->d_flags & DCACHE_LRU_LIST)
+               this_cpu_inc(nr_dentry_negative);
 }
 
 static void dentry_free(struct dentry *dentry)
@@ -371,6 +385,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
  * The per-cpu "nr_dentry_unused" counters are updated with
  * the DCACHE_LRU_LIST bit.
  *
+ * The per-cpu "nr_dentry_negative" counters are only updated
+ * when deleted from or added to the per-superblock LRU list, not
+ * from/to the shrink list. That is to avoid an unneeded dec/inc
+ * pair when moving from LRU to shrink list in select_collect().
+ *
  * These helper functions make sure we always follow the
  * rules. d_lock must be held by the caller.
  */
@@ -380,6 +399,8 @@ static void d_lru_add(struct dentry *dentry)
        D_FLAG_VERIFY(dentry, 0);
        dentry->d_flags |= DCACHE_LRU_LIST;
        this_cpu_inc(nr_dentry_unused);
+       if (d_is_negative(dentry))
+               this_cpu_inc(nr_dentry_negative);
        WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 }
 
@@ -388,6 +409,8 @@ static void d_lru_del(struct dentry *dentry)
        D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
        dentry->d_flags &= ~DCACHE_LRU_LIST;
        this_cpu_dec(nr_dentry_unused);
+       if (d_is_negative(dentry))
+               this_cpu_dec(nr_dentry_negative);
        WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 }
 
@@ -418,6 +441,8 @@ static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
        D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
        dentry->d_flags &= ~DCACHE_LRU_LIST;
        this_cpu_dec(nr_dentry_unused);
+       if (d_is_negative(dentry))
+               this_cpu_dec(nr_dentry_negative);
        list_lru_isolate(lru, &dentry->d_lru);
 }
 
@@ -426,6 +451,8 @@ static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
 {
        D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
        dentry->d_flags |= DCACHE_SHRINK_LIST;
+       if (d_is_negative(dentry))
+               this_cpu_dec(nr_dentry_negative);
        list_lru_isolate_move(lru, &dentry->d_lru, list);
 }
 
@@ -1188,15 +1215,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
  */
 void shrink_dcache_sb(struct super_block *sb)
 {
-       long freed;
-
        do {
                LIST_HEAD(dispose);
 
-               freed = list_lru_walk(&sb->s_dentry_lru,
+               list_lru_walk(&sb->s_dentry_lru,
                        dentry_lru_isolate_shrink, &dispose, 1024);
-
-               this_cpu_sub(nr_dentry_unused, freed);
                shrink_dentry_list(&dispose);
        } while (list_lru_count(&sb->s_dentry_lru) > 0);
 }
@@ -1820,6 +1843,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
        WARN_ON(d_in_lookup(dentry));
 
        spin_lock(&dentry->d_lock);
+       /*
+        * Decrement negative dentry count if it was in the LRU list.
+        */
+       if (dentry->d_flags & DCACHE_LRU_LIST)
+               this_cpu_dec(nr_dentry_negative);
        hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
        raw_write_seqcount_begin(&dentry->d_seq);
        __d_set_inode_and_type(dentry, inode, add_flags);
index 8237701..d31b6c7 100644 (file)
@@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
+               /*
+                * We must skip inodes in an unusual state. We may also skip
+                * inodes without pages, but we deliberately don't when a
+                * reschedule is pending, to avoid softlockups.
+                */
                if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
-                   (inode->i_mapping->nrpages == 0)) {
+                   (inode->i_mapping->nrpages == 0 && !need_resched())) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
@@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
                spin_unlock(&inode->i_lock);
                spin_unlock(&sb->s_inode_list_lock);
 
+               cond_resched();
                invalidate_mapping_pages(inode->i_mapping, 0, -1);
                iput(toput_inode);
                toput_inode = inode;
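The reworked skip condition is subtle: page-less inodes are still skipped, except when a reschedule is pending, which guarantees the loop reaches the new cond_resched() even on a superblock populated only by page-less inodes. For context, a sketch reconstructing the surrounding loop, including the deferred iput() (dropping the last reference can sleep, so it must happen outside the list lock):

    #include <linux/fs.h>
    #include <linux/sched.h>

    static void example_walk(struct super_block *sb)
    {
            struct inode *inode, *toput_inode = NULL;

            spin_lock(&sb->s_inode_list_lock);
            list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                    spin_lock(&inode->i_lock);
                    if ((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) ||
                        (inode->i_mapping->nrpages == 0 && !need_resched())) {
                            spin_unlock(&inode->i_lock);
                            continue;
                    }
                    __iget(inode);                  /* pin before dropping locks */
                    spin_unlock(&inode->i_lock);
                    spin_unlock(&sb->s_inode_list_lock);

                    cond_resched();
                    invalidate_mapping_pages(inode->i_mapping, 0, -1);

                    iput(toput_inode);              /* deferred from last round */
                    toput_inode = inode;

                    spin_lock(&sb->s_inode_list_lock);
            }
            spin_unlock(&sb->s_inode_list_lock);
            iput(toput_inode);
    }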
index 831d7cb..17a8d3b 100644 (file)
@@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
                        goto next_iter;
                }
                if (ret == -E2BIG) {
-                       n += rbm->bii - initial_bii;
                        rbm->bii = 0;
                        rbm->offset = 0;
+                       n += (rbm->bii - initial_bii);
                        goto res_covered_end_of_rgrp;
                }
                return ret;
index a3088fa..897c602 100644 (file)
@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page)
        atomic_set(&iop->read_count, 0);
        atomic_set(&iop->write_count, 0);
        bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+
+       /*
+        * migrate_page_move_mapping() assumes that pages with private data have
+        * their count elevated by 1.
+        */
+       get_page(page);
        set_page_private(page, (unsigned long)iop);
        SetPagePrivate(page);
        return iop;
@@ -132,6 +138,7 @@ iomap_page_release(struct page *page)
        WARN_ON_ONCE(atomic_read(&iop->write_count));
        ClearPagePrivate(page);
        set_page_private(page, 0);
+       put_page(page);
        kfree(iop);
 }
 
@@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
 
        if (page_has_private(page)) {
                ClearPagePrivate(page);
+               get_page(newpage);
                set_page_private(newpage, page_private(page));
                set_page_private(page, 0);
+               put_page(page);
                SetPagePrivate(newpage);
        }
 
@@ -1804,6 +1813,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        loff_t pos = iocb->ki_pos, start = pos;
        loff_t end = iocb->ki_pos + count - 1, ret = 0;
        unsigned int flags = IOMAP_DIRECT;
+       bool wait_for_completion = is_sync_kiocb(iocb);
        struct blk_plug plug;
        struct iomap_dio *dio;
 
@@ -1823,7 +1833,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        dio->end_io = end_io;
        dio->error = 0;
        dio->flags = 0;
-       dio->wait_for_completion = is_sync_kiocb(iocb);
 
        dio->submit.iter = iter;
        dio->submit.waiter = current;
@@ -1878,7 +1887,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                dio_warn_stale_pagecache(iocb->ki_filp);
        ret = 0;
 
-       if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
+       if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
            !inode->i_sb->s_dio_done_wq) {
                ret = sb_init_dio_done_wq(inode->i_sb);
                if (ret < 0)
@@ -1894,7 +1903,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                if (ret <= 0) {
                        /* magic error code to fall back to buffered I/O */
                        if (ret == -ENOTBLK) {
-                               dio->wait_for_completion = true;
+                               wait_for_completion = true;
                                ret = 0;
                        }
                        break;
@@ -1916,8 +1925,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        if (dio->flags & IOMAP_DIO_WRITE_FUA)
                dio->flags &= ~IOMAP_DIO_NEED_SYNC;
 
+       /*
+        * We are about to drop our additional submission reference, which
+        * might be the last reference to the dio.  There are three
+        * different ways we can progress here:
+        *
+        *  (a) If this is the last reference we will always complete and free
+        *      the dio ourselves.
+        *  (b) If this is not the last reference, and we serve an asynchronous
+        *      iocb, we must never touch the dio after the decrement, the
+        *      I/O completion handler will complete and free it.
+        *  (c) If this is not the last reference, but we serve a synchronous
+        *      iocb, the I/O completion handler will wake us up on the drop
+        *      of the final reference, and we will complete and free it here
+        *      after we got woken by the I/O completion handler.
+        */
+       dio->wait_for_completion = wait_for_completion;
        if (!atomic_dec_and_test(&dio->ref)) {
-               if (!dio->wait_for_completion)
+               if (!wait_for_completion)
                        return -EIOCBQUEUED;
 
                for (;;) {
@@ -1934,9 +1959,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                __set_current_state(TASK_RUNNING);
        }
 
-       ret = iomap_dio_complete(dio);
-
-       return ret;
+       return iomap_dio_complete(dio);
 
 out_free_dio:
        kfree(dio);
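The key structural change is that wait_for_completion became a local: atomic_dec_and_test() may release the last submission reference, after which the completion handler can free the dio, so dio->wait_for_completion must never be read after the decrement. The general rule, sketched for a hypothetical refcounted object:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct obj {
            atomic_t ref;
            bool sync;      /* analogous to dio->wait_for_completion */
    };

    static int example_drop_submission_ref(struct obj *o)
    {
            bool sync = o->sync;    /* copy out what we need first... */

            if (!atomic_dec_and_test(&o->ref)) {
                    /* ...because the completer may free 'o' from here on */
                    if (!sync)
                            return -EIOCBQUEUED;
                    return 0;       /* sync case: sleep until the completer wakes us */
            }

            /* last reference: complete and free it ourselves */
            kfree(o);
            return 0;
    }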
index 22ce3c8..0570391 100644 (file)
@@ -1895,6 +1895,11 @@ static int nfs_parse_devname(const char *dev_name,
        size_t len;
        char *end;
 
+       if (unlikely(!dev_name || !*dev_name)) {
+               dfprintk(MOUNT, "NFS: device name not specified\n");
+               return -EINVAL;
+       }
+
        /* Is the host name protected with square brackets? */
        if (*dev_name == '[') {
                end = strchr(++dev_name, ']');
index 5a0bbf9..f12cb31 100644 (file)
@@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
        nfs_set_page_writeback(page);
        WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
 
-       ret = 0;
+       ret = req->wb_context->error;
        /* If there is a fatal error that covers this write, just exit */
-       if (nfs_error_is_fatal_on_server(req->wb_context->error))
+       if (nfs_error_is_fatal_on_server(ret))
                goto out_launder;
 
+       ret = 0;
        if (!nfs_pageio_add_request(pgio, req)) {
                ret = pgio->pg_error;
                /*
@@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                        nfs_context_set_write_error(req->wb_context, ret);
                        if (nfs_error_is_fatal_on_server(ret))
                                goto out_launder;
-               }
+               } else
+                       ret = -EAGAIN;
                nfs_redirty_request(req);
-               ret = -EAGAIN;
        } else
                nfs_add_stats(page_file_mapping(page)->host,
                                NFSIOS_WRITEPAGES, 1);
index 8ae1094..e39bac9 100644 (file)
@@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
                inode = proc_get_inode(dir->i_sb, de);
                if (!inode)
                        return ERR_PTR(-ENOMEM);
-               d_set_d_op(dentry, &proc_misc_dentry_ops);
+               d_set_d_op(dentry, de->proc_dops);
                return d_splice_alias(inode, dentry);
        }
        read_unlock(&proc_subdir_lock);
@@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
        INIT_LIST_HEAD(&ent->pde_openers);
        proc_set_user(ent, (*parent)->uid, (*parent)->gid);
 
+       ent->proc_dops = &proc_misc_dentry_ops;
+
 out:
        return ent;
 }
index 5185d7f..95b1419 100644 (file)
@@ -44,6 +44,7 @@ struct proc_dir_entry {
        struct completion *pde_unload_completion;
        const struct inode_operations *proc_iops;
        const struct file_operations *proc_fops;
+       const struct dentry_operations *proc_dops;
        union {
                const struct seq_operations *seq_ops;
                int (*single_show)(struct seq_file *, void *);
index d5e0fcb..a7b1243 100644 (file)
@@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode)
        return maybe_get_net(PDE_NET(PDE(inode)));
 }
 
+static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       return 0;
+}
+
+static const struct dentry_operations proc_net_dentry_ops = {
+       .d_revalidate   = proc_net_d_revalidate,
+       .d_delete       = always_delete_dentry,
+};
+
+static void pde_force_lookup(struct proc_dir_entry *pde)
+{
+       /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
+       pde->proc_dops = &proc_net_dentry_ops;
+}
+
 static int seq_open_net(struct inode *inode, struct file *file)
 {
        unsigned int state_size = PDE(inode)->state_size;
@@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
        p = proc_create_reg(name, mode, &parent, data);
        if (!p)
                return NULL;
+       pde_force_lookup(p);
        p->proc_fops = &proc_net_seq_fops;
        p->seq_ops = ops;
        p->state_size = state_size;
@@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
        p = proc_create_reg(name, mode, &parent, data);
        if (!p)
                return NULL;
+       pde_force_lookup(p);
        p->proc_fops = &proc_net_seq_fops;
        p->seq_ops = ops;
        p->state_size = state_size;
@@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
        p = proc_create_reg(name, mode, &parent, data);
        if (!p)
                return NULL;
+       pde_force_lookup(p);
        p->proc_fops = &proc_net_single_fops;
        p->single_show = show;
        return proc_register(parent, p);
@@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
        p = proc_create_reg(name, mode, &parent, data);
        if (!p)
                return NULL;
+       pde_force_lookup(p);
        p->proc_fops = &proc_net_single_fops;
        p->single_show = show;
        p->write = write;
index 7b24fc7..228a5e2 100644 (file)
@@ -71,7 +71,6 @@
 #define MMP2_CLK_CCIC1_MIX             117
 #define MMP2_CLK_CCIC1_PHY             118
 #define MMP2_CLK_CCIC1_SPHY            119
-#define MMP2_CLK_SP                    120
 
 #define MMP2_NR_CLKS                   200
 #endif
index ef4b70f..60996e6 100644 (file)
@@ -62,9 +62,10 @@ extern const struct qstr slash_name;
 struct dentry_stat_t {
        long nr_dentry;
        long nr_unused;
-       long age_limit;          /* age in seconds */
-       long want_pages;         /* pages requested by system */
-       long dummy[2];
+       long age_limit;         /* age in seconds */
+       long want_pages;        /* pages requested by system */
+       long nr_negative;       /* # of unused negative dentries */
+       long dummy;             /* Reserved for future use */
 };
 extern struct dentry_stat_t dentry_stat;
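dentry_stat is what /proc/sys/fs/dentry-state reports, so with this patch the unused-negative-dentry count lands in the fifth of the six fields and the sixth becomes reserved. A small userspace reader, assuming the layout above:

    #include <stdio.h>

    int main(void)
    {
            long nr_dentry, nr_unused, age_limit, want_pages, nr_negative, dummy;
            FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

            /* field 5 is nr_negative only on kernels carrying this change */
            if (!f || fscanf(f, "%ld %ld %ld %ld %ld %ld",
                             &nr_dentry, &nr_unused, &age_limit,
                             &want_pages, &nr_negative, &dummy) != 6)
                    return 1;
            printf("dentries=%ld unused=%ld negative=%ld\n",
                   nr_dentry, nr_unused, nr_negative);
            fclose(f);
            return 0;
    }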
 
index 811c777..29d8e2c 100644 (file)
@@ -1479,11 +1479,12 @@ struct super_block {
        struct user_namespace *s_user_ns;
 
        /*
-        * Keep the lru lists last in the structure so they always sit on their
-        * own individual cachelines.
+        * The list_lru structure is essentially just a pointer to a table
+        * of per-node lru lists, each of which has its own spinlock.
+        * There is no need to put them into separate cachelines.
         */
-       struct list_lru         s_dentry_lru ____cacheline_aligned_in_smp;
-       struct list_lru         s_inode_lru ____cacheline_aligned_in_smp;
+       struct list_lru         s_dentry_lru;
+       struct list_lru         s_inode_lru;
        struct rcu_head         rcu;
        struct work_struct      destroy_work;
 
index e7d29ae..971cf76 100644 (file)
@@ -615,6 +615,7 @@ struct ide_drive_s {
 
        /* current sense rq and buffer */
        bool sense_rq_armed;
+       bool sense_rq_active;
        struct request *sense_rq;
        struct request_sense sense_data;
 
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
 extern void ide_timer_expiry(struct timer_list *t);
 extern irqreturn_t ide_intr(int irq, void *dev_id);
 extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
 
 void ide_init_disk(struct gendisk *, ide_drive_t *);
index 07da5c6..368267c 100644 (file)
@@ -21,14 +21,16 @@ struct vmem_altmap;
  * walkers which rely on the fully initialized page->flags and others
  * should use this rather than pfn_valid && pfn_to_page
  */
-#define pfn_to_online_page(pfn)                                \
-({                                                     \
-       struct page *___page = NULL;                    \
-       unsigned long ___nr = pfn_to_section_nr(pfn);   \
-                                                       \
-       if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
-               ___page = pfn_to_page(pfn);             \
-       ___page;                                        \
+#define pfn_to_online_page(pfn)                                           \
+({                                                                \
+       struct page *___page = NULL;                               \
+       unsigned long ___pfn = pfn;                                \
+       unsigned long ___nr = pfn_to_section_nr(___pfn);           \
+                                                                  \
+       if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
+           pfn_valid_within(___pfn))                              \
+               ___page = pfn_to_page(___pfn);                     \
+       ___page;                                                   \
 })
 
 /*
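Besides adding the pfn_valid_within() check, the rewritten macro copies its argument into ___pfn so it is evaluated exactly once; the old body expanded pfn twice, which misbehaves for arguments with side effects. A compilable demonstration of the hazard, using the same GNU statement-expression style:

    #include <stdio.h>

    /* Old style: the argument expands twice. */
    #define PICK_TWICE(pfn) ({ unsigned long __r = 0; \
            if ((pfn) < 100) __r = (pfn); __r; })

    /* New style: evaluate once into a local, then reuse it. */
    #define PICK_ONCE(pfn) ({ unsigned long __p = (pfn), __r = 0; \
            if (__p < 100) __r = __p; __r; })

    int main(void)
    {
            unsigned long i = 0, j = 0;

            PICK_TWICE(i++);        /* i ends up 2 */
            PICK_ONCE(j++);         /* j ends up 1 */
            printf("i=%lu j=%lu\n", i, j);
            return 0;
    }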
index 54af4ee..fed5be7 100644 (file)
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
 
 static inline void pm_runtime_mark_last_busy(struct device *dev)
 {
-       WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
+       WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
 }
 
 static inline bool pm_runtime_is_irq_safe(struct device *dev)
index ec912d0..ecdc654 100644 (file)
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_HUGE_ZERO_PAGE     23      /* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP                24      /* disable THP for all VMAs */
 #define MMF_OOM_VICTIM         25      /* mm is the oom victim */
+#define MMF_OOM_REAP_QUEUED    26      /* mm was queued for oom_reaper */
 #define MMF_DISABLE_THP_MASK   (1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK          (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
index 2a6ac8d..1486b60 100644 (file)
@@ -120,6 +120,8 @@ struct tls_rec {
        struct scatterlist sg_aead_out[2];
 
        char aad_space[TLS_AAD_SPACE_SIZE];
+       u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE +
+                  TLS_CIPHER_AES_GCM_128_SALT_SIZE];
        struct aead_request aead_req;
        u8 aead_req_ctx[];
 };
index a3ceed3..80debf5 100644 (file)
@@ -2579,9 +2579,10 @@ struct ib_device {
 
        const struct uapi_definition   *driver_def;
        enum rdma_driver_id             driver_id;
+
        /*
-        * Provides synchronization between device unregistration and netlink
-        * commands on a device. To be used only by core.
+        * Positive refcount indicates that the device is currently
+        * registered and cannot be unregistered.
         */
        refcount_t refcount;
        struct completion unreg_completion;
@@ -3926,6 +3927,25 @@ static inline bool ib_access_writable(int access_flags)
 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
                       struct ib_mr_status *mr_status);
 
+/**
+ * ib_device_try_get - Hold a registration lock
+ * @dev: The device to lock
+ *
+ * A device under an active registration lock cannot become unregistered. It
+ * is only possible to obtain a registration lock on a device that is fully
+ * registered; otherwise this function returns false.
+ *
+ * The registration lock is only necessary for actions which require the
+ * device to still be registered. Uses that only require the device pointer to
+ * be valid should use get_device(&ibdev->dev) to hold the memory.
+ *
+ */
+static inline bool ib_device_try_get(struct ib_device *dev)
+{
+       return refcount_inc_not_zero(&dev->refcount);
+}
+
+void ib_device_put(struct ib_device *device);
 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
                                            u16 pkey, const union ib_gid *gid,
                                            const struct sockaddr *addr);
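ib_device_try_get() is the conditional-hold idiom built on refcount_inc_not_zero(): a caller either gets a device guaranteed to stay registered until the matching ib_device_put(), or learns the device is already on its way out. A usage sketch with a hypothetical caller:

    #include <rdma/ib_verbs.h>

    /* Hypothetical caller that needs the device to remain registered. */
    static int example_query(struct ib_device *ibdev)
    {
            if (!ib_device_try_get(ibdev))
                    return -ENODEV;     /* unregistration already in progress */

            /* ... use ibdev; it cannot finish unregistering here ... */

            ib_device_put(ibdev);
            return 0;
    }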
index ef3c7ec..eb76b38 100644 (file)
@@ -52,6 +52,11 @@ struct hns_roce_ib_create_srq {
        __aligned_u64 que_addr;
 };
 
+struct hns_roce_ib_create_srq_resp {
+       __u32   srqn;
+       __u32   reserved;
+};
+
 struct hns_roce_ib_create_qp {
        __aligned_u64 buf_addr;
        __aligned_u64 db_addr;
index 513fa54..c9386a3 100644 (file)
@@ -512,6 +512,17 @@ config PSI_DEFAULT_DISABLED
          per default but can be enabled through passing psi=1 on the
          kernel commandline during boot.
 
+         This feature adds some code to the task wakeup and sleep
+         paths of the scheduler. The overhead is too low to affect
+         common scheduling-intense workloads in practice (such as
+         webservers, memcache), but it does show up in artificial
+         scheduler stress tests, such as hackbench.
+
+         If you are paranoid and not sure what the kernel will be
+         used for, say Y.
+
+         Say N if unsure.
+
 endmenu # "CPU/Task time and stats accounting"
 
 config CPU_ISOLATION
@@ -825,7 +836,7 @@ config CGROUP_PIDS
          PIDs controller is designed to stop this from happening.
 
          It should be noted that organisational operations (such as attaching
-         to a cgroup hierarchy will *not* be blocked by the PIDs controller),
+         to a cgroup hierarchy) will *not* be blocked by the PIDs controller,
          since the PIDs limit only affects a process's ability to fork, not to
          attach to a cgroup.
 
index 3cd13a3..e5ede69 100644 (file)
@@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
 {
-       int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-
-       if (ret || !write)
-               return ret;
-
+       int ret;
+       int perf_cpu = sysctl_perf_cpu_time_max_percent;
        /*
         * If throttling is disabled don't allow the write:
         */
-       if (sysctl_perf_cpu_time_max_percent == 100 ||
-           sysctl_perf_cpu_time_max_percent == 0)
+       if (write && (perf_cpu == 100 || perf_cpu == 0))
                return -EINVAL;
 
+       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       if (ret || !write)
+               return ret;
+
        max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
        perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
        update_perf_cpu_limits();
index 3fb7be0..2639a30 100644 (file)
@@ -558,12 +558,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
        return NULL;
 }
 
-static struct task_struct *find_child_reaper(struct task_struct *father)
+static struct task_struct *find_child_reaper(struct task_struct *father,
+                                               struct list_head *dead)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
 {
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *reaper = pid_ns->child_reaper;
+       struct task_struct *p, *n;
 
        if (likely(reaper != father))
                return reaper;
@@ -579,6 +581,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
                panic("Attempted to kill init! exitcode=0x%08x\n",
                        father->signal->group_exit_code ?: father->exit_code);
        }
+
+       list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+               list_del_init(&p->ptrace_entry);
+               release_task(p);
+       }
+
        zap_pid_ns_processes(pid_ns);
        write_lock_irq(&tasklist_lock);
 
@@ -668,7 +676,7 @@ static void forget_original_parent(struct task_struct *father,
                exit_ptrace(father, dead);
 
        /* Can drop and reacquire tasklist_lock */
-       reaper = find_child_reaper(father);
+       reaper = find_child_reaper(father, dead);
        if (list_empty(&father->children))
                return;
 
index fe24de3..c348478 100644 (file)
  * sampling of the aggregate task states would be.
  */
 
+#include "../workqueue_internal.h"
 #include <linux/sched/loadavg.h>
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
@@ -480,9 +481,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
                        groupc->tasks[t]++;
 
        write_seqcount_end(&groupc->seq);
-
-       if (!delayed_work_pending(&group->clock_work))
-               schedule_delayed_work(&group->clock_work, PSI_FREQ);
 }
 
 static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
@@ -513,6 +511,7 @@ void psi_task_change(struct task_struct *task, int clear, int set)
 {
        int cpu = task_cpu(task);
        struct psi_group *group;
+       bool wake_clock = true;
        void *iter = NULL;
 
        if (!task->pid)
@@ -530,8 +529,22 @@ void psi_task_change(struct task_struct *task, int clear, int set)
        task->psi_flags &= ~clear;
        task->psi_flags |= set;
 
-       while ((group = iterate_groups(task, &iter)))
+       /*
+        * Periodic aggregation shuts off if there is a period of no
+        * task changes, so we wake it back up if necessary. However,
+        * don't do this if the task change is the aggregation worker
+        * itself going to sleep, or we'll ping-pong forever.
+        */
+       if (unlikely((clear & TSK_RUNNING) &&
+                    (task->flags & PF_WQ_WORKER) &&
+                    wq_worker_last_func(task) == psi_update_work))
+               wake_clock = false;
+
+       while ((group = iterate_groups(task, &iter))) {
                psi_group_change(group, cpu, clear, set);
+               if (wake_clock && !delayed_work_pending(&group->clock_work))
+                       schedule_delayed_work(&group->clock_work, PSI_FREQ);
+       }
 }
 
 void psi_memstall_tick(struct task_struct *task, int cpu)
index 392be4b..fc5d23d 100644 (file)
@@ -910,6 +910,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
 }
 
 /**
+ * wq_worker_last_func - retrieve worker's last work function
+ * @task: Task to retrieve last work function of.
+ * Determine the last function a worker executed. This is called from
+ * the scheduler to get a worker's last known identity.
+ *
+ * CONTEXT:
+ * spin_lock_irq(rq->lock)
+ *
+ * Return:
+ * The last work function %current executed as a worker, NULL if it
+ * hasn't executed any work yet.
+ */
+work_func_t wq_worker_last_func(struct task_struct *task)
+{
+       struct worker *worker = kthread_data(task);
+
+       return worker->last_func;
+}
+
+/**
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
  * @flags: flags to set
@@ -2184,6 +2204,9 @@ __acquires(&pool->lock)
        if (unlikely(cpu_intensive))
                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
+       /* tag the worker for identification in schedule() */
+       worker->last_func = worker->current_func;
+
        /* we're done with it, release */
        hash_del(&worker->hentry);
        worker->current_work = NULL;
index 66fbb5a..cb68b03 100644 (file)
@@ -53,6 +53,9 @@ struct worker {
 
        /* used only by rescuers to point to the target workqueue */
        struct workqueue_struct *rescue_wq;     /* I: the workqueue to rescue */
+
+       /* used by the scheduler to determine a worker's last known identity */
+       work_func_t             last_func;
 };
 
 /**
@@ -67,9 +70,10 @@ static inline struct worker *current_wq_worker(void)
 
 /*
  * Scheduler hooks for concurrency managed workqueue.  Only to be used from
- * sched/core.c and workqueue.c.
+ * sched/ and workqueue.c.
  */
 void wq_worker_waking_up(struct task_struct *task, int cpu);
 struct task_struct *wq_worker_sleeping(struct task_struct *task);
+work_func_t wq_worker_last_func(struct task_struct *task);
 
 #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
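
wq_worker_last_func() dereferences kthread_data(task) unconditionally, so it is only safe on tasks known to be workqueue workers; that is why the psi hunk above tests PF_WQ_WORKER before calling it. A hedged caller-side sketch (task_last_wq_func_or_null() is illustrative, not part of this patch):

#include <linux/sched.h>
#include "workqueue_internal.h"

/* Illustrative helper, not in the patch: returns the last work function of
 * @task, or NULL when @task is not a workqueue worker and kthread_data()
 * would therefore not point at a struct worker. Called under rq->lock. */
static inline work_func_t task_last_wq_func_or_null(struct task_struct *task)
{
	if (!(task->flags & PF_WQ_WORKER))
		return NULL;
	return wq_worker_last_func(task);
}
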
index d82d022..9cf7762 100644 (file)
@@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config)
        config->test_driver = NULL;
 
        kfree_const(config->test_fs);
-       config->test_driver = NULL;
+       config->test_fs = NULL;
 }
 
 static void kmod_config_free(struct kmod_test_device *test_dev)
index df2e7dd..afef616 100644 (file)
@@ -4268,7 +4268,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                break;
                        }
                        if (ret & VM_FAULT_RETRY) {
-                               if (nonblocking)
+                               if (nonblocking &&
+                                   !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                                        *nonblocking = 0;
                                *nr_pages = 0;
                                /*
index 0a14fcf..e2bb06c 100644 (file)
@@ -5,6 +5,7 @@ UBSAN_SANITIZE_generic.o := n
 UBSAN_SANITIZE_tags.o := n
 KCOV_INSTRUMENT := n
 
+CFLAGS_REMOVE_common.o = -pg
 CFLAGS_REMOVE_generic.o = -pg
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
index 7c72f2a..831be5f 100644 (file)
@@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
                        if (fail || tk->addr_valid == 0) {
                                pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
                                       pfn, tk->tsk->comm, tk->tsk->pid);
-                               force_sig(SIGKILL, tk->tsk);
+                               do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
+                                                tk->tsk, PIDTYPE_PID);
                        }
 
                        /*
index b9a667d..124e794 100644 (file)
@@ -1233,7 +1233,8 @@ static bool is_pageblock_removable_nolock(struct page *page)
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
        struct page *page = pfn_to_page(start_pfn);
-       struct page *end_page = page + nr_pages;
+       unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
+       struct page *end_page = pfn_to_page(end_pfn);
 
        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
@@ -1273,6 +1274,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
                                i++;
                        if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
                                continue;
+                       /* Check if we got outside of the zone */
+                       if (zone && !zone_spans_pfn(zone, pfn + i))
+                               return 0;
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
                                return 0;
@@ -1301,23 +1305,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 {
        unsigned long pfn;
-       struct page *page;
+
        for (pfn = start; pfn < end; pfn++) {
-               if (pfn_valid(pfn)) {
-                       page = pfn_to_page(pfn);
-                       if (PageLRU(page))
-                               return pfn;
-                       if (__PageMovable(page))
-                               return pfn;
-                       if (PageHuge(page)) {
-                               if (hugepage_migration_supported(page_hstate(page)) &&
-                                   page_huge_active(page))
-                                       return pfn;
-                               else
-                                       pfn = round_up(pfn + 1,
-                                               1 << compound_order(page)) - 1;
-                       }
-               }
+               struct page *page, *head;
+               unsigned long skip;
+
+               if (!pfn_valid(pfn))
+                       continue;
+               page = pfn_to_page(pfn);
+               if (PageLRU(page))
+                       return pfn;
+               if (__PageMovable(page))
+                       return pfn;
+
+               if (!PageHuge(page))
+                       continue;
+               head = compound_head(page);
+               if (hugepage_migration_supported(page_hstate(head)) &&
+                   page_huge_active(head))
+                       return pfn;
+               skip = (1 << compound_order(head)) - (page - head);
+               pfn += skip - 1;
        }
        return 0;
 }
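
The skip arithmetic above compensates for entering a compound page part-way through, rather than assuming the scan starts at the head. A worked example with illustrative numbers:

/*
 * Worked example (illustrative numbers): order-9 hugepage, head at pfn 512.
 * Entering the loop at pfn 522:
 *     page - head     = 10
 *     skip            = (1 << 9) - 10 = 502
 *     pfn += skip - 1 -> pfn = 1023
 *     for-loop pfn++  -> pfn = 1024, the first pfn past the hugepage
 */
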
@@ -1344,7 +1352,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long pfn;
        struct page *page;
-       int not_managed = 0;
        int ret = 0;
        LIST_HEAD(source);
 
@@ -1392,7 +1399,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                else
                        ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
                if (!ret) { /* Success */
-                       put_page(page);
                        list_add_tail(&page->lru, &source);
                        if (!__PageMovable(page))
                                inc_node_page_state(page, NR_ISOLATED_ANON +
@@ -1401,22 +1407,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                } else {
                        pr_warn("failed to isolate pfn %lx\n", pfn);
                        dump_page(page, "isolation failed");
-                       put_page(page);
-                       /* Because we don't have big zone->lock. we should
-                          check this again here. */
-                       if (page_count(page)) {
-                               not_managed++;
-                               ret = -EBUSY;
-                               break;
-                       }
                }
+               put_page(page);
        }
        if (!list_empty(&source)) {
-               if (not_managed) {
-                       putback_movable_pages(&source);
-                       goto out;
-               }
-
                /* Allocate a new page from the nearest neighbor node */
                ret = migrate_pages(&source, new_node_page, NULL, 0,
                                        MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
@@ -1429,7 +1423,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                        putback_movable_pages(&source);
                }
        }
-out:
+
        return ret;
 }
 
@@ -1576,7 +1570,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
           we assume this for now. */
        if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
                                  &valid_end)) {
-               mem_hotplug_done();
                ret = -EINVAL;
                reason = "multizone range";
                goto failed_removal;
@@ -1591,7 +1584,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
                                       MIGRATE_MOVABLE,
                                       SKIP_HWPOISON | REPORT_FAILURE);
        if (ret) {
-               mem_hotplug_done();
                reason = "failure to isolate range";
                goto failed_removal;
        }
index a16b150..d4fd680 100644 (file)
@@ -709,7 +709,6 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
        /* Simple case, sync compaction */
        if (mode != MIGRATE_ASYNC) {
                do {
-                       get_bh(bh);
                        lock_buffer(bh);
                        bh = bh->b_this_page;
 
@@ -720,18 +719,15 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 
        /* async case, we cannot block on lock_buffer so use trylock_buffer */
        do {
-               get_bh(bh);
                if (!trylock_buffer(bh)) {
                        /*
                         * We failed to lock the buffer and cannot stall in
                         * async migration. Release the taken locks
                         */
                        struct buffer_head *failed_bh = bh;
-                       put_bh(failed_bh);
                        bh = head;
                        while (bh != failed_bh) {
                                unlock_buffer(bh);
-                               put_bh(bh);
                                bh = bh->b_this_page;
                        }
                        return false;
@@ -818,7 +814,6 @@ unlock_buffers:
        bh = head;
        do {
                unlock_buffer(bh);
-               put_bh(bh);
                bh = bh->b_this_page;
 
        } while (bh != head);
@@ -1135,10 +1130,13 @@ out:
         * If migration is successful, decrease refcount of the newpage
         * which will not free the page because new page owner increased
         * refcounter. As well, if it is LRU page, add the page to LRU
-        * list in here.
+        * list in here. Use the old state of the isolated source page to
+        * determine if we migrated a LRU page. newpage was already unlocked
+        * and possibly modified by its owner - don't rely on the page
+        * state.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
-               if (unlikely(__PageMovable(newpage)))
+               if (unlikely(!is_lru))
                        put_page(newpage);
                else
                        putback_lru_page(newpage);
index f0e8cd9..26ea863 100644 (file)
@@ -647,8 +647,8 @@ static int oom_reaper(void *unused)
 
 static void wake_oom_reaper(struct task_struct *tsk)
 {
-       /* tsk is already queued? */
-       if (tsk == oom_reaper_list || tsk->oom_reaper_list)
+       /* mm is already queued? */
+       if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
                return;
 
        get_task_struct(tsk);
@@ -975,6 +975,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
+
+       /*
+        * The task 'p' might have already exited before reaching here. The
+        * put_task_struct() would then free task_struct 'p' while the loop
+        * still tries to access the fields of 'p', so get an extra reference.
+        */
+       get_task_struct(p);
        for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;
@@ -994,6 +1001,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
                        }
                }
        }
+       put_task_struct(p);
        read_unlock(&tasklist_lock);
 
        /*
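
The MMF_OOM_REAP_QUEUED test in wake_oom_reaper() relies on test_and_set_bit() atomically returning the bit's previous value, which makes queueing idempotent without the old racy pointer checks. A hedged sketch of the idiom (queue_once() is illustrative, not from this patch):

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative only: the first caller to set @bit sees the old value 0 and
 * should enqueue; every later caller sees 1 and backs off. */
static bool queue_once(unsigned long *flags, int bit)
{
	return !test_and_set_bit(bit, flags);
}
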
index 5e55cef..6693e20 100644 (file)
@@ -2293,9 +2293,12 @@ static int compat_do_replace(struct net *net, void __user *user,
 
        xt_compat_lock(NFPROTO_BRIDGE);
 
-       ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
-       if (ret < 0)
-               goto out_unlock;
+       if (tmp.nentries) {
+               ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
+               if (ret < 0)
+                       goto out_unlock;
+       }
+
        ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
        if (ret < 0)
                goto out_unlock;
index 82f2002..8e276e0 100644 (file)
@@ -8712,6 +8712,9 @@ int init_dummy_netdev(struct net_device *dev)
        set_bit(__LINK_STATE_PRESENT, &dev->state);
        set_bit(__LINK_STATE_START, &dev->state);
 
+       /* napi_busy_loop stats accounting wants this */
+       dev_net_set(dev, &init_net);
+
        /* Note: We don't allocate pcpu_refcnt for dummy devices,
         * because users of this 'device' don't need to change
         * its refcount.
index d0b3e69..0962f92 100644 (file)
@@ -56,7 +56,7 @@
 #include <net/dn_neigh.h>
 #include <net/dn_fib.h>
 
-#define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn))
+#define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn))
 
 static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
 static char dn_rt_all_rt_mcast[ETH_ALEN]  = {0xAB,0x00,0x00,0x03,0x00,0x00};
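
The offsetof() form is the robust way to size a "fixed header plus variable payload" block: sizeof(struct ifreq) - sizeof(struct sockaddr) silently mis-counts whenever the ifr_ifru union is not exactly sockaddr-sized. A hedged sketch of the pattern (struct req is illustrative, not struct ifreq itself):

#include <stddef.h>

/* Illustrative layout only. */
struct req {
	char name[16];
	union {
		int  small;
		char big[24];	/* the union may exceed any single member */
	} u;
};

/* Bytes of header before the payload, independent of the union's size: */
#define REQ_HDR_LEN offsetof(struct req, u)
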
index d7b43e7..68a21bf 100644 (file)
@@ -74,6 +74,33 @@ drop:
        return 0;
 }
 
+static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
+                    int encap_type)
+{
+       struct ip_tunnel *tunnel;
+       const struct iphdr *iph = ip_hdr(skb);
+       struct net *net = dev_net(skb->dev);
+       struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+
+       tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+                                 iph->saddr, iph->daddr, 0);
+       if (tunnel) {
+               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
+
+               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+
+               skb->dev = tunnel->dev;
+
+               return xfrm_input(skb, nexthdr, spi, encap_type);
+       }
+
+       return -EINVAL;
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
 static int vti_rcv(struct sk_buff *skb)
 {
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
@@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb)
        return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
 }
 
+static int vti_rcv_ipip(struct sk_buff *skb)
+{
+       XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+       XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+       return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
+}
+
 static int vti_rcv_cb(struct sk_buff *skb, int err)
 {
        unsigned short family;
@@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
        .priority       =       100,
 };
 
+static struct xfrm_tunnel ipip_handler __read_mostly = {
+       .handler        =       vti_rcv_ipip,
+       .err_handler    =       vti4_err,
+       .priority       =       0,
+};
+
 static int __net_init vti_init_net(struct net *net)
 {
        int err;
@@ -603,6 +644,13 @@ static int __init vti_init(void)
        if (err < 0)
                goto xfrm_proto_comp_failed;
 
+       msg = "ipip tunnel";
+       err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
+       if (err < 0) {
+               pr_info("%s: can't register tunnel\n", __func__);
+               goto xfrm_tunnel_failed;
+       }
+
        msg = "netlink interface";
        err = rtnl_link_register(&vti_link_ops);
        if (err < 0)
@@ -612,6 +660,8 @@ static int __init vti_init(void)
 
 rtnl_link_failed:
        xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+xfrm_tunnel_failed:
+       xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
 xfrm_proto_comp_failed:
        xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 xfrm_proto_ah_failed:
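
The new xfrm_tunnel_failed label slots into the usual kernel error-unwind ladder: each label undoes only the registrations that succeeded before the failing step, newest first. A minimal self-contained sketch of the idiom (register_a/register_b are illustrative stand-ins):

/* Illustrative stand-ins for the real registration steps. */
static int  register_a(void)   { return 0; }
static void unregister_a(void) { }
static int  register_b(void)   { return 0; }

static int example_init(void)
{
	int err;

	err = register_a();
	if (err)
		return err;

	err = register_b();
	if (err)
		goto err_b;

	return 0;

err_b:
	unregister_a();		/* unwind in reverse order, newest first */
	return err;
}
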
index b61977d..2a909e5 100644 (file)
@@ -846,9 +846,9 @@ static int clusterip_net_init(struct net *net)
 
 static void clusterip_net_exit(struct net *net)
 {
+#ifdef CONFIG_PROC_FS
        struct clusterip_net *cn = clusterip_pernet(net);
 
-#ifdef CONFIG_PROC_FS
        mutex_lock(&cn->mutex);
        proc_remove(cn->procdir);
        cn->procdir = NULL;
index 30337b3..cc01aa3 100644 (file)
@@ -1516,6 +1516,9 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
                        continue;
                rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
                list_del_rcu(&c->list);
+               call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
+                                              FIB_EVENT_ENTRY_DEL,
+                                              (struct mfc6_cache *)c, mrt->id);
                mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
                mr_cache_put(c);
        }
@@ -1524,10 +1527,6 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
                spin_lock_bh(&mfc_unres_lock);
                list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
                        list_del(&c->list);
-                       call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
-                                                      FIB_EVENT_ENTRY_DEL,
-                                                      (struct mfc6_cache *)c,
-                                                      mrt->id);
                        mr6_netlink_event(mrt, (struct mfc6_cache *)c,
                                          RTM_DELROUTE);
                        ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
index 432141f..7d63186 100644 (file)
@@ -2221,6 +2221,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user
                  u->udp_timeout);
 
 #ifdef CONFIG_IP_VS_PROTO_TCP
+       if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
+           u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
+               return -EINVAL;
+       }
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+       if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
+               return -EINVAL;
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_TCP
        if (u->tcp_timeout) {
                pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
                pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
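
The INT_MAX / HZ bound exists because the timeout is later multiplied by HZ into an int-sized jiffies value; anything larger wraps. A hedged userspace illustration (the HZ value is an assumption):

#include <limits.h>
#include <stdio.h>

#define HZ 1000		/* assumed config; the kernel's bound is INT_MAX / HZ */

int main(void)
{
	long secs = (long)INT_MAX / HZ + 1;	/* first rejected value: 2147484 */
	long product = secs * HZ;		/* 2147484000 > INT_MAX */

	printf("%ld s * HZ = %ld, but INT_MAX = %d -> int conversion wraps\n",
	       secs, product, INT_MAX);
	return 0;
}
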
index 6f41dd7..1f1d90c 100644 (file)
@@ -66,6 +66,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
                             int ttl_check,
                             struct nf_osf_hdr_ctx *ctx)
 {
+       const __u8 *optpinit = ctx->optp;
        unsigned int check_WSS = 0;
        int fmatch = FMATCH_WRONG;
        int foptsize, optnum;
@@ -155,6 +156,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
                }
        }
 
+       if (fmatch != FMATCH_OK)
+               ctx->optp = optpinit;
+
        return fmatch == FMATCH_OK;
 }
 
index 7334e0b..5eb2694 100644 (file)
 #include <linux/netfilter_bridge/ebtables.h>
 #include <linux/netfilter_arp/arp_tables.h>
 #include <net/netfilter/nf_tables.h>
+#include <net/netns/generic.h>
 
 struct nft_xt {
        struct list_head        head;
        struct nft_expr_ops     ops;
-       unsigned int            refcnt;
+       refcount_t              refcnt;
+
+       /* used only when transaction mutex is locked */
+       unsigned int            listcnt;
 
        /* Unlike other expressions, ops doesn't have static storage duration.
         * nft core assumes they do.  We use kfree_rcu so that nft core can
@@ -43,10 +47,24 @@ struct nft_xt_match_priv {
        void *info;
 };
 
+struct nft_compat_net {
+       struct list_head nft_target_list;
+       struct list_head nft_match_list;
+};
+
+static unsigned int nft_compat_net_id __read_mostly;
+static struct nft_expr_type nft_match_type;
+static struct nft_expr_type nft_target_type;
+
+static struct nft_compat_net *nft_compat_pernet(struct net *net)
+{
+       return net_generic(net, nft_compat_net_id);
+}
+
 static bool nft_xt_put(struct nft_xt *xt)
 {
-       if (--xt->refcnt == 0) {
-               list_del(&xt->head);
+       if (refcount_dec_and_test(&xt->refcnt)) {
+               WARN_ON_ONCE(!list_empty(&xt->head));
                kfree_rcu(xt, rcu_head);
                return true;
        }
@@ -273,7 +291,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                return -EINVAL;
 
        nft_xt = container_of(expr->ops, struct nft_xt, ops);
-       nft_xt->refcnt++;
+       refcount_inc(&nft_xt->refcnt);
        return 0;
 }
 
@@ -486,7 +504,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                return ret;
 
        nft_xt = container_of(expr->ops, struct nft_xt, ops);
-       nft_xt->refcnt++;
+       refcount_inc(&nft_xt->refcnt);
        return 0;
 }
 
@@ -540,6 +558,43 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
        __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
 }
 
+static void nft_compat_activate(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr,
+                               struct list_head *h)
+{
+       struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
+
+       if (xt->listcnt == 0)
+               list_add(&xt->head, h);
+
+       xt->listcnt++;
+}
+
+static void nft_compat_activate_mt(const struct nft_ctx *ctx,
+                                  const struct nft_expr *expr)
+{
+       struct nft_compat_net *cn = nft_compat_pernet(ctx->net);
+
+       nft_compat_activate(ctx, expr, &cn->nft_match_list);
+}
+
+static void nft_compat_activate_tg(const struct nft_ctx *ctx,
+                                  const struct nft_expr *expr)
+{
+       struct nft_compat_net *cn = nft_compat_pernet(ctx->net);
+
+       nft_compat_activate(ctx, expr, &cn->nft_target_list);
+}
+
+static void nft_compat_deactivate(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr)
+{
+       struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
+
+       if (--xt->listcnt == 0)
+               list_del_init(&xt->head);
+}
+
 static void
 nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
@@ -734,10 +789,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
        .cb             = nfnl_nft_compat_cb,
 };
 
-static LIST_HEAD(nft_match_list);
-
-static struct nft_expr_type nft_match_type;
-
 static bool nft_match_cmp(const struct xt_match *match,
                          const char *name, u32 rev, u32 family)
 {
@@ -749,6 +800,7 @@ static const struct nft_expr_ops *
 nft_match_select_ops(const struct nft_ctx *ctx,
                     const struct nlattr * const tb[])
 {
+       struct nft_compat_net *cn;
        struct nft_xt *nft_match;
        struct xt_match *match;
        unsigned int matchsize;
@@ -765,8 +817,10 @@ nft_match_select_ops(const struct nft_ctx *ctx,
        rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
        family = ctx->family;
 
+       cn = nft_compat_pernet(ctx->net);
+
        /* Re-use the existing match if it's already loaded. */
-       list_for_each_entry(nft_match, &nft_match_list, head) {
+       list_for_each_entry(nft_match, &cn->nft_match_list, head) {
                struct xt_match *match = nft_match->ops.data;
 
                if (nft_match_cmp(match, mt_name, rev, family))
@@ -789,11 +843,13 @@ nft_match_select_ops(const struct nft_ctx *ctx,
                goto err;
        }
 
-       nft_match->refcnt = 0;
+       refcount_set(&nft_match->refcnt, 0);
        nft_match->ops.type = &nft_match_type;
        nft_match->ops.eval = nft_match_eval;
        nft_match->ops.init = nft_match_init;
        nft_match->ops.destroy = nft_match_destroy;
+       nft_match->ops.activate = nft_compat_activate_mt;
+       nft_match->ops.deactivate = nft_compat_deactivate;
        nft_match->ops.dump = nft_match_dump;
        nft_match->ops.validate = nft_match_validate;
        nft_match->ops.data = match;
@@ -810,7 +866,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
 
        nft_match->ops.size = matchsize;
 
-       list_add(&nft_match->head, &nft_match_list);
+       nft_match->listcnt = 1;
+       list_add(&nft_match->head, &cn->nft_match_list);
 
        return &nft_match->ops;
 err:
@@ -826,10 +883,6 @@ static struct nft_expr_type nft_match_type __read_mostly = {
        .owner          = THIS_MODULE,
 };
 
-static LIST_HEAD(nft_target_list);
-
-static struct nft_expr_type nft_target_type;
-
 static bool nft_target_cmp(const struct xt_target *tg,
                           const char *name, u32 rev, u32 family)
 {
@@ -841,6 +894,7 @@ static const struct nft_expr_ops *
 nft_target_select_ops(const struct nft_ctx *ctx,
                      const struct nlattr * const tb[])
 {
+       struct nft_compat_net *cn;
        struct nft_xt *nft_target;
        struct xt_target *target;
        char *tg_name;
@@ -861,8 +915,9 @@ nft_target_select_ops(const struct nft_ctx *ctx,
            strcmp(tg_name, "standard") == 0)
                return ERR_PTR(-EINVAL);
 
+       cn = nft_compat_pernet(ctx->net);
        /* Re-use the existing target if it's already loaded. */
-       list_for_each_entry(nft_target, &nft_target_list, head) {
+       list_for_each_entry(nft_target, &cn->nft_target_list, head) {
                struct xt_target *target = nft_target->ops.data;
 
                if (!target->target)
@@ -893,11 +948,13 @@ nft_target_select_ops(const struct nft_ctx *ctx,
                goto err;
        }
 
-       nft_target->refcnt = 0;
+       refcount_set(&nft_target->refcnt, 0);
        nft_target->ops.type = &nft_target_type;
        nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
        nft_target->ops.init = nft_target_init;
        nft_target->ops.destroy = nft_target_destroy;
+       nft_target->ops.activate = nft_compat_activate_tg;
+       nft_target->ops.deactivate = nft_compat_deactivate;
        nft_target->ops.dump = nft_target_dump;
        nft_target->ops.validate = nft_target_validate;
        nft_target->ops.data = target;
@@ -907,7 +964,8 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        else
                nft_target->ops.eval = nft_target_eval_xt;
 
-       list_add(&nft_target->head, &nft_target_list);
+       nft_target->listcnt = 1;
+       list_add(&nft_target->head, &cn->nft_target_list);
 
        return &nft_target->ops;
 err:
@@ -923,13 +981,74 @@ static struct nft_expr_type nft_target_type __read_mostly = {
        .owner          = THIS_MODULE,
 };
 
+static int __net_init nft_compat_init_net(struct net *net)
+{
+       struct nft_compat_net *cn = nft_compat_pernet(net);
+
+       INIT_LIST_HEAD(&cn->nft_target_list);
+       INIT_LIST_HEAD(&cn->nft_match_list);
+
+       return 0;
+}
+
+static void __net_exit nft_compat_exit_net(struct net *net)
+{
+       struct nft_compat_net *cn = nft_compat_pernet(net);
+       struct nft_xt *xt, *next;
+
+       if (list_empty(&cn->nft_match_list) &&
+           list_empty(&cn->nft_target_list))
+               return;
+
+       /* If there was an error that caused nft_xt expr to not be initialized
+        * fully and no one else requested the same expression later, the lists
+        * contain 0-refcount entries that still hold a module reference.
+        *
+        * Clean them here.
+        */
+       mutex_lock(&net->nft.commit_mutex);
+       list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) {
+               struct xt_target *target = xt->ops.data;
+
+               list_del_init(&xt->head);
+
+               if (refcount_read(&xt->refcnt))
+                       continue;
+               module_put(target->me);
+               kfree(xt);
+       }
+
+       list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) {
+               struct xt_match *match = xt->ops.data;
+
+               list_del_init(&xt->head);
+
+               if (refcount_read(&xt->refcnt))
+                       continue;
+               module_put(match->me);
+               kfree(xt);
+       }
+       mutex_unlock(&net->nft.commit_mutex);
+}
+
+static struct pernet_operations nft_compat_net_ops = {
+       .init   = nft_compat_init_net,
+       .exit   = nft_compat_exit_net,
+       .id     = &nft_compat_net_id,
+       .size   = sizeof(struct nft_compat_net),
+};
+
 static int __init nft_compat_module_init(void)
 {
        int ret;
 
+       ret = register_pernet_subsys(&nft_compat_net_ops);
+       if (ret < 0)
+               goto err_target;
+
        ret = nft_register_expr(&nft_match_type);
        if (ret < 0)
-               return ret;
+               goto err_pernet;
 
        ret = nft_register_expr(&nft_target_type);
        if (ret < 0)
@@ -942,45 +1061,21 @@ static int __init nft_compat_module_init(void)
        }
 
        return ret;
-
 err_target:
        nft_unregister_expr(&nft_target_type);
 err_match:
        nft_unregister_expr(&nft_match_type);
+err_pernet:
+       unregister_pernet_subsys(&nft_compat_net_ops);
        return ret;
 }
 
 static void __exit nft_compat_module_exit(void)
 {
-       struct nft_xt *xt, *next;
-
-       /* list should be empty here, it can be non-empty only in case there
-        * was an error that caused nft_xt expr to not be initialized fully
-        * and noone else requested the same expression later.
-        *
-        * In this case, the lists contain 0-refcount entries that still
-        * hold module reference.
-        */
-       list_for_each_entry_safe(xt, next, &nft_target_list, head) {
-               struct xt_target *target = xt->ops.data;
-
-               if (WARN_ON_ONCE(xt->refcnt))
-                       continue;
-               module_put(target->me);
-               kfree(xt);
-       }
-
-       list_for_each_entry_safe(xt, next, &nft_match_list, head) {
-               struct xt_match *match = xt->ops.data;
-
-               if (WARN_ON_ONCE(xt->refcnt))
-                       continue;
-               module_put(match->me);
-               kfree(xt);
-       }
        nfnetlink_subsys_unregister(&nfnl_compat_subsys);
        nft_unregister_expr(&nft_target_type);
        nft_unregister_expr(&nft_match_type);
+       unregister_pernet_subsys(&nft_compat_net_ops);
 }
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
index cbd51ed..908e53a 100644 (file)
@@ -52,21 +52,21 @@ void nr_start_t1timer(struct sock *sk)
 {
        struct nr_sock *nr = nr_sk(sk);
 
-       mod_timer(&nr->t1timer, jiffies + nr->t1);
+       sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
 }
 
 void nr_start_t2timer(struct sock *sk)
 {
        struct nr_sock *nr = nr_sk(sk);
 
-       mod_timer(&nr->t2timer, jiffies + nr->t2);
+       sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
 }
 
 void nr_start_t4timer(struct sock *sk)
 {
        struct nr_sock *nr = nr_sk(sk);
 
-       mod_timer(&nr->t4timer, jiffies + nr->t4);
+       sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
 }
 
 void nr_start_idletimer(struct sock *sk)
@@ -74,37 +74,37 @@ void nr_start_idletimer(struct sock *sk)
        struct nr_sock *nr = nr_sk(sk);
 
        if (nr->idle > 0)
-               mod_timer(&nr->idletimer, jiffies + nr->idle);
+               sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
 }
 
 void nr_start_heartbeat(struct sock *sk)
 {
-       mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
 }
 
 void nr_stop_t1timer(struct sock *sk)
 {
-       del_timer(&nr_sk(sk)->t1timer);
+       sk_stop_timer(sk, &nr_sk(sk)->t1timer);
 }
 
 void nr_stop_t2timer(struct sock *sk)
 {
-       del_timer(&nr_sk(sk)->t2timer);
+       sk_stop_timer(sk, &nr_sk(sk)->t2timer);
 }
 
 void nr_stop_t4timer(struct sock *sk)
 {
-       del_timer(&nr_sk(sk)->t4timer);
+       sk_stop_timer(sk, &nr_sk(sk)->t4timer);
 }
 
 void nr_stop_idletimer(struct sock *sk)
 {
-       del_timer(&nr_sk(sk)->idletimer);
+       sk_stop_timer(sk, &nr_sk(sk)->idletimer);
 }
 
 void nr_stop_heartbeat(struct sock *sk)
 {
-       del_timer(&sk->sk_timer);
+       sk_stop_timer(sk, &sk->sk_timer);
 }
 
 int nr_t1timer_running(struct sock *sk)
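
The sk_reset_timer()/sk_stop_timer() helpers pair each pending timer with a reference on the socket, which the plain mod_timer()/del_timer() calls above lacked, so a timer could fire after the socket was freed. Roughly, the helpers look like this (paraphrased from net/core/sock.c; verify against your tree):

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	/* mod_timer() returns 0 if the timer was not already pending:
	 * take a reference on behalf of the newly armed timer. */
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	/* drop the timer's reference only if we actually deactivated it */
	if (del_timer(timer))
		__sock_put(sk);
}
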
index 77e9f85..f2ff21d 100644 (file)
@@ -850,6 +850,7 @@ void rose_link_device_down(struct net_device *dev)
 
 /*
  *     Route a frame to an appropriate AX.25 connection.
+ *     A NULL ax25_cb indicates an internally generated frame.
  */
 int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 {
@@ -867,6 +868,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 
        if (skb->len < ROSE_MIN_LEN)
                return res;
+
+       if (!ax25)
+               return rose_loopback_queue(skb, NULL);
+
        frametype = skb->data[2];
        lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
        if (frametype == ROSE_CALL_REQUEST &&
index 11cdc8f..bf5b54b 100644 (file)
@@ -439,6 +439,8 @@ static int tls_do_encryption(struct sock *sk,
        struct scatterlist *sge = sk_msg_elem(msg_en, start);
        int rc;
 
+       memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
+
        sge->offset += tls_ctx->tx.prepend_size;
        sge->length -= tls_ctx->tx.prepend_size;
 
@@ -448,7 +450,7 @@ static int tls_do_encryption(struct sock *sk,
        aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
        aead_request_set_crypt(aead_req, rec->sg_aead_in,
                               rec->sg_aead_out,
-                              data_len, tls_ctx->tx.iv);
+                              data_len, rec->iv_data);
 
        aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  tls_encrypt_done, sk);
@@ -1792,7 +1794,9 @@ void tls_sw_free_resources_tx(struct sock *sk)
        if (atomic_read(&ctx->encrypt_pending))
                crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
 
+       release_sock(sk);
        cancel_delayed_work_sync(&ctx->tx_work.work);
+       lock_sock(sk);
 
        /* Tx whatever records we can transmit and abandon the rest */
        tls_tx_records(sk, -1);
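
The release_sock()/lock_sock() pair around cancel_delayed_work_sync() breaks a lock-ordering cycle: the tx_work handler itself takes the socket lock, so waiting for it to finish while holding that lock would deadlock. A sketch of the assumed cycle (names from this patch; the ordering is the point):

/*
 *   CPU0: tls_sw_free_resources_tx()        CPU1: tx_work handler
 *         lock_sock(sk)
 *         cancel_delayed_work_sync() -----> waits for CPU1 to finish
 *                                           lock_sock(sk) -----> waits for CPU0
 */
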
index 934492b..ba0a404 100644 (file)
@@ -680,16 +680,6 @@ static void xfrm_hash_resize(struct work_struct *work)
        mutex_unlock(&hash_resize_mutex);
 }
 
-static void xfrm_hash_reset_inexact_table(struct net *net)
-{
-       struct xfrm_pol_inexact_bin *b;
-
-       lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
-
-       list_for_each_entry(b, &net->xfrm.inexact_bins, inexact_bins)
-               INIT_HLIST_HEAD(&b->hhead);
-}
-
 /* Make sure *pol can be inserted into fastbin.
  * Useful to check that later insert requests will be successful
  * (provided xfrm_policy_lock is held throughout).
@@ -833,13 +823,13 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
                                              u16 family)
 {
        unsigned int matched_s, matched_d;
-       struct hlist_node *newpos = NULL;
        struct xfrm_policy *policy, *p;
 
        matched_s = 0;
        matched_d = 0;
 
        list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+               struct hlist_node *newpos = NULL;
                bool matches_s, matches_d;
 
                if (!policy->bydst_reinsert)
@@ -849,16 +839,19 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
 
                policy->bydst_reinsert = false;
                hlist_for_each_entry(p, &n->hhead, bydst) {
-                       if (policy->priority >= p->priority)
+                       if (policy->priority > p->priority)
+                               newpos = &p->bydst;
+                       else if (policy->priority == p->priority &&
+                                policy->pos > p->pos)
                                newpos = &p->bydst;
                        else
                                break;
                }
 
                if (newpos)
-                       hlist_add_behind(&policy->bydst, newpos);
+                       hlist_add_behind_rcu(&policy->bydst, newpos);
                else
-                       hlist_add_head(&policy->bydst, &n->hhead);
+                       hlist_add_head_rcu(&policy->bydst, &n->hhead);
 
                /* paranoia checks follow.
                 * Check that the reinserted policy matches at least
@@ -893,12 +886,13 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
                                              struct rb_root *new,
                                              u16 family)
 {
-       struct rb_node **p, *parent = NULL;
        struct xfrm_pol_inexact_node *node;
+       struct rb_node **p, *parent;
 
        /* we should not have another subtree here */
        WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
-
+restart:
+       parent = NULL;
        p = &new->rb_node;
        while (*p) {
                u8 prefixlen;
@@ -918,12 +912,11 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
                } else {
                        struct xfrm_policy *tmp;
 
-                       hlist_for_each_entry(tmp, &node->hhead, bydst)
-                               tmp->bydst_reinsert = true;
-                       hlist_for_each_entry(tmp, &n->hhead, bydst)
+                       hlist_for_each_entry(tmp, &n->hhead, bydst) {
                                tmp->bydst_reinsert = true;
+                               hlist_del_rcu(&tmp->bydst);
+                       }
 
-                       INIT_HLIST_HEAD(&node->hhead);
                        xfrm_policy_inexact_list_reinsert(net, node, family);
 
                        if (node->prefixlen == n->prefixlen) {
@@ -935,8 +928,7 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
                        kfree_rcu(n, rcu);
                        n = node;
                        n->prefixlen = prefixlen;
-                       *p = new->rb_node;
-                       parent = NULL;
+                       goto restart;
                }
        }
 
@@ -965,12 +957,11 @@ static void xfrm_policy_inexact_node_merge(struct net *net,
                                                  family);
        }
 
-       hlist_for_each_entry(tmp, &v->hhead, bydst)
-               tmp->bydst_reinsert = true;
-       hlist_for_each_entry(tmp, &n->hhead, bydst)
+       hlist_for_each_entry(tmp, &v->hhead, bydst) {
                tmp->bydst_reinsert = true;
+               hlist_del_rcu(&tmp->bydst);
+       }
 
-       INIT_HLIST_HEAD(&n->hhead);
        xfrm_policy_inexact_list_reinsert(net, n, family);
 }
 
@@ -1235,6 +1226,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
        } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
 
        spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+       write_seqcount_begin(&xfrm_policy_hash_generation);
 
        /* make sure that we can insert the indirect policies again before
         * we start with destructive action.
@@ -1278,10 +1270,14 @@ static void xfrm_hash_rebuild(struct work_struct *work)
        }
 
        /* reset the bydst and inexact table in all directions */
-       xfrm_hash_reset_inexact_table(net);
-
        for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
-               INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
+               struct hlist_node *n;
+
+               hlist_for_each_entry_safe(policy, n,
+                                         &net->xfrm.policy_inexact[dir],
+                                         bydst_inexact_list)
+                       hlist_del_init(&policy->bydst_inexact_list);
+
                hmask = net->xfrm.policy_bydst[dir].hmask;
                odst = net->xfrm.policy_bydst[dir].table;
                for (i = hmask; i >= 0; i--)
@@ -1313,6 +1309,9 @@ static void xfrm_hash_rebuild(struct work_struct *work)
                newpos = NULL;
                chain = policy_hash_bysel(net, &policy->selector,
                                          policy->family, dir);
+
+               hlist_del_rcu(&policy->bydst);
+
                if (!chain) {
                        void *p = xfrm_policy_inexact_insert(policy, dir, 0);
 
@@ -1334,6 +1333,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
 
 out_unlock:
        __xfrm_policy_inexact_flush(net);
+       write_seqcount_end(&xfrm_policy_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
        mutex_unlock(&hash_resize_mutex);
@@ -2600,7 +2600,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                dst_copy_metrics(dst1, dst);
 
                if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
-                       __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
+                       __u32 mark = 0;
+
+                       if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
+                               mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
 
                        family = xfrm[i]->props.family;
                        dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
index 277c1c4..c6d26af 100644 (file)
@@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                if (!ut[i].family)
                        ut[i].family = family;
 
-               if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
-                   (ut[i].family != prev_family))
-                       return -EINVAL;
-
+               switch (ut[i].mode) {
+               case XFRM_MODE_TUNNEL:
+               case XFRM_MODE_BEET:
+                       break;
+               default:
+                       if (ut[i].family != prev_family)
+                               return -EINVAL;
+                       break;
+               }
                if (ut[i].mode >= XFRM_MODE_MAX)
                        return -EINVAL;
 
index 08c88de..11975ec 100644 (file)
@@ -1444,7 +1444,10 @@ check:
                        new = aa_label_merge(label, target, GFP_KERNEL);
                if (IS_ERR_OR_NULL(new)) {
                        info = "failed to build target label";
-                       error = PTR_ERR(new);
+                       if (!new)
+                               error = -ENOMEM;
+                       else
+                               error = PTR_ERR(new);
                        new = NULL;
                        perms.allow = 0;
                        goto audit;
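
The underlying pitfall: PTR_ERR(NULL) evaluates to 0, so funnelling a NULL allocator result through PTR_ERR() reports success instead of -ENOMEM. A hedged illustration (err_from() is not from this patch):

#include <linux/err.h>
#include <linux/errno.h>

/* Illustrative only: why an IS_ERR_OR_NULL() result can't be fed straight
 * into PTR_ERR(): PTR_ERR(NULL) is 0, which reads as "no error". */
static long err_from(void *p)
{
	if (!p)
		return -ENOMEM;		/* the patch's added branch */
	return PTR_ERR(p);		/* real ERR_PTR() values pass through */
}
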
index 2c01087..8db1731 100644 (file)
@@ -1599,12 +1599,14 @@ static unsigned int apparmor_ipv4_postroute(void *priv,
        return apparmor_ip_postroute(priv, skb, state);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 static unsigned int apparmor_ipv6_postroute(void *priv,
                                            struct sk_buff *skb,
                                            const struct nf_hook_state *state)
 {
        return apparmor_ip_postroute(priv, skb, state);
 }
+#endif
 
 static const struct nf_hook_ops apparmor_nf_ops[] = {
        {
index 40013b2..6c99fa8 100644 (file)
@@ -2112,6 +2112,13 @@ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
        return 0;
 }
 
+/* allow waiting for a capture stream that hasn't been started */
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
+#define wait_capture_start(substream)  ((substream)->oss.oss)
+#else
+#define wait_capture_start(substream)  false
+#endif
+
 /* the common loop for read/write data */
 snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
                                     void *data, bool interleaved,
@@ -2182,7 +2189,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
                        err = snd_pcm_start(substream);
                        if (err < 0)
                                goto _end_unlock;
-               } else {
+               } else if (!wait_capture_start(substream)) {
                        /* nothing to do */
                        err = 0;
                        goto _end_unlock;
index b4f4721..4139ace 100644 (file)
@@ -117,6 +117,7 @@ struct alc_spec {
        int codec_variant;      /* flag for other variants */
        unsigned int has_alc5505_dsp:1;
        unsigned int no_depop_delay:1;
+       unsigned int done_hp_init:1;
 
        /* for PLL fix */
        hda_nid_t pll_nid;
@@ -3372,6 +3373,48 @@ static void alc_default_shutup(struct hda_codec *codec)
        snd_hda_shutup_pins(codec);
 }
 
+static void alc294_hp_init(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       int i, val;
+
+       if (!hp_pin)
+               return;
+
+       snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+       msleep(100);
+
+       snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+       alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
+       alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+       /* Wait for the depop procedure to finish */
+       val = alc_read_coefex_idx(codec, 0x58, 0x01);
+       for (i = 0; i < 20 && val & 0x0080; i++) {
+               msleep(50);
+               val = alc_read_coefex_idx(codec, 0x58, 0x01);
+       }
+       /* Set HP depop to auto mode */
+       alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+       msleep(50);
+}
+
+static void alc294_init(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (!spec->done_hp_init) {
+               alc294_hp_init(codec);
+               spec->done_hp_init = true;
+       }
+       alc_default_init(codec);
+}
+
 static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
                             unsigned int val)
 {
@@ -7373,37 +7416,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
        alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
 
-static void alc294_hp_init(struct hda_codec *codec)
-{
-       struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
-       int i, val;
-
-       if (!hp_pin)
-               return;
-
-       snd_hda_codec_write(codec, hp_pin, 0,
-                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
-
-       msleep(100);
-
-       snd_hda_codec_write(codec, hp_pin, 0,
-                           AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
-
-       alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
-       alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
-
-       /* Wait for depop procedure finish  */
-       val = alc_read_coefex_idx(codec, 0x58, 0x01);
-       for (i = 0; i < 20 && val & 0x0080; i++) {
-               msleep(50);
-               val = alc_read_coefex_idx(codec, 0x58, 0x01);
-       }
-       /* Set HP depop to auto mode */
-       alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
-       msleep(50);
-}
-
 /*
  */
 static int patch_alc269(struct hda_codec *codec)
@@ -7529,7 +7541,7 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC294;
                spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
-               alc294_hp_init(codec);
+               spec->init_hook = alc294_init;
                break;
        case 0x10ec0300:
                spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7541,7 +7553,7 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC700;
                spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
-               alc294_hp_init(codec);
+               spec->init_hook = alc294_init;
                break;
 
        }
index ebbadb3..bb83728 100644 (file)
@@ -1492,6 +1492,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
 
+       case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
        case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
        case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
        case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
index d079f36..ac221f1 100644 (file)
@@ -1681,13 +1681,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
                .force_header = false,
        };
        struct perf_evsel *ev2;
-       static bool init;
        u64 val;
 
-       if (!init) {
-               perf_stat__init_shadow_stats();
-               init = true;
-       }
        if (!evsel->stats)
                perf_evlist__alloc_stats(script->session->evlist, false);
        if (evsel_script(evsel->leader)->gnum++ == 0)
@@ -1794,7 +1789,7 @@ static void process_event(struct perf_script *script,
                return;
        }
 
-       if (PRINT_FIELD(TRACE)) {
+       if (PRINT_FIELD(TRACE) && sample->raw_data) {
                event_format__fprintf(evsel->tp_format, sample->cpu,
                                      sample->raw_data, sample->raw_size, fp);
        }
@@ -2359,6 +2354,8 @@ static int __cmd_script(struct perf_script *script)
 
        signal(SIGINT, sig_handler);
 
+       perf_stat__init_shadow_stats();
+
        /* override event processing functions */
        if (script->show_task_events) {
                script->tool.comm = process_comm_event;
index 1d00e5e..82e16bf 100644 (file)
@@ -224,20 +224,24 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
        return ret;
 }
 
-static int disasm__cmp(struct annotation_line *a, struct annotation_line *b)
+static double disasm__cmp(struct annotation_line *a, struct annotation_line *b,
+                                                 int percent_type)
 {
        int i;
 
        for (i = 0; i < a->data_nr; i++) {
-               if (a->data[i].percent == b->data[i].percent)
+               if (a->data[i].percent[percent_type] == b->data[i].percent[percent_type])
                        continue;
-               return a->data[i].percent < b->data[i].percent;
+               return a->data[i].percent[percent_type] -
+                          b->data[i].percent[percent_type];
        }
        return 0;
 }
 
-static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al)
+static void disasm_rb_tree__insert(struct annotate_browser *browser,
+                               struct annotation_line *al)
 {
+       struct rb_root *root = &browser->entries;
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct annotation_line *l;
@@ -246,7 +250,7 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line
                parent = *p;
                l = rb_entry(parent, struct annotation_line, rb_node);
 
-               if (disasm__cmp(al, l))
+               if (disasm__cmp(al, l, browser->opts->percent_type) < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
@@ -329,7 +333,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
                        RB_CLEAR_NODE(&pos->al.rb_node);
                        continue;
                }
-               disasm_rb_tree__insert(&browser->entries, &pos->al);
+               disasm_rb_tree__insert(browser, &pos->al);
        }
        pthread_mutex_unlock(&notes->lock);
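
disasm__cmp() previously returned the boolean a < b, which cannot distinguish "greater" from "equal", so the rb-tree descent could mis-order entries; returning the signed percent difference restores the three-way comparator contract. A minimal sketch of that contract (illustrative):

/* A comparator feeding a binary-tree insert must be three-way:
 * negative, zero, or positive; a 0/1 boolean collapses two cases. */
static double cmp3(double a, double b)
{
	return a - b;	/* the sign alone decides left vs. right descent */
}
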
 
index 1ccbd33..383674f 100644 (file)
@@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
        if (!cpu_list)
                return cpu_map__read_all_cpu_map();
 
-       if (!isdigit(*cpu_list))
+       /*
+        * must handle the case of an empty cpumap to cover
+        * the TOPOLOGY header for NUMA nodes with no CPUs
+        * (e.g., because of CPU hotplug)
+        */
+       if (!isdigit(*cpu_list) && *cpu_list != '\0')
                goto out;
 
        while (isdigit(*cpu_list)) {
@@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
 
        if (nr_cpus > 0)
                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
-       else
+       else if (*cpu_list != '\0')
                cpus = cpu_map__default_new();
+       else
+               cpus = cpu_map__dummy_new();
 invalid:
        free(tmp_cpus);
 out:
index 8975895..ea523d3 100644 (file)
@@ -391,8 +391,10 @@ void ordered_events__free(struct ordered_events *oe)
         * Current buffer might not have all the events allocated
         * yet, we need to free only allocated ones ...
         */
-       list_del(&oe->buffer->list);
-       ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
+       if (oe->buffer) {
+               list_del(&oe->buffer->list);
+               ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
+       }
 
        /* ... and continue with the rest */
        list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
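
The guard added above covers teardown of an ordered_events instance whose current buffer was never allocated. The pattern, sketched standalone with toy types (not the real structs), assuming oe->buffer stays NULL when no event was ever queued:

    #include <stdlib.h>

    struct buffer { int dummy; };
    struct events { struct buffer *buffer; };

    static void events__free(struct events *oe)
    {
            /* Unconditionally touching oe->buffer dereferenced NULL
             * when nothing had been queued; skip it in that case. */
            if (oe->buffer)
                    free(oe->buffer);
    }

    int main(void)
    {
            struct events oe = { .buffer = NULL };
            events__free(&oe); /* safe now even with no buffer */
            return 0;
    }
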
index 63f758c..64d1f36 100644 (file)
@@ -17,6 +17,8 @@ if cc == "clang":
             vars[var] = sub("-mcet", "", vars[var])
         if not clang_has_option("-fcf-protection"):
             vars[var] = sub("-fcf-protection", "", vars[var])
+        if not clang_has_option("-fstack-clash-protection"):
+            vars[var] = sub("-fstack-clash-protection", "", vars[var])
 
 from distutils.core import setup, Extension
 
index bab13dd..0d26b5e 100755 (executable)
@@ -37,6 +37,10 @@ prerequisite()
                exit $ksft_skip
        fi
 
+       present_cpus=`cat $SYSFS/devices/system/cpu/present`
+       present_max=${present_cpus##*-}
+       echo "present_cpus = $present_cpus present_max = $present_max"
+
        echo -e "\t Cpus in online state: $online_cpus"
 
        offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
@@ -151,6 +155,8 @@ online_cpus=0
 online_max=0
 offline_cpus=0
 offline_max=0
+present_cpus=0
+present_max=0
 
 while getopts e:ahp: opt; do
        case $opt in
@@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then
        online_cpu_expect_success $online_max
 
        if [[ $offline_cpus -gt 0 ]]; then
-               echo -e "\t offline to online to offline: cpu $offline_max"
-               online_cpu_expect_success $offline_max
-               offline_cpu_expect_success $offline_max
+               echo -e "\t offline to online to offline: cpu $present_max"
+               online_cpu_expect_success $present_max
+               offline_cpu_expect_success $present_max
+               online_cpu $present_max
        fi
        exit 0
 else
index f4ba8eb..ad06489 100644 (file)
@@ -1,5 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 TEST_PROGS := ir_loopback.sh
 TEST_GEN_PROGS_EXTENDED := ir_loopback
+APIDIR := ../../../include/uapi
+CFLAGS += -Wall -O2 -I$(APIDIR)
 
 include ../lib.mk
index f8f3e90..1e6d14d 100644 (file)
@@ -21,6 +21,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
 KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
-$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
+$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
 $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
 $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
index 8db35b9..71d7fdc 100755 (executable)
@@ -28,6 +28,19 @@ KEY_AES=0x0123456789abcdef0123456789012345
 SPI1=0x1
 SPI2=0x2
 
+do_esp_policy() {
+    local ns=$1
+    local me=$2
+    local remote=$3
+    local lnet=$4
+    local rnet=$5
+
+    # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
+    ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow
+    # to fwd decrypted packets after esp processing:
+    ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow
+}
+
 do_esp() {
     local ns=$1
     local me=$2
@@ -40,10 +53,59 @@ do_esp() {
     ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in  enc aes $KEY_AES  auth sha1 $KEY_SHA  mode tunnel sel src $rnet dst $lnet
     ip -net $ns xfrm state add src $me  dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet
 
-    # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
-    ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow
-    # to fwd decrypted packets after esp processing:
-    ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow
+    do_esp_policy $ns $me $remote $lnet $rnet
+}
+
+# Add policies with different netmasks to make sure the kernel carries
+# over the policies contained within the new netmask when the search
+# tree is rebuilt.
+# Peer netns that are supposed to be encapsulated via esp have addresses
+# in the 10.0.1.0/24 and 10.0.2.0/24 subnets, respectively.
+#
+# Adding a policy for '10.0.1.0/23' will make it necessary to
+# alter the prefix of the 10.0.1.0 subnet.
+# In case new prefix overlaps with existing node, the node and all
+# policies it carries need to be merged with the existing one(s).
+#
+# Do that here.
+do_overlap()
+{
+    local ns=$1
+
+    # adds new nodes to the tree (neither network exists yet in the policy database).
+    ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block
+
+    # adds a new node in the 10.0.0.0/24 tree (dst node exists).
+    ip -net $ns xfrm policy add src 10.2.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block
+
+    # adds a 10.2.0.0/23 node, but for different dst.
+    ip -net $ns xfrm policy add src 10.2.0.0/23 dst 10.0.1.0/24 dir fwd priority 200 action block
+
+    # dst now overlaps with the 10.0.1.0/24 ESP policy in fwd.
+    # The kernel must 'promote' the existing one (10.0.0.0/24) to 10.0.0.0/23.
+    # But 10.0.0.0/23 also includes the existing 10.0.1.0/24, so that node
+    # has to be merged as well, including its source-sorted subtrees.
+    # old:
+    # 10.0.0.0/24 (node 1 in dst tree of the bin)
+    #    10.1.0.0/24 (node in src tree of dst node 1)
+    #    10.2.0.0/24 (node in src tree of dst node 1)
+    # 10.0.1.0/24 (node 2 in dst tree of the bin)
+    #    10.0.2.0/24 (node in src tree of dst node 2)
+    #    10.2.0.0/24 (node in src tree of dst node 2)
+    #
+    # The next 'policy add' adds dst '10.0.0.0/23', which means
+    # that dst node 1 and dst node 2 have to be merged, including
+    # their sub-trees.  As no duplicates are allowed, the policies in
+    # the two '10.0.2.0/24' nodes are also merged.
+    #
+    # after the 'add', internal search tree should look like this:
+    # 10.0.0.0/23 (node in dst tree of bin)
+    #     10.0.2.0/24 (node in src tree of dst node)
+    #     10.1.0.0/24 (node in src tree of dst node)
+    #     10.2.0.0/24 (node in src tree of dst node)
+    #
+    # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23.
+    ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block
 }
 
 do_esp_policy_get_check() {
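
The tree-merge commentary above hinges on one arithmetic fact: widening 10.0.0.0/24 to /23 makes it cover 10.0.1.0/24 as well, which is what forces the two dst nodes to merge. A small C check of that containment (pure bit arithmetic on IPv4 prefixes, no kernel code):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Does prefix a/alen contain prefix b/blen?  True iff a is no
     * longer than b and b's address matches a under a's mask. */
    static int prefix_contains(uint32_t a, int alen, uint32_t b, int blen)
    {
            uint32_t mask = alen ? htonl(~0u << (32 - alen)) : 0;

            return alen <= blen && ((a ^ b) & mask) == 0;
    }

    int main(void)
    {
            uint32_t net, sub1, sub2;

            inet_pton(AF_INET, "10.0.0.0", &net);
            inet_pton(AF_INET, "10.0.0.0", &sub1);
            inet_pton(AF_INET, "10.0.1.0", &sub2);

            /* 10.0.0.0/23 covers both former /24 nodes: prints "1 1". */
            printf("%d %d\n", prefix_contains(net, 23, sub1, 24),
                              prefix_contains(net, 23, sub2, 24));
            return 0;
    }
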
@@ -160,6 +222,41 @@ check_xfrm() {
        return $lret
 }
 
+check_exceptions()
+{
+       logpostfix="$1"
+       local lret=0
+
+       # ping to .254 should be excluded from the tunnel (exception is in place).
+       check_xfrm 0 254
+       if [ $? -ne 0 ]; then
+               echo "FAIL: expected ping to .254 to fail ($logpostfix)"
+               lret=1
+       else
+               echo "PASS: ping to .254 bypassed ipsec tunnel ($logpostfix)"
+       fi
+
+       # ping to .253 should use ipsec due to the direct policy exception.
+       check_xfrm 1 253
+       if [ $? -ne 0 ]; then
+               echo "FAIL: expected ping to .253 to use ipsec tunnel ($logpostfix)"
+               lret=1
+       else
+               echo "PASS: direct policy matches ($logpostfix)"
+       fi
+
+       # ping to .2 should use ipsec.
+       check_xfrm 1 2
+       if [ $? -ne 0 ]; then
+               echo "FAIL: expected ping to .2 to use ipsec tunnel ($logpostfix)"
+               lret=1
+       else
+               echo "PASS: policy matches ($logpostfix)"
+       fi
+
+       return $lret
+}
+
 #check for needed privileges
 if [ "$(id -u)" -ne 0 ];then
        echo "SKIP: Need root privileges"
@@ -270,33 +367,45 @@ do_exception ns4 10.0.3.10 10.0.3.1 10.0.1.253 10.0.1.240/28
 do_exception ns3 dead:3::1 dead:3::10 dead:2::fd  dead:2:f0::/96
 do_exception ns4 dead:3::10 dead:3::1 dead:1::fd  dead:1:f0::/96
 
-# ping to .254 should now be excluded from the tunnel
-check_xfrm 0 254
+check_exceptions "exceptions"
 if [ $? -ne 0 ]; then
-       echo "FAIL: expected ping to .254 to fail"
        ret=1
-else
-       echo "PASS: ping to .254 bypassed ipsec tunnel"
 fi
 
-# ping to .253 should use use ipsec due to direct policy exception.
-check_xfrm 1 253
-if [ $? -ne 0 ]; then
-       echo "FAIL: expected ping to .253 to use ipsec tunnel"
-       ret=1
-else
-       echo "PASS: direct policy matches"
-fi
+# insert block policies with adjacent/overlapping netmasks
+do_overlap ns3
 
-# ping to .2 should use ipsec.
-check_xfrm 1 2
+check_exceptions "exceptions and block policies"
 if [ $? -ne 0 ]; then
-       echo "FAIL: expected ping to .2 to use ipsec tunnel"
        ret=1
-else
-       echo "PASS: policy matches"
 fi
 
+for n in ns3 ns4;do
+       ip -net $n xfrm policy set hthresh4 28 24 hthresh6 126 125
+       sleep $((RANDOM%5))
+done
+
+check_exceptions "exceptions and block policies after hthresh changes"
+
+# full flush of policy db, check everything gets freed incl. internal meta data
+ip -net ns3 xfrm policy flush
+
+do_esp_policy ns3 10.0.3.1 10.0.3.10 10.0.1.0/24 10.0.2.0/24
+do_exception ns3 10.0.3.1 10.0.3.10 10.0.2.253 10.0.2.240/28
+
+# move inexact policies to hash table
+ip -net ns3 xfrm policy set hthresh4 16 16
+
+sleep $((RANDOM%5))
+check_exceptions "exceptions and block policies after hthresh change in ns3"
+
+# restore original hthresh settings -- move policies back to tables
+for n in ns3 ns4;do
+       ip -net $n xfrm policy set hthresh4 32 32 hthresh6 128 128
+       sleep $((RANDOM%5))
+done
+check_exceptions "exceptions and block policies after hthresh change back to normal"
+
 for i in 1 2 3 4;do ip netns del ns$i;done
 
 exit $ret
index 82121a8..29bac5e 100644 (file)
@@ -10,4 +10,5 @@
 /proc-uptime-002
 /read
 /self
+/setns-dcache
 /thread-self
index 1c12c34..434d033 100644 (file)
@@ -14,6 +14,7 @@ TEST_GEN_PROGS += proc-uptime-001
 TEST_GEN_PROGS += proc-uptime-002
 TEST_GEN_PROGS += read
 TEST_GEN_PROGS += self
+TEST_GEN_PROGS += setns-dcache
 TEST_GEN_PROGS += thread-self
 
 include ../lib.mk
diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
new file mode 100644 (file)
index 0000000..60ab197
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Test that setns(CLONE_NEWNET) points to new /proc/net content even
+ * if old one is in dcache.
+ *
+ * FIXME /proc/net/unix is under CONFIG_UNIX, which can be disabled.
+ */
+#undef NDEBUG
+#include <assert.h>
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+
+static pid_t pid = -1;
+
+static void f(void)
+{
+       if (pid > 0) {
+               kill(pid, SIGTERM);
+       }
+}
+
+int main(void)
+{
+       int fd[2];
+       char _ = 0;
+       int nsfd;
+
+       atexit(f);
+
+       /* Check for privileges and syscall availability straight away. */
+       if (unshare(CLONE_NEWNET) == -1) {
+               if (errno == ENOSYS || errno == EPERM) {
+                       return 4;
+               }
+               return 1;
+       }
+       /* Distinguisher between two otherwise empty net namespaces. */
+       if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
+               return 1;
+       }
+
+       if (pipe(fd) == -1) {
+               return 1;
+       }
+
+       pid = fork();
+       if (pid == -1) {
+               return 1;
+       }
+
+       if (pid == 0) {
+               if (unshare(CLONE_NEWNET) == -1) {
+                       return 1;
+               }
+
+               if (write(fd[1], &_, 1) != 1) {
+                       return 1;
+               }
+
+               pause();
+
+               return 0;
+       }
+
+       if (read(fd[0], &_, 1) != 1) {
+               return 1;
+       }
+
+       {
+               char buf[64];
+               snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
+               nsfd = open(buf, O_RDONLY);
+               if (nsfd == -1) {
+                       return 1;
+               }
+       }
+
+       /* Reliably pin dentry into dcache. */
+       (void)open("/proc/net/unix", O_RDONLY);
+
+       if (setns(nsfd, CLONE_NEWNET) == -1) {
+               return 1;
+       }
+
+       kill(pid, SIGTERM);
+       pid = 0;
+
+       {
+               char buf[4096];
+               ssize_t rv;
+               int fd;
+
+               fd = open("/proc/net/unix", O_RDONLY);
+               if (fd == -1) {
+                       return 1;
+               }
+
+#define S "Num       RefCount Protocol Flags    Type St Inode Path\n"
+               rv = read(fd, buf, sizeof(buf));
+
+               assert(rv == strlen(S));
+               assert(memcmp(buf, S, strlen(S)) == 0);
+       }
+
+       return 0;
+}
index 496a9a8..7e632b4 100644 (file)
@@ -1608,7 +1608,16 @@ TEST_F(TRACE_poke, getpid_runs_normally)
 #ifdef SYSCALL_NUM_RET_SHARE_REG
 # define EXPECT_SYSCALL_RETURN(val, action)    EXPECT_EQ(-1, action)
 #else
-# define EXPECT_SYSCALL_RETURN(val, action)    EXPECT_EQ(val, action)
+# define EXPECT_SYSCALL_RETURN(val, action)            \
+       do {                                            \
+               errno = 0;                              \
+               if (val < 0) {                          \
+                       EXPECT_EQ(-1, action);          \
+                       EXPECT_EQ(-(val), errno);       \
+               } else {                                \
+                       EXPECT_EQ(val, action);         \
+               }                                       \
+       } while (0)
 #endif
 
 /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
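
The reworked EXPECT_SYSCALL_RETURN() encodes the kernel convention that a skipped syscall hands userspace a negative errno, which the libc wrapper then splits into ret == -1 plus errno, while a positive faked value passes through untouched. A toy illustration of that split (plain C, hypothetical wrapper, not the kselftest harness):

    #include <errno.h>
    #include <stdio.h>

    /* Mimics what a libc syscall wrapper does with a raw kernel
     * return: values in [-4095, -1] become -1 with errno set. */
    static long raw_to_libc(long raw)
    {
            if (raw < 0 && raw >= -4095) {
                    errno = (int)-raw;
                    return -1;
            }
            return raw;
    }

    int main(void)
    {
            long ret = raw_to_libc(-ESRCH);

            /* Matches the macro's negative-val branch: -1, errno == ESRCH. */
            printf("ret=%ld errno=%d (ESRCH=%d)\n", ret, errno, ESRCH);

            /* Positive fake values (e.g. 45000) pass through unchanged. */
            printf("faked=%ld\n", raw_to_libc(45000));
            return 0;
    }
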
@@ -1647,7 +1656,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
 
 /* Architecture-specific syscall changing routine. */
 void change_syscall(struct __test_metadata *_metadata,
-                   pid_t tracee, int syscall)
+                   pid_t tracee, int syscall, int result)
 {
        int ret;
        ARCH_REGS regs;
@@ -1706,7 +1715,7 @@ void change_syscall(struct __test_metadata *_metadata,
 #ifdef SYSCALL_NUM_RET_SHARE_REG
                TH_LOG("Can't modify syscall return on this architecture");
 #else
-               regs.SYSCALL_RET = EPERM;
+               regs.SYSCALL_RET = result;
 #endif
 
 #ifdef HAVE_GETREGS
@@ -1734,14 +1743,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
        case 0x1002:
                /* change getpid to getppid. */
                EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
-               change_syscall(_metadata, tracee, __NR_getppid);
+               change_syscall(_metadata, tracee, __NR_getppid, 0);
                break;
        case 0x1003:
-               /* skip gettid. */
+               /* skip gettid with valid return code. */
                EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
-               change_syscall(_metadata, tracee, -1);
+               change_syscall(_metadata, tracee, -1, 45000);
                break;
        case 0x1004:
+               /* skip openat with error. */
+               EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
+               change_syscall(_metadata, tracee, -1, -ESRCH);
+               break;
+       case 0x1005:
                /* do nothing (allow getppid) */
                EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
                break;
@@ -1774,9 +1788,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
        nr = get_syscall(_metadata, tracee);
 
        if (nr == __NR_getpid)
-               change_syscall(_metadata, tracee, __NR_getppid);
+               change_syscall(_metadata, tracee, __NR_getppid, 0);
+       if (nr == __NR_gettid)
+               change_syscall(_metadata, tracee, -1, 45000);
        if (nr == __NR_openat)
-               change_syscall(_metadata, tracee, -1);
+               change_syscall(_metadata, tracee, -1, -ESRCH);
 }
 
 FIXTURE_DATA(TRACE_syscall) {
@@ -1793,8 +1809,10 @@ FIXTURE_SETUP(TRACE_syscall)
                BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
                BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
                BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
-               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
                BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
+               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
                BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
        };
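
The filter above grows by one JUMP/RET pair: a linear dispatch in which each unmatched syscall number falls through to the next test, and each match returns a distinct SECCOMP_RET_TRACE cookie. A compile-and-print sketch of one such pair (Linux uapi headers only; the syscall number is hard-coded for brevity and this is not the test's full program):

    #include <stddef.h>
    #include <stdio.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    int main(void)
    {
            struct sock_filter filter[] = {
                    /* Load the syscall number from seccomp_data. */
                    BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                             offsetof(struct seccomp_data, nr)),
                    /* One dispatch pair: skip the RET when nr != 39
                     * (__NR_getpid on x86-64), falling through to the
                     * next pair -- here, the final ALLOW. */
                    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 39, 0, 1),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_TRACE | 0x1002),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
            };

            printf("%zu BPF instructions\n",
                   sizeof(filter) / sizeof(filter[0]));
            return 0;
    }
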
 
@@ -1842,15 +1860,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected)
        EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
 
-TEST_F(TRACE_syscall, ptrace_syscall_dropped)
+TEST_F(TRACE_syscall, ptrace_syscall_errno)
+{
+       /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
+       teardown_trace_fixture(_metadata, self->tracer);
+       self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
+                                          true);
+
+       /* Tracer should skip the open syscall, resulting in ESRCH. */
+       EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, ptrace_syscall_faked)
 {
        /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
        teardown_trace_fixture(_metadata, self->tracer);
        self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
                                           true);
 
-       /* Tracer should skip the open syscall, resulting in EPERM. */
-       EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
+       /* Tracer should skip the gettid syscall, resulting in a fake pid. */
+       EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
 }
 
 TEST_F(TRACE_syscall, syscall_allowed)
@@ -1883,7 +1912,21 @@ TEST_F(TRACE_syscall, syscall_redirected)
        EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
 
-TEST_F(TRACE_syscall, syscall_dropped)
+TEST_F(TRACE_syscall, syscall_errno)
+{
+       long ret;
+
+       ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+       ASSERT_EQ(0, ret);
+
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
+       ASSERT_EQ(0, ret);
+
+       /* openat has been skipped and an errno value returned. */
+       EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, syscall_faked)
 {
        long ret;
 
@@ -1894,8 +1937,7 @@ TEST_F(TRACE_syscall, syscall_dropped)
        ASSERT_EQ(0, ret);
 
        /* gettid has been skipped and an altered return value stored. */
-       EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
-       EXPECT_NE(self->mytid, syscall(__NR_gettid));
+       EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
 }
 
 TEST_F(TRACE_syscall, skip_after_RET_TRACE)
index c02683c..7656c7c 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 CFLAGS += -O3 -Wl,-no-as-needed -Wall
-LDFLAGS += -lrt -lpthread -lm
+LDLIBS += -lrt -lpthread -lm
 
 # these are all "safe" tests that don't modify
 # system time or require escalated privileges